# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with shared weights between wrappers using a model with mevo kernel. """
from copy import deepcopy
import pytest
import torch
from torch import nn
import torch.multiprocessing as mp
from torch.optim import SGD
from fairscale.experimental.nn import MEVO
from fairscale.fair_dev.testing.testing import (
dist_init,
in_circle_ci,
objects_are_equal,
skip_if_single_gpu,
teardown,
temp_files_ctx,
)
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
VOCAB = 4
D_MODEL = 2
BS = 2
SEQ = 3
TILE = 2
_large = True
if _large:
# We used to have 50K VOCAB in this test, but it seems to be flaky on CI's GPU machines and
# it does consume significant GPU memory. Reducing to 10K might help here.
VOCAB = 1024 * 10
D_MODEL = 1024
BS = 2
SEQ = 16
TILE = 16
class Model(nn.Module):
def __init__(self, with_fsdp=False, wrap_middle="none"):
super().__init__()
self.l0 = nn.Embedding(VOCAB, D_MODEL).cuda().half()
nn.init.uniform_(self.l0.weight, -1.0e-1, 1.0e-1)
self.l1 = MEVO(self.l0.weight, tile_factor=TILE, reduction="sum")
self.middle = nn.Linear(D_MODEL, D_MODEL).cuda().half()
# LNs are not strictly needed for this test, but they help reduce the loss quickly
        # and improve numerical stability.
self.ln1 = nn.LayerNorm(D_MODEL).cuda().half()
self.ln2 = nn.LayerNorm(D_MODEL).cuda().half()
if with_fsdp:
            # Shared layers must be left unflattened.
self.l0 = FSDP(self.l0, flatten_parameters=False, mixed_precision=False, compute_dtype=torch.float16)
self.l1 = FSDP(self.l1, flatten_parameters=False, mixed_precision=False, compute_dtype=torch.float16)
self.l1.append_shared_param(self.l0.module.weight)
# These are for debugging.
# print(id(self.l0), "is emb")
# print(id(self.l1), "is out")
assert wrap_middle in ["none", "flat", "nonflat"]
if wrap_middle != "none":
self.middle = FSDP(
self.middle,
flatten_parameters=wrap_middle == "flat",
mixed_precision=False,
compute_dtype=torch.float16,
)
# print(id(self.middle), "is middle")
def forward(self, x):
target = x + 1
x = self.l0(x)
x = self.ln1(x)
x = self.middle(x)
x = self.ln2(x)
x = self.l1(x, target)
print("LOSS", x.item())
assert x.item() not in [float("-inf"), float("inf")]
return x
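# Illustrative sketch (ours, not exercised by the tests below): the shared-weight setup in
# Model above is plain weight tying, where the embedding matrix doubles as the output
# projection. This shows the idea without FSDP or the MEVO kernel.
def _tied_embedding_sketch(vocab=VOCAB, d_model=D_MODEL):
    emb = nn.Embedding(vocab, d_model)
    out_proj = nn.Linear(d_model, vocab, bias=False)
    out_proj.weight = emb.weight  # tie: both modules now update the same Parameter
    tokens = torch.randint(vocab, (BS, SEQ))
    return out_proj(emb(tokens))  # logits of shape (BS, SEQ, vocab)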
# A fixture to get tempfiles and ensure they are cleaned up.
@pytest.fixture()
def temp_files():
# dist_init needs 2 files + 3 files for before state, after state, in_data.
with temp_files_ctx(5) as files:
yield files
@skip_if_single_gpu
@pytest.mark.parametrize("wrap_middle", ["none", "flat", "nonflat"])
@pytest.mark.parametrize("test_fn", ["train", "eval", "optim_state"])
def test_shared_weight_mevo(temp_files, wrap_middle, test_fn):
"""Test FSDP with a model with shared weights."""
if test_fn == "optim_state":
if wrap_middle != "flat":
pytest.skip("only support optim_state when root and middle part is flat")
world_size = 2
# Get ref.
model = Model()
sd_before = deepcopy(model.state_dict())
in_data = (torch.rand(BS, SEQ) * (VOCAB - 1)).cuda().long()
if test_fn == "train":
_train(model, in_data, world_size)
sd_after = deepcopy(model.state_dict())
# Before and after state should not be equal.
assert not objects_are_equal(sd_before, sd_after)
# Save data
torch.save(sd_before, temp_files[2])
if test_fn == "train":
torch.save(sd_after, temp_files[3])
torch.save(in_data, temp_files[4])
# Run FSDP
mp.spawn(
_dist_worker,
(world_size, temp_files, wrap_middle, test_fn),
nprocs=world_size,
)
def _dist_worker(rank, world_size, files, wrap_middle, test_fn):
# Get data from files.
file1, file2, sd_before, sd_after, in_data = files
sd_before = torch.load(sd_before, map_location=lambda storage, loc: storage.cuda(rank))
if test_fn == "train":
sd_after = torch.load(sd_after, map_location=lambda storage, loc: storage.cuda(rank))
in_data = torch.load(in_data, map_location=lambda storage, loc: storage.cuda(rank))
result = dist_init(rank=rank, world_size=world_size, filename=file1, filename_rpc=file2)
assert result, "Dist init failed"
fsdp_model = FSDP(
# To debug: first make with_fsdp=False (no inner wrapping) work, then enable inner wrapping
# and make that work.
Model(with_fsdp=True, wrap_middle=wrap_middle),
flatten_parameters=test_fn == "optim_state",
mixed_precision=False,
compute_dtype=torch.float16,
)
fsdp_model.load_state_dict(sd_before)
if test_fn == "train":
_train(fsdp_model, in_data)
# We don't raise exceptions in CI since CI's T4 machine seems to be flaky with this test.
# On devel machines, we do want to catch potential errors. There could be real bugs or
# system issues behind the flakiness. One example is all-reduce vs. simulated averaging
# below. The check also fails on my rtx 20xx. So maybe it only works on devfair with
# Quadro GP100 GPUs. TODO (Min): debug this.
objects_are_equal(sd_after, fsdp_model.state_dict(), raise_exception=not in_circle_ci())
elif test_fn == "eval":
_eval(fsdp_model, in_data)
elif test_fn == "optim_state":
optim = SGD(fsdp_model.parameters(), lr=0.1)
for _ in range(3):
out = fsdp_model(in_data)
out.backward()
optim.step()
sd = fsdp_model.gather_full_optim_state_dict(optim)
if rank == 0:
            # There should be 8 momentum buffers in the state.
assert len(sd["state"].keys()) == 8
else:
assert sd is None, "only rank 0 should have the optim state"
else:
assert 0, f"invalid test_fn {test_fn}"
teardown()
def _eval(model, in_data):
# run in eval mode
model.eval()
for _ in range(5):
out = model(in_data)
    # Also run under torch.no_grad()
for _ in range(5):
with torch.no_grad():
out = model(in_data)
def _train(model, in_data, steps_per_iter=1):
optim = SGD(model.parameters(), lr=0.1)
for _ in range(3):
# Simulate multiple ranks.
for _ in range(steps_per_iter):
out = model(in_data)
out.backward()
        # Simulate gradient averaging across ranks.
if steps_per_iter > 1:
with torch.no_grad():
for p in model.parameters():
p.grad /= steps_per_iter
with torch.no_grad():
for p in model.parameters():
assert not torch.isinf(p.grad).any() and not torch.isnan(p.grad).any()
optim.step()
model.zero_grad(set_to_none=True)
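# Illustrative sketch (ours, not part of the tests): dividing accumulated gradients by
# steps_per_iter, as _train does above, mirrors the mean that a data-parallel all-reduce
# would compute across ranks; here the same idea is shown on a tiny Linear layer.
def _simulated_grad_averaging_sketch(steps_per_iter=2):
    layer = nn.Linear(4, 4)
    for _ in range(steps_per_iter):  # one simulated "rank" per iteration
        layer(torch.randn(8, 4)).sum().backward()
    with torch.no_grad():
        for p in layer.parameters():
            p.grad /= steps_per_iter  # equivalent to averaging per-rank gradients
    return [p.grad.clone() for p in layer.parameters()]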
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Testing ShardedDDP
"""
from contextlib import suppress
import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn import Linear, Sequential
from fairscale.fair_dev.testing.testing import (
GPT2,
SGDWithPausingCompute,
available_devices,
check_same_models_across_ranks,
skip_if_less_than_four_gpu,
skip_if_no_cuda,
skip_if_single_gpu,
temp_files_ctx,
)
from fairscale.nn.data_parallel import ShardedDataParallel
from fairscale.optim import OSS
def _get_mlp(tripwire: bool = False):
if not tripwire:
return Sequential(Linear(2, 3), Linear(3, 3), Linear(3, 3), Linear(3, 3), Linear(3, 3), Linear(3, 3))
class Tripwire(torch.nn.Module):
"""A model made to expose possible corner cases"""
def __init__(self) -> None:
super().__init__()
self.model = Linear(2, 3, bias=False)
            # A parameter with mismatched dtype/trainability can trip up the buckets, for instance
self.register_parameter("tripwire", torch.nn.Parameter(torch.LongTensor((3, 3)), requires_grad=False))
def forward(self, x):
return self.model(x)
return Tripwire()
class _DoubleInput(torch.nn.Module):
def __init__(self):
super().__init__()
self.mlp = _get_mlp()
def forward(self, x, y):
x1 = self.mlp(x)
x2 = self.mlp(y)
return torch.cat((x1, x2), dim=1)
def run_one_step(
rank,
world_size,
backend,
device,
temp_file_name,
broadcast_buffers,
grad_accumulation,
reduce_buffer_size,
optimizer_type,
reduce_fp16=False,
):
dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)
if device == torch.device("cuda"):
torch.cuda.set_device(rank)
torch.manual_seed(rank)
np.random.seed(rank)
# Any model works. Add one different buffer per rank
model = _get_mlp()
model.register_buffer("test_buffer", torch.ones(1) * rank)
model.to(device)
next(model.parameters()).requires_grad = False # Test non-trainable parameters
optimizer_settings = {"lr": 1e-3, "momentum": 0.99}
if optimizer_type == SGDWithPausingCompute:
optimizer_settings["rank"] = rank
optimizer = OSS(params=model.parameters(), optim=optimizer_type, **optimizer_settings)
ddp_model = ShardedDataParallel(
model,
optimizer,
broadcast_buffers=broadcast_buffers,
reduce_buffer_size=reduce_buffer_size,
reduce_fp16=reduce_fp16,
)
# The model should be synchronized in between the ranks at ShardedDataParallel construction time, check that
check_same_models_across_ranks(
ddp_model, dist.group.WORLD, params_should_be_equal=True, check_broadcast_buffers=broadcast_buffers
)
# Optim loop
def closure():
ddp_model.zero_grad(set_to_none=True)
with ddp_model.no_sync() if grad_accumulation else suppress():
input_tensor = torch.rand((64, 2)).to(device)
loss = ddp_model(input_tensor).abs().sum()
# If grad_accumulation, we can check after the forward that the models are different
# (not synced)
if grad_accumulation:
check_same_models_across_ranks(
ddp_model, dist.group.WORLD, params_should_be_equal=False, check_broadcast_buffers=True
)
loss.backward()
return loss
# The models should stay the same in between the ranks
for i in range(5):
_ = optimizer.step(closure=closure)
# For a sync of all the streams
if device.type == torch.device("cuda").type:
torch.cuda.synchronize(device=device)
# when running on cpu/gloo the "nodes" are not really different
same_params = device == torch.device("cpu") or not grad_accumulation
check_same_models_across_ranks(
ddp_model, dist.group.WORLD, params_should_be_equal=same_params, check_broadcast_buffers=broadcast_buffers
)
dist.destroy_process_group()
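# Illustrative sketch (ours, not exercised by the tests): the
# `no_sync() if grad_accumulation else suppress()` idiom in the closure above selects
# between "skip gradient synchronization" and "do nothing" with a single `with` statement;
# the same pattern works for any optional context manager.
def _optional_context_sketch(use_context: bool):
    from contextlib import contextmanager

    trace = []

    @contextmanager
    def fake_no_sync():
        trace.append("sync disabled")
        yield

    with fake_no_sync() if use_context else suppress():
        trace.append("forward/backward")
    return trace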
def run_test(backend, device, world_size, broadcast_buffers, grad_accumulation, reduce_buffer_size, optimizer_type):
with temp_files_ctx(num=1) as temp_files:
mp.spawn(
run_one_step,
args=(world_size, backend, device, temp_files[0], broadcast_buffers, grad_accumulation, reduce_buffer_size),
nprocs=world_size,
join=True,
)
@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("broadcast_buffers", [True, False])
@pytest.mark.parametrize("grad_accumulation", [True, False])
@pytest.mark.parametrize("reduce_buffer_size", [0, 2**20])
@pytest.mark.parametrize("optimizer_type", [torch.optim.SGD, SGDWithPausingCompute])
@pytest.mark.parametrize("reduce_fp16", [False, True])
@pytest.mark.parametrize(
"setup",
[
[dist.Backend.NCCL, torch.device("cuda")],
[dist.Backend.GLOO, torch.device("cpu")],
[dist.Backend.GLOO, torch.device("cuda")],
],
)
def test_step(broadcast_buffers, grad_accumulation, reduce_buffer_size, optimizer_type, reduce_fp16, setup):
world_size = 2
with temp_files_ctx(num=1) as temp_files:
mp.spawn(
run_one_step,
args=(
world_size,
setup[0],
setup[1],
temp_files[0],
broadcast_buffers,
grad_accumulation,
reduce_buffer_size,
optimizer_type,
reduce_fp16,
),
nprocs=world_size,
join=True,
)
def run_test_two_inputs(rank, world_size, backend, device, temp_file_name, reduce_buffer_size):
dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)
if device == "cuda":
torch.cuda.set_device(rank)
torch.manual_seed(rank)
np.random.seed(rank)
model = _DoubleInput().to(device)
optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
ddp_model = ShardedDataParallel(model, optimizer, reduce_buffer_size=reduce_buffer_size)
# Optim loop
def closure():
ddp_model.zero_grad(set_to_none=True)
input_tensor = torch.rand((64, 2)).to(device)
loss = ddp_model(input_tensor, input_tensor).abs().sum()
loss.backward()
return loss
for _ in range(5):
_ = optimizer.step(closure=closure)
dist.destroy_process_group()
@pytest.mark.parametrize("reduce_buffer_size", [0, 2**20])
@pytest.mark.parametrize("backend", ["gloo", "nccl"])
@pytest.mark.parametrize("device", available_devices)
@skip_if_single_gpu
def test_inputs(reduce_buffer_size, backend, device):
# Check that the ShardedDDP wrapper accepts tuple(tensors) as inputs
world_size = 2
if backend == "nccl" and device == "cpu":
pytest.skip("Incompatible combination, or cuda not available")
return
with temp_files_ctx(num=1) as temp_files:
mp.spawn(
run_test_two_inputs,
args=(world_size, backend, device, temp_files[0], reduce_buffer_size),
nprocs=world_size,
join=True,
)
def test_ddp_attributes():
# Check that ShardedDDP exposes the same attributes as Pytorch's DDP
# - is multi_device_module
# - device_type
with temp_files_ctx(num=1) as temp_files:
dist.init_process_group(init_method="file://" + temp_files[0], backend="gloo", rank=0, world_size=1)
model = Sequential(Linear(2, 3), Linear(3, 3))
optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
ddp_model = ShardedDataParallel(model, optimizer)
assert hasattr(ddp_model, "is_multi_device_module")
assert hasattr(ddp_model, "device_type")
assert hasattr(ddp_model, "module")
dist.destroy_process_group()
def test_random_attributes():
with temp_files_ctx(num=1) as temp_files:
# Check that ShardedDDP exposes the original module's attributes
dist.init_process_group(init_method="file://" + temp_files[0], backend="gloo", rank=0, world_size=1)
model = Sequential(Linear(2, 3), Linear(3, 3))
model.banana = "sweet"
optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
ddp_model = ShardedDataParallel(model, optimizer)
assert hasattr(ddp_model, "banana")
assert not hasattr(ddp_model, "orange")
dist.destroy_process_group()
def test_catch_grad_grad():
with temp_files_ctx(num=1) as temp_files:
        # Check that ShardedDDP catches a parameter whose .grad itself requires grad
dist.init_process_group(init_method="file://" + temp_files[0], backend="gloo", rank=0, world_size=1)
model = Sequential(Linear(2, 3), Linear(3, 3))
model.train()
chained_grad = torch.zeros_like(next(model.parameters()))
chained_grad.requires_grad = True
next(model.parameters()).grad = chained_grad
optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
ddp_model = ShardedDataParallel(model, optimizer)
inputs = torch.rand(100, 2)
with pytest.raises(RuntimeError):
_ = ddp_model(inputs)
dist.destroy_process_group()
def test_mixed_types():
with temp_files_ctx(num=1) as temp_files:
        # Check that ShardedDDP handles a model mixing trainable and non-trainable parameter types
dist.init_process_group(init_method="file://" + temp_files[0], backend="gloo", rank=0, world_size=1)
model = _get_mlp(tripwire=True)
optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
model = ShardedDataParallel(model, optimizer)
input_tensor = torch.rand((2, 2))
_ = model(input_tensor)
dist.destroy_process_group()
def run_test_train_eval_change(rank, world_size, file):
# Check that ShardedDDP handles the switch from training to eval properly
dist.init_process_group(init_method="file://" + file, backend="gloo", rank=rank, world_size=world_size)
model = _get_mlp()
model.train()
optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
model = ShardedDataParallel(model, optimizer)
input_tensor = torch.rand((2, 2))
loss = model(input_tensor).sum()
loss.backward() # make sure that the gradients are reduced
# Wipe the gradients and switch to eval mode
model.zero_grad()
model.eval()
_ = model(input_tensor)
assert next(model.parameters()).grad is None or torch.norm(next(model.parameters()).grad) < 1e-6
# Get back to training
model = model.train()
model(input_tensor).sum().backward()
assert torch.norm(next(model.parameters()).grad) > 0.0
dist.destroy_process_group()
def test_train_eval_change():
world_size = 4
with temp_files_ctx(num=1) as temp_files:
mp.spawn(
run_test_train_eval_change,
args=(world_size, temp_files[0]),
nprocs=world_size,
join=True,
)
def run_test_device_change(rank, world_size, backend, device, temp_file_name, reduce_buffer_size):
# Check that the wrapped module can change devices
dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)
torch.cuda.set_device(rank)
    model = Sequential(Linear(2, 3), Linear(3, 3)).cpu()  # kept on CPU on purpose, to test changing the device after the fact
optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
ddp_model = ShardedDataParallel(
model, optimizer, sync_models_at_startup=False, reduce_buffer_size=reduce_buffer_size
)
    # Changing devices should be caught by ShardedDDP and rejected
    with pytest.raises(AssertionError):
        ddp_model.to(device)
# Check that we can change the data type
ddp_model.to(device=torch.device("cpu"), dtype=torch.float16)
dist.destroy_process_group()
@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("reduce_buffer_size", [0, 2**20])
def test_device_change(reduce_buffer_size):
# Check that ShardedDDP handles a device change properly
world_size = 2
backend = "nccl"
with temp_files_ctx(num=1) as temp_files:
device = "cuda"
mp.spawn(
run_test_device_change,
args=(world_size, backend, device, temp_files[0], reduce_buffer_size),
nprocs=world_size,
join=True,
)
def run_test_training_change(rank, world_size, backend, device, temp_file_name, reduce_buffer_size):
group = dist.init_process_group(
init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size
)
torch.cuda.set_device(rank)
model = Sequential(Linear(2, 3), Linear(3, 3)).to(device)
optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
ddp_model = ShardedDataParallel(model, optimizer, process_group=group, reduce_buffer_size=reduce_buffer_size)
inputs = torch.rand((10, 2), device=device)
outputs = ddp_model(inputs) # assert if the module has not been changed properly
_ = outputs.norm().backward()
ddp_model.eval()
ddp_model(inputs) # This will assert if eval() is not properly taken into account
ddp_model(inputs)
dist.destroy_process_group()
@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("reduce_buffer_size", [0, 2**20])
def test_training_change(reduce_buffer_size):
world_size = 2
backend = "nccl"
device = "cuda"
with temp_files_ctx(num=1) as temp_files:
mp.spawn(
run_test_training_change,
args=(world_size, backend, device, temp_files[0], reduce_buffer_size),
nprocs=world_size,
join=True,
)
def run_test_ddp_sync_batch_norm(rank, world_size, backend, device, temp_file_name):
dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)
model = Sequential(Linear(2, 3), torch.nn.BatchNorm1d(3), Linear(3, 3)).to(device)
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.to(device) # in pytorch 1.5 syncBN switches to the default device/cpu
optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
ddp_model = ShardedDataParallel(model, optimizer)
assert isinstance(model[1], torch.nn.SyncBatchNorm)
# Ensures sync batch norm handles have been added
ddp_model(torch.randn(2, 2).to(device))
dist.destroy_process_group()
@skip_if_no_cuda
@skip_if_single_gpu
def test_ddp_sync_batch_norm():
# Check that ShardedDDP is compatible with sync batch norm across multiple GPUs
world_size = 2
backend = "gloo"
device = "cuda"
with temp_files_ctx(num=1) as temp_files:
mp.spawn(
run_test_ddp_sync_batch_norm,
args=(world_size, backend, device, temp_files[0]),
nprocs=world_size,
join=True,
)
def run_test_two_optimizers(rank, world_size, backend, device, temp_file_name):
dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)
if device == torch.device("cuda"):
torch.cuda.set_device(rank)
torch.manual_seed(rank)
np.random.seed(rank)
model = _DoubleInput().to(device)
parameters = list(model.parameters())
optimizer_1 = OSS(params=parameters[:-10], optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
optimizer_2 = OSS(params=parameters[-10:], optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
ddp_model = ShardedDataParallel(model, [optimizer_1, optimizer_2])
# Optim loop
def closure():
input_tensor = torch.rand((64, 2)).to(device)
loss = ddp_model(input_tensor, input_tensor).abs().sum()
loss.backward()
return loss
for i in range(5):
optimizer_1.zero_grad()
optimizer_2.zero_grad()
_ = optimizer_1.step(closure=closure)
_ = optimizer_2.step(closure=closure)
dist.destroy_process_group()
def test_two_optimizers():
    # Check that ShardedDDP works with two sharded optimizers splitting the model parameters
world_size = 2
backend = "gloo"
device = "cpu"
with temp_files_ctx(num=1) as temp_files:
mp.spawn(
run_test_two_optimizers, args=(world_size, backend, device, temp_files[0]), nprocs=world_size, join=True
)
def run_test_gpt2(rank, world_size, backend, device, temp_file_name, reduce_buffer_size):
INPUT_DIM = 16
    BATCH_SIZE = 10
STEPS = 10
url = "file://" + temp_file_name
dist.init_process_group(init_method=url, backend=backend, rank=rank, world_size=world_size)
torch.cuda.set_device(rank)
torch.manual_seed(rank)
np.random.seed(rank)
model = GPT2(
embed_dim=256, num_heads=2, num_layers=12, num_positions=INPUT_DIM * INPUT_DIM, num_vocab=512, num_classes=2
)
optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-3, momentum=0.99)
ddp_model = ShardedDataParallel(model, optimizer, reduce_buffer_size=reduce_buffer_size)
# Move the model to another device post-construction
model = model.to(device)
# Optim loop
set_to_none = True
def closure():
nonlocal set_to_none
ddp_model.zero_grad(set_to_none=set_to_none)
set_to_none = not set_to_none
# Force int inputs to prevent the first grad from firing
        input_tensor = torch.randint(10, (BATCH_SIZE, INPUT_DIM)).to(device)
loss = ddp_model(input_tensor).abs().sum()
loss.backward()
return loss
# Check for bucketing overflows
for i in range(STEPS):
_ = optimizer.step(closure=closure)
# Stress test the .to() method
ddp_model.to(device=device, dtype=torch.float16)
ddp_model.to(device=device, dtype=torch.float32)
dist.destroy_process_group()
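# Illustrative sketch (ours): the set_to_none toggle in the closure above exercises both
# zero_grad modes; with set_to_none=False the .grad attributes become zero tensors, with
# set_to_none=True they become None.
def _zero_grad_modes_sketch():
    layer = Linear(2, 2)
    layer(torch.rand(1, 2)).sum().backward()
    layer.zero_grad(set_to_none=False)
    zeroed = all(p.grad is not None and torch.all(p.grad == 0) for p in layer.parameters())
    layer(torch.rand(1, 2)).sum().backward()
    layer.zero_grad(set_to_none=True)
    cleared = all(p.grad is None for p in layer.parameters())
    return zeroed and cleared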
@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("world_size", [1, 2])
@pytest.mark.parametrize("reduce_buffer", [2**23, 2**40])
def test_gpt2(world_size, reduce_buffer):
# Check that having trainable unused params is fine
backend = "gloo"
device = "cuda"
with temp_files_ctx(num=1) as temp_files:
mp.spawn(
run_test_gpt2,
args=(world_size, backend, device, temp_files[0], reduce_buffer),
nprocs=world_size,
join=True,
)
def run_test_multiple_groups(rank, world_size, tempfile_name, backend, reduce_buffer_size):
# Only work with the even ranks, to check that the global_rank indexing is properly used
dist.init_process_group(init_method="file://" + tempfile_name, backend=backend, rank=rank, world_size=world_size)
sub_group_ranks = [0, 2]
process_group = torch.distributed.new_group(ranks=sub_group_ranks, backend=backend)
# Make sure that all the ranks get different training data
# So that the sync check in between their models is meaningful
torch.manual_seed(rank)
np.random.seed(rank)
# Standard deep learning setup
device = "cuda"
torch.cuda.set_device(rank)
epochs, batch, input_width, hidden, target_width = 5, 3, 20, 10, 5
loss_fn = torch.nn.L1Loss().to(device)
def check(optimizer, model):
# Just run a couple of epochs, check that the model is properly updated
for _ in range(epochs):
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss.backward()
return loss
_ = optimizer.step(closure=closure)
# Check that all the params are the same on all ranks
check_same_models_across_ranks(
model, process_group, params_should_be_equal=True, check_broadcast_buffers=True
)
if rank in sub_group_ranks:
        # A model that does not fit in the broadcast bucket
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width)).to(
device
)
        # With SGD, momentum is required to get optimizer state to shard
optimizer = OSS(model.parameters(), group=process_group, lr=1e-3, momentum=0.99)
model = ShardedDataParallel(
model, optimizer, process_group=process_group, reduce_buffer_size=reduce_buffer_size
)
check(optimizer, model)
dist.destroy_process_group(process_group)
@skip_if_less_than_four_gpu
@pytest.mark.parametrize("reduce_buffer_size", [0, 2**20])
@pytest.mark.parametrize("backend", ["gloo", "nccl"])
def test_multiple_groups(reduce_buffer_size, backend):
world_size = 4
with temp_files_ctx(num=1) as temp_files:
mp.spawn(
run_test_multiple_groups,
args=(world_size, temp_files[0], backend, reduce_buffer_size),
nprocs=world_size,
join=True,
)
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with pre-backward hook bug. """
import pytest
import torch
from torch.nn import Linear, Module
from fairscale.fair_dev.testing.testing import dist_init, skip_if_no_cuda, teardown, temp_files_ctx
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
# A fixture to get tempfiles and ensure they are cleaned up.
@pytest.fixture()
def temp_files():
# dist_init needs 2 files
with temp_files_ctx(2) as files:
yield files
@skip_if_no_cuda
def test_pre_backward_hook(temp_files):
"""Test FSDP with a model that triggers a pre_backward hook bug."""
result = dist_init(rank=0, world_size=1, filename=temp_files[0], filename_rpc=temp_files[1])
assert result, "Dist init failed"
class Model(Module):
def __init__(self):
super().__init__()
self.l1 = Linear(4, 4).cuda()
self.l2 = FSDP(Linear(4, 4).cuda())
self.l3 = Linear(4, 4).cuda()
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
inner_result = x
x = self.l3(x)
return x, inner_result
def assert_and_clear_grad(self):
for p in self.parameters():
assert p.shape in [(4, 4), (4,), (4 * 4 + 4,)], p.shape
assert p.grad is not None
p.grad = None
model = FSDP(Model(), flatten_parameters=False).cuda()
in_data = torch.rand(1, 4).cuda()
for _ in range(3):
out, _ = model(in_data)
out.sum().backward()
model.assert_and_clear_grad()
teardown()
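# Illustrative sketch (ours): the (4 * 4 + 4,) shape accepted in assert_and_clear_grad above
# comes from FSDP's flatten_parameters behaviour, where the wrapped Linear(4, 4)'s weight
# (16 elements) and bias (4 elements) are stored as a single flat parameter.
def _flat_param_size_sketch():
    layer = Linear(4, 4)
    return sum(p.numel() for p in layer.parameters())  # 4 * 4 + 4 == 20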
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import functools
import itertools
import unittest
from unittest.mock import patch
from parameterized import parameterized
import torch
from fairscale.fair_dev.testing.testing import DummyProcessGroup, make_cudnn_deterministic, objects_are_equal
from fairscale.nn.data_parallel import FullyShardedDataParallel
from .test_fsdp import DistributedTest, NestedWrappedModule, rename_test, spawn_and_init
class TestGradAcc(DistributedTest):
def test_transformer(self):
fn = functools.partial(self._test_transformer, config={})
spawn_and_init(fn)
def test_transformer_grad_acc_without_no_sync(self):
fn = functools.partial(self._test_transformer, config={}, use_no_sync_context=False)
spawn_and_init(fn)
def test_transformer_no_flat_params(self):
config = {"flatten_parameters": False}
fn = functools.partial(self._test_transformer, config=config)
spawn_and_init(fn)
def test_nested_wrapper(self):
fn = functools.partial(self._test_nested_wrapper, config={})
spawn_and_init(fn)
def test_no_sync_before_first_forward(self):
group = DummyProcessGroup(rank=0, size=1)
dummy_group_reduce_scatter = DummyProcessGroup(rank=group.rank(), size=group.size())
config = {"process_group_reduce_scatter", dummy_group_reduce_scatter}
model = self.get_wrapped_model(group, config, add_bn=False)
batch = model.module.get_input(torch.device("cuda"))
with model.no_sync():
output = model(*batch)
loss = model.module.get_loss(batch, output)
loss.backward()
output = model(*batch)
loss = model.module.get_loss(batch, output)
loss.backward()
@classmethod
def _test_transformer(self, rank, group, config, use_no_sync_context=True):
model = self.get_wrapped_model(group, config=config, add_bn=False)
model.eval() # turn off dropout for the test
self._test_grad_acc(model, batch_dim=1, use_no_sync_context=use_no_sync_context)
@classmethod
def _test_nested_wrapper(self, rank, group, config):
model = NestedWrappedModule(group, config)
model = FullyShardedDataParallel(model, group, **config).cuda()
self._test_grad_acc(model, batch_dim=0)
@classmethod
def _test_grad_acc(self, model, batch_dim, use_no_sync_context=True):
make_cudnn_deterministic()
# Generate two input batches. We'll test that we get the same grads if
# we train on them sequentially while accumulating grads (with no_sync
# or without no_sync) vs. concatenating the batches and training in one go.
#
# The difference between with no_sync and without is GPU memory vs. networking
# bandwidth tradeoff.
batch1 = model.module.get_input(torch.device("cuda"))
assert isinstance(batch1, tuple)
batch2 = tuple(
# This randomly permutes the values in a multi-dim tensor.
x.view(-1)[torch.randperm(x.numel())].view_as(x)
for x in batch1
)
for x, y in zip(batch1, batch2):
assert not torch.all(x == y)
# Concat the batches along batch dimension.
concat_batch = tuple(torch.cat((x, y), dim=batch_dim) for (x, y) in zip(batch1, batch2))
# Establish reference behavior on the concat batch.
model.zero_grad()
output = model(*concat_batch)
ref_loss = model.module.get_loss(concat_batch, output)
ref_loss.backward()
ref_grads = [p.grad.detach().clone() for p in model.parameters()]
# Test that we get the same results by accumulating grads.
model.zero_grad()
context = contextlib.suppress()
if use_no_sync_context:
context = model.no_sync()
with context: # accumulate gradients from the first batch
output = model(*batch1)
loss1 = model.module.get_loss(batch1, output)
loss1.backward()
output = model(*batch2)
loss2 = model.module.get_loss(batch2, output)
loss2.backward()
accumulated_loss = loss1 + loss2
accumulated_grads = [p.grad.detach().clone() for p in model.parameters()]
torch.testing.assert_allclose(ref_loss, accumulated_loss)
assert objects_are_equal(ref_grads, accumulated_grads, raise_exception=True)
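# Illustrative sketch (ours, not used by the test above): the equivalence checked by
# _test_grad_acc also holds for a plain module when the loss is a sum over the batch, i.e.
# accumulating gradients over two batches matches one backward over their concatenation.
def _grad_acc_equivalence_sketch():
    torch.manual_seed(0)
    layer = torch.nn.Linear(3, 2)
    b1, b2 = torch.randn(4, 3), torch.randn(4, 3)

    layer.zero_grad()
    layer(torch.cat([b1, b2])).sum().backward()
    ref = [p.grad.clone() for p in layer.parameters()]

    layer.zero_grad()
    layer(b1).sum().backward()  # gradients accumulate into .grad
    layer(b2).sum().backward()
    acc = [p.grad.clone() for p in layer.parameters()]
    return all(torch.allclose(r, a) for r, a in zip(ref, acc))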
keys = ["reshard_after_forward", "mixed_precision"]
COMM_CONFIG_OPTIONS = [[dict(zip(keys, config))] for config in itertools.product([True, False], repeat=len(keys))]
class TestGradAccCommunication(DistributedTest):
@parameterized.expand(COMM_CONFIG_OPTIONS, name_func=rename_test)
def test_communication(self, config):
fn = functools.partial(self._test_communication, config=config)
spawn_and_init(fn)
@parameterized.expand(COMM_CONFIG_OPTIONS, name_func=rename_test)
def test_communication_nested(self, config):
fn = functools.partial(self._test_communication, config=config, nested_model=True)
spawn_and_init(fn)
@classmethod
def _test_communication(self, rank, group, config, nested_model=False):
if group.size() == 1:
return
# Turn off bucketing to accurately count number of reduce_scatters.
config["bucket_cap_mb"] = 0
if nested_model:
model = NestedWrappedModule(group, config)
model = FullyShardedDataParallel(model, group, **config).cuda()
else:
model = self.get_wrapped_model(group, config=config)
num_fsdp = 0
for child in model.modules(): # includes self
if isinstance(child, FullyShardedDataParallel) and len(child.params) > 0:
num_fsdp += 1
if config.get("reshard_after_forward", True):
# inside no_sync:
# num_fsdp all-gathers in the forward
# num_fsdp-1 all-gathers in the backward (except root)
# outside no_sync:
# num_fsdp-1 all-gathers in the forward (except root)
# num_fsdp-1 all-gathers in the backward (except root)
expected_all_gather1 = 2 * num_fsdp - 1
expected_all_gather2 = expected_all_gather1 + (2 * num_fsdp - 2)
else:
# inside no_sync:
# num_fsdp all-gathers in the forward
# outside no_sync:
# none
expected_all_gather1 = num_fsdp
expected_all_gather2 = num_fsdp
expected_reduce_scatter = num_fsdp
batch = model.module.get_input(torch.device("cuda"))
# depending on pytorch version the _base methods may not be available
method_string_reduce_scatter_base = "torch.distributed._reduce_scatter_base"
if hasattr(torch.distributed, "_reduce_scatter_base") is False:
# no such method, to make mock_reduce_scatter_base 0 invocation, use an impossible name
method_string_reduce_scatter_base = "math.nan" # just an arbitrary function not going to be called
method_string_all_gather_base = "torch.distributed._all_gather_base"
if hasattr(torch.distributed, "_all_gather_base") is False:
# no such method, to make mock_all_gather_base 0 invocation, use an impossible name
method_string_all_gather_base = "math.nan" # just an arbitrary function not going to be called
with patch("torch.distributed.all_gather") as mock_all_gather:
with patch("torch.distributed.reduce_scatter") as mock_reduce_scatter:
with patch(method_string_all_gather_base) as mock_all_gather_base:
with patch(method_string_reduce_scatter_base) as mock_reduce_scatter_base:
with model.no_sync():
output = model(*batch)
loss = model.module.get_loss(batch, output)
loss.backward()
# the _base methods are activated when they are available.
# the sum of the _base and public methods should stay the same.
assert (
mock_all_gather.call_count + mock_all_gather_base.call_count == expected_all_gather1
), f"{mock_all_gather.call_count} + {mock_all_gather_base.call_count} != {expected_all_gather1}"
assert (
mock_reduce_scatter.call_count + mock_reduce_scatter_base.call_count == 0
), f"{mock_reduce_scatter.call_count} + {mock_reduce_scatter_base.call_count} != 0"
output = model(*batch)
loss = model.module.get_loss(batch, output)
loss.backward()
assert (
mock_all_gather.call_count + mock_all_gather_base.call_count == expected_all_gather2
), f"{mock_all_gather.call_count} + {mock_all_gather_base.call_count} != {expected_all_gather2}"
assert (
mock_reduce_scatter.call_count + mock_reduce_scatter_base.call_count
== expected_reduce_scatter
), f"{mock_reduce_scatter.call_count} + {mock_reduce_scatter_base.call_count} != {expected_reduce_scatter}"
if __name__ == "__main__":
unittest.main()
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Testing ShardedDDP
"""
from contextlib import suppress
import copy
import numpy as np
import pytest
import torch
from torch.cuda.amp import GradScaler as TorchGradScaler
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn import Linear, Sequential
from torch.nn.parallel import DistributedDataParallel as DDP
from fairscale.fair_dev.testing.testing import (
check_same_model_params,
skip_if_no_cuda,
skip_if_single_gpu,
temp_files_ctx,
)
from fairscale.internal import torch_version
from fairscale.nn.data_parallel import ShardedDataParallel
from fairscale.optim import OSS
if torch_version() >= (1, 8, 0):
from fairscale.optim.grad_scaler import ShardedGradScaler
"""
Check that ShardedDDP gets the same results as DDP in a variety of scenarios
"""
_test_fp16_reduction = [False]
if hasattr(dist, "algorithms.ddp_com_hooks.default_hooks"):
_test_fp16_reduction.append(True)
_test_amp = [False]
if hasattr(torch.cuda.amp, "autocast"):
_test_amp.append(True)
EMB_SIZE = 32
BATCH_SIZE = 8
def _get_mlp_emb(multiple_fw: bool = False):
class MLP(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.trunk = Sequential(Linear(2, 3), Linear(3, 3), Linear(3, 3))
self.head = Sequential(Linear(3, 3), Linear(3, 3))
self.multiple_fw = multiple_fw
self.embedding = torch.nn.Embedding(EMB_SIZE, 2)
def forward(self, indices: torch.Tensor) -> torch.Tensor: # type: ignore
inputs = self.embedding(indices)
inputs = self.trunk(inputs) # type: ignore
if self.multiple_fw:
return self.head(self.head(inputs)) # type: ignore
return self.head(inputs) # type: ignore
return MLP()
def _get_random_inputs(device):
return torch.floor(torch.rand((BATCH_SIZE, 2)) * EMB_SIZE).to(dtype=torch.long, device=device)
def run_ddp_parity(
rank,
world_size,
backend,
temp_file_name,
reduce_buffer_size,
grad_accumulation,
change_train_graph,
fp16_reduction,
clip_grad_norm,
amp,
manual_reduction,
multiple_fw,
):
dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)
device = torch.device("cuda")
torch.cuda.set_device(rank)
torch.manual_seed(rank)
np.random.seed(rank)
NUMBER_BATCHS = 5
# Test all combinations: AMP, Accumulate, Change train graph, reduce buckets
print(
f"{rank}: Checking configuration: accumulate {grad_accumulation}"
+ f" - change train graph {change_train_graph}"
+ f" - amp {amp}"
+ f" - manual reduction {manual_reduction}"
+ f" - buffers {reduce_buffer_size}"
+ f" - multiple FW {multiple_fw}",
flush=True,
)
# The API should be the exact same in between the sharded and non-sharded variants, generic closure
def closure(model, scaler, input_tensor, should_accumulate, _manual_reduction=False):
accumulate_steps = 3 if should_accumulate else 1
model.zero_grad()
def step():
if scaler is not None:
with torch.cuda.amp.autocast():
loss = model(input_tensor).abs().sum()
scaler.scale(loss).backward()
else:
loss = model(input_tensor).abs().sum()
loss.backward()
with model.no_sync() if should_accumulate else suppress():
for _ in range(accumulate_steps - 1):
step()
if not _manual_reduction:
step()
else:
with model.no_sync():
step()
model.reduce()
# Any model works. Add one different buffer per rank
model = _get_mlp_emb(multiple_fw)
model.register_buffer("test_buffer", torch.ones(1) * rank)
model.to(device)
    # Make sure that the model starts with a non-trainable parameter, so that we check that the buckets are
# properly reassigned when/if this changes
next(model.parameters()).requires_grad = False
sharded_optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=1e-4, momentum=0.99)
sharded_ddp_model = ShardedDataParallel(
module=model,
sharded_optimizer=sharded_optimizer,
broadcast_buffers=True,
reduce_buffer_size=reduce_buffer_size,
reduce_fp16=fp16_reduction,
)
ddp_model_single = copy.deepcopy(model)
ddp_optimizer = torch.optim.SGD(ddp_model_single.parameters(), lr=1e-4, momentum=0.99)
ddp_model = DDP(ddp_model_single, device_ids=[rank], broadcast_buffers=True, find_unused_parameters=True)
if fp16_reduction:
        from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook
ddp_model.register_comm_hook(state=None, hook=fp16_compress_hook) # type: ignore
ddp_scaler = TorchGradScaler() if amp else None
sharded_scaler = ShardedGradScaler() if amp else None
# The model should be synchronized in between the ranks at construction time, check that
check_same_model_params(sharded_ddp_model, ddp_model)
# Typical training loop, check that we get the exact same results as DDP
for i in range(NUMBER_BATCHS):
input_tensor = _get_random_inputs(device)
def ddp_closure(input_tensor=input_tensor):
return closure(ddp_model, ddp_scaler, input_tensor, grad_accumulation)
def sharded_closure(input_tensor=input_tensor):
return closure(
sharded_ddp_model,
sharded_scaler,
input_tensor,
grad_accumulation,
_manual_reduction=manual_reduction,
)
# Step/scale both
for _scaler, _closure, _optimizer in (
(ddp_scaler, ddp_closure, ddp_optimizer),
(sharded_scaler, sharded_closure, sharded_optimizer),
):
if _scaler is not None:
_ = _closure(input_tensor)
_scaler.step(_optimizer)
_scaler.update()
else:
_optimizer.step(_closure())
check_same_model_params(sharded_ddp_model, ddp_model, f"Rank: {rank} - Step {i} broke")
# Check that the two grad norm are equivalent
# NOTE: The grads can occasionally be NaNs, the scaler will skip the step in that case
# This is not ShardedDDP specific. If the grads are not NaN for DDP then they should also
# be valid for ShardedDDP
# NOTE: DDP does not handle parameters trainability being changed after the fact, see
# https://github.com/pytorch/pytorch/blob/5781aec74ef00284e0262817a649278c2e8072bf/torch/nn/parallel/distributed.py#L471
if clip_grad_norm and not change_train_graph:
if torch_version() >= (1, 9, 0):
total_norm = torch.nn.utils.clip_grad_norm_(ddp_model.parameters(), 0.3, norm_type=2.0, error_if_nonfinite=False) # type: ignore
else:
total_norm = torch.nn.utils.clip_grad_norm_(ddp_model.parameters(), 0.3, norm_type=2.0) # type: ignore
if not torch.isnan(total_norm):
oss_total_norm = sharded_optimizer.clip_grad_norm(0.3, norm_type=2.0)
allclose = torch.allclose(oss_total_norm, total_norm, atol=1e-2 if amp else 1e-8)
if not allclose:
# Debug helper if this unit test does not pass, compare the gradients in between DDP and ShardedDDP
for idx, (p_ddp, p_sdp) in enumerate(zip(ddp_model.parameters(), sharded_ddp_model.parameters())):
if p_ddp.grad is not None:
if p_sdp.grad is not None:
print(rank, idx, torch.norm(p_ddp.grad), torch.norm(p_sdp.grad), flush=True)
else:
print(rank, idx, torch.norm(p_ddp.grad), "not owned", flush=True)
assert (
allclose
), f"torch and fairscale should return the same grad norm\n {oss_total_norm} vs {total_norm}"
else:
print(rank, "NaN grad norm in DDP", flush=True)
# Flip the trainability of the first parameter back and forth
if i == 0 and change_train_graph:
next(sharded_ddp_model.parameters()).requires_grad = not next(sharded_ddp_model.parameters()).requires_grad
next(ddp_model.parameters()).requires_grad = not next(ddp_model.parameters()).requires_grad
check_same_model_params(sharded_ddp_model, ddp_model, f"Rank: {rank} - Trainability refresh {i} broke")
dist.destroy_process_group()
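# Illustrative sketch (ours, CUDA assumed, never run by the tests): the scaler handling in
# run_ddp_parity above follows the standard torch.cuda.amp recipe, which ShardedGradScaler
# mirrors for OSS/ShardedDDP.
def _amp_loop_sketch(model, optimizer, batches):
    scaler = TorchGradScaler()
    for batch in batches:
        optimizer.zero_grad()
        with torch.cuda.amp.autocast():
            loss = model(batch).abs().sum()
        scaler.scale(loss).backward()  # scaled backward to avoid fp16 underflow
        scaler.step(optimizer)  # unscales, and skips the step if grads are inf/nan
        scaler.update()  # adjusts the scale for the next iteration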
@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("reduce_buffer_size", [0, 2**20])
@pytest.mark.parametrize("grad_accumulation", [True, False])
@pytest.mark.parametrize("change_train_graph", [True, False])
@pytest.mark.parametrize("fp16_reduction", _test_fp16_reduction)
@pytest.mark.parametrize("clip_grad_norm", [True, False])
@pytest.mark.parametrize("amp", _test_amp)
@pytest.mark.parametrize("manual_reduction", [True, False])
@pytest.mark.parametrize("multiple_fw", [True, False])
def test_ddp_parity(
reduce_buffer_size,
grad_accumulation,
change_train_graph,
fp16_reduction,
clip_grad_norm,
amp,
manual_reduction,
multiple_fw,
):
if torch_version() < (1, 8, 0):
pytest.skip("pytorch version >= 1.8.0 required")
if manual_reduction and change_train_graph:
pytest.skip("Skipping changing model and grad accumulation combination, makes little sense")
world_size = torch.cuda.device_count()
backend = dist.Backend.NCCL
with temp_files_ctx(num=1) as temp_files:
mp.spawn(
run_ddp_parity,
args=(
world_size,
backend,
temp_files[0],
reduce_buffer_size,
grad_accumulation,
change_train_graph,
fp16_reduction,
clip_grad_norm,
amp,
manual_reduction,
multiple_fw,
),
nprocs=world_size,
join=True,
)
def run_ddp_parity_two_optim(rank, world_size, backend, temp_file_name, reduce_buffer_size):
dist.init_process_group(init_method="file://" + temp_file_name, backend=backend, rank=rank, world_size=world_size)
device = torch.device("cuda")
torch.cuda.set_device(rank)
torch.manual_seed(rank)
    np.random.seed(rank)
    # Any model works. Add one different buffer per rank
BATCHS = 20
model = _get_mlp_emb()
model.register_buffer("test_buffer", torch.ones(1) * rank)
model.to(device)
n_half_params = len(list(model.parameters())) // 2
optim_settings = {"lr": 1e-3, "momentum": 0.99}
sharded_optimizer = OSS(params=list(model.parameters())[:n_half_params], optim=torch.optim.SGD, **optim_settings)
sharded_optimizer_2 = OSS(params=list(model.parameters())[n_half_params:], optim=torch.optim.SGD, **optim_settings)
sharded_ddp_model = ShardedDataParallel(
module=model,
sharded_optimizer=[sharded_optimizer, sharded_optimizer_2],
broadcast_buffers=True,
reduce_buffer_size=reduce_buffer_size,
)
ddp_model_single = copy.deepcopy(model)
ddp_optimizer = torch.optim.SGD(list(ddp_model_single.parameters())[:n_half_params], **optim_settings)
ddp_optimizer_2 = torch.optim.SGD(list(ddp_model_single.parameters())[n_half_params:], **optim_settings)
ddp_model = DDP(ddp_model_single, device_ids=[rank], broadcast_buffers=True)
check_same_model_params(
sharded_ddp_model,
ddp_model,
f"DDP parity two optim test failing. differing at startup, Buffers {reduce_buffer_size}",
)
for i in range(BATCHS):
input_tensor = _get_random_inputs(device)
# Run DDP
ddp_optimizer.zero_grad()
ddp_optimizer_2.zero_grad()
ddp_loss = ddp_model(input_tensor).abs().sum()
ddp_loss.backward()
ddp_optimizer.step()
ddp_optimizer_2.step()
torch.cuda.synchronize(device)
# Run Sharded
sharded_optimizer.zero_grad()
sharded_optimizer_2.zero_grad()
sharded_loss = sharded_ddp_model(input_tensor).abs().sum()
sharded_loss.backward()
sharded_optimizer.step()
sharded_optimizer_2.step()
torch.cuda.synchronize(device)
check_same_model_params(
sharded_ddp_model,
ddp_model,
f"DDP parity two optim test failing, step {i}, buffers {reduce_buffer_size}",
)
dist.destroy_process_group()
@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("reduce_buffer_size", [0, 2**20])
def test_ddp_parity_two_optim(reduce_buffer_size):
world_size = 2
backend = dist.Backend.NCCL
with temp_files_ctx(num=1) as temp_files:
mp.spawn(
run_ddp_parity_two_optim,
args=(world_size, backend, temp_files[0], reduce_buffer_size),
nprocs=world_size,
join=True,
)
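# Illustrative sketch (ours, plain torch, single process): splitting one model's parameters
# across two optimizers, as run_ddp_parity_two_optim does with OSS above, works the same with
# any optimizer; each one only steps the slice of parameters it was given.
def _two_optimizer_split_sketch():
    model = Sequential(Linear(2, 3), Linear(3, 3))
    params = list(model.parameters())
    half = len(params) // 2
    opt_a = torch.optim.SGD(params[:half], lr=1e-3, momentum=0.99)
    opt_b = torch.optim.SGD(params[half:], lr=1e-3, momentum=0.99)
    model(torch.rand(4, 2)).abs().sum().backward()
    opt_a.step()
    opt_b.step()
    return half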
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with some params frozen. """
from enum import Enum
from itertools import product
import tempfile
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
import torch.optim as optim
from fairscale.fair_dev.testing.testing import dist_init, objects_are_equal, rmf, skip_if_single_gpu, teardown
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
class FreezeModel(nn.Module):
def __init__(self):
super().__init__()
self.trunk = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
)
self.head = nn.Linear(64, 10)
self.trunk = FSDP(self.trunk)
def forward(self, x):
return self.head(self.trunk(x))
def _freeze_distributed_worker(
gpu_id,
world_size,
tempfile_name,
unused,
):
torch.cuda.set_device(gpu_id)
rank = gpu_id
result = dist_init(rank, world_size, tempfile_name, unused)
assert result, "Dist init failed"
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
batch = torch.randn(size=(2, 3, 224, 224)).cuda()
# The use case for this test is where the weights in the submodule
# are not frozen but the leftover weights or those contained by the
# root module are frozen. Refer to issue #758 for a real world example.
model = FreezeModel()
model = model.cuda()
for param in model.head.parameters():
param.requires_grad = False
model = FSDP(model)
if gpu_id == 0:
print(model)
target = torch.tensor([0, 1], dtype=torch.long).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
for iteration in range(3):
out = model(batch)
fake_loss = criterion(out, target)
print("Loss", iteration, ":", fake_loss.item())
optimizer.zero_grad()
fake_loss.backward()
optimizer.step()
teardown()
@skip_if_single_gpu
def test_submodule_freezing_weights(temp_files):
world_size = 2
mp.spawn(
_freeze_distributed_worker,
(world_size, temp_files[0], temp_files[1]),
nprocs=world_size,
)
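# Illustrative sketch (ours, no FSDP or process group needed): the "frozen head" use case
# above amounts to flipping requires_grad on one submodule and, optionally, building the
# optimizer from the trainable parameters only so the frozen ones carry no optimizer state.
def _freeze_head_sketch():
    model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))
    for p in model[1].parameters():  # freeze the "head"
        p.requires_grad = False
    trainable = [p for p in model.parameters() if p.requires_grad]
    optimizer = optim.SGD(trainable, lr=0.1, momentum=0.9)
    model(torch.randn(4, 8)).sum().backward()  # frozen params get no .grad
    optimizer.step()
    return [p.grad is None for p in model[1].parameters()]  # [True, True]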
class Model(nn.Module):
def __init__(self, with_fsdp, freeze_after_wrap_fsdp):
super().__init__()
self.trunk = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
)
self.head = nn.Linear(64, 10)
if with_fsdp and freeze_after_wrap_fsdp:
self.fsdp_wrap()
def fsdp_wrap(self):
self.trunk = FSDP(self.trunk)
self.head = FSDP(self.head)
def forward(self, x):
return self.head(self.trunk(x))
class NestedTrunkModel(nn.Module):
def __init__(self, with_fsdp, freeze_after_wrap_fsdp):
super().__init__()
self.trunk = nn.Sequential(
self._create_block(3, 64, with_fsdp, freeze_after_wrap_fsdp),
self._create_block(64, 64, with_fsdp, freeze_after_wrap_fsdp),
)
self.head = nn.Sequential(
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
nn.Linear(64, 10),
)
if with_fsdp and freeze_after_wrap_fsdp:
self.fsdp_wrap()
def fsdp_wrap(self):
for name, child in self.trunk.named_children():
wrapped_child = FSDP(child)
setattr(self.trunk, name, wrapped_child)
self.trunk = FSDP(self.trunk)
self.head = FSDP(self.head)
def forward(self, x):
return self.head(self.trunk(x))
def _create_block(self, in_channels, out_channels, with_fsdp, freeze_after_wrap_fsdp):
block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3),
nn.ReLU(inplace=True),
)
return block
def _create_model(with_fsdp, with_nested_trunk, freeze_after_wrap_fsdp):
if with_nested_trunk:
model = NestedTrunkModel(with_fsdp, freeze_after_wrap_fsdp)
else:
model = Model(with_fsdp, freeze_after_wrap_fsdp)
return model
class FreezingMethod(str, Enum):
GradToNone = "grad_to_none"
RequiresGrad = "requires_grad"
def _distributed_worker(
gpu_id,
world_size,
with_fsdp,
with_nested_trunk,
freezing_method,
freeze_after_wrap_fsdp,
tempfile_name,
unused,
rank_0_output,
expected_state,
):
torch.cuda.set_device(gpu_id)
rank = gpu_id
result = dist_init(rank, world_size, tempfile_name, unused)
assert result, "Dist init failed"
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
batch = torch.randn(size=(2, 3, 224, 224)).cuda()
model = _create_model(with_fsdp, with_nested_trunk, freeze_after_wrap_fsdp)
model = model.cuda()
# freezing the trunk using requires_grad.
if freezing_method == FreezingMethod.RequiresGrad:
for param in model.trunk.parameters():
param.requires_grad = False
if with_fsdp:
if not freeze_after_wrap_fsdp:
model.fsdp_wrap()
model = FSDP(model)
else:
model = DistributedDataParallel(model, device_ids=[gpu_id])
if gpu_id == 0:
print(model)
target = torch.tensor([0, 1], dtype=torch.long).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
for iteration in range(3):
out = model(batch)
fake_loss = criterion(out, target)
print("Loss", iteration, ":", fake_loss.item())
optimizer.zero_grad()
fake_loss.backward()
if freezing_method == FreezingMethod.GradToNone:
for param in model.trunk.parameters():
param.grad = None
optimizer.step()
if with_fsdp:
fsdp_state = model.state_dict()
# Move tensors to CPU to compare numerics.
for k, v in fsdp_state.items():
fsdp_state[k] = v.cpu()
assert objects_are_equal(expected_state, fsdp_state, raise_exception=True)
elif rank == 0:
state_after = model.module.cpu().state_dict()
torch.save(state_after, rank_0_output)
teardown()
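# Illustrative sketch (ours): the GradToNone freezing method above relies on optimizers
# skipping parameters whose .grad is None, so clearing the gradient right before step()
# leaves that weight untouched for the step.
def _grad_to_none_sketch():
    layer = nn.Linear(2, 2)
    before = layer.weight.detach().clone()
    opt = optim.SGD(layer.parameters(), lr=0.1)
    layer(torch.rand(4, 2)).sum().backward()
    layer.weight.grad = None  # "freeze" the weight for this step
    opt.step()
    return torch.equal(before, layer.weight.detach())  # True: only the bias moved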
# A fixture to get tempfiles and ensure they are cleaned up.
@pytest.fixture()
def temp_files():
    num = 15  # 1 DDP case and 4 FSDP cases, each needing 3 files.
files = [tempfile.mkstemp()[1] for _ in range(num)]
yield tuple(files)
# temp files could have been removed, so we use rmf.
for name in files:
rmf(name)
@skip_if_single_gpu
@pytest.mark.parametrize("nested_trunk", ["nested_trunk", "simple_trunk"])
def test_freezing_weights(temp_files, nested_trunk):
with_nested_trunk = nested_trunk == "nested_trunk"
world_size = 2
# DDP
with_fsdp = False
freezing_method = FreezingMethod.RequiresGrad
mp.spawn(
_distributed_worker,
(world_size, with_fsdp, with_nested_trunk, freezing_method, True) + temp_files[0:3] + (None,),
nprocs=world_size,
)
    # FSDP, cases 1 through 4.
with_fsdp = True
expected_state = torch.load(temp_files[2])
temp_file_idx = 3
for freezing_method, freeze_after_wrap_fsdp in product(
[FreezingMethod.RequiresGrad, FreezingMethod.GradToNone], [True, False]
):
print(f"Testing FSDP with freezing method {freezing_method}")
mp.spawn(
_distributed_worker,
(world_size, with_fsdp, with_nested_trunk, freezing_method, freeze_after_wrap_fsdp)
+ temp_files[temp_file_idx : temp_file_idx + 3]
+ (expected_state,),
nprocs=world_size,
)
temp_file_idx += 3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
import os
import tempfile
from parameterized import parameterized
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch.optim import Adam
from fairscale.fair_dev.testing.testing import in_temporary_directory, skip_if_single_gpu, temp_files_ctx
from fairscale.nn import FullyShardedDataParallel
from tests.nn.data_parallel.test_fsdp import DistributedTest, MixtureOfExperts, rename_test, spawn_and_init
USE_TEMPFILE = True # False for debugging
class ConvolutionalModel(nn.Module):
def __init__(self, embedding_size: int, with_fsdp: bool, process_group):
super().__init__()
self.conv1 = self._conv_block(3, embedding_size)
self.conv2: nn.Module = self._conv_block(embedding_size, embedding_size // 2)
self.conv3: nn.Module = self._conv_block(embedding_size // 2, embedding_size)
self.pool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
self.flatten = nn.Flatten(start_dim=1)
self.relu = nn.ReLU()
self.fc1: nn.Module = nn.Linear(embedding_size, 2 * embedding_size)
self.fc2: nn.Module = nn.Linear(2 * embedding_size, 2 * embedding_size)
self.fc3: nn.Module = nn.Linear(2 * embedding_size, embedding_size + 1)
self.fc4: nn.Module = nn.Linear(embedding_size + 1, embedding_size)
if with_fsdp:
self.conv2 = FullyShardedDataParallel(self.conv2, process_group=process_group)
self.conv3 = FullyShardedDataParallel(self.conv3, process_group=process_group, flatten_parameters=False)
self.fc1 = FullyShardedDataParallel(self.fc1, process_group=process_group)
self.fc3 = FullyShardedDataParallel(self.fc3, process_group=process_group, flatten_parameters=False)
@staticmethod
def _conv_block(in_channels: int, out_channels: int):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=(3, 3)),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.pool(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
x = self.relu(x)
x = self.fc4(x)
return x
def _create_model(embedding_size: int, with_fsdp: bool, process_group, flatten_parameters: bool = True):
model = ConvolutionalModel(with_fsdp=with_fsdp, process_group=process_group, embedding_size=embedding_size).cuda()
if with_fsdp:
return FullyShardedDataParallel(model, process_group=process_group, flatten_parameters=flatten_parameters)
else:
return model
def _load_sharded_checkpoint(rank: int):
return torch.load(f"checkpoint_{rank}.torch") # type: ignore
def _worker(gpu_id: int, sync_file: str, world_size: int, embedding_size: int, flatten_parameters: bool):
torch.manual_seed(0)
torch.cuda.set_device(gpu_id)
torch.distributed.init_process_group(
backend="nccl",
init_method=f"file://{sync_file}",
world_size=world_size,
rank=gpu_id,
)
process_group = torch.distributed.new_group()
# Create a dummy model with dummy inputs and targets
batch_size = 4
input = torch.randn(size=(batch_size, 3, 32, 32)).cuda()
target = torch.zeros(size=(batch_size, embedding_size)).cuda()
model = _create_model(
with_fsdp=True,
process_group=process_group,
embedding_size=embedding_size,
flatten_parameters=flatten_parameters,
)
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
# Train the model for a few epochs
for epoch in range(2):
out = model(input)
loss = criterion(out, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
    # Save a sharded checkpoint, one file per shard
cp_data = {
"weights": {k: v.cpu() for k, v in model.local_state_dict().items()},
"meta": model.local_metadata_dict(),
}
torch.save(cp_data, f"checkpoint_{gpu_id}.torch")
# Wait for all files to be written on the disk
dist.barrier() # type: ignore
# Reconstruct a full checkpoint from the sharded checkpoints
all_checkpoints = [_load_sharded_checkpoint(rank) for rank in range(world_size)]
consolidated_checkpoint = FullyShardedDataParallel.consolidate_shard_weights(
shard_weights=[c["weights"] for c in all_checkpoints],
shard_metadata=[c["meta"] for c in all_checkpoints],
)
# Check that the reconstructed parameters are correct and of the right shape
full_model = _create_model(with_fsdp=False, process_group=process_group, embedding_size=embedding_size)
full_model_state_dict = full_model.state_dict()
assert set(full_model_state_dict.keys()) == set(consolidated_checkpoint.keys())
for k in full_model_state_dict.keys():
assert consolidated_checkpoint[k].shape == full_model_state_dict[k].shape
# Verify that the checkpoint can be loaded by a FSDP model
loaded_model = _create_model(
with_fsdp=True,
process_group=process_group,
embedding_size=embedding_size,
flatten_parameters=flatten_parameters,
)
loaded_model.load_state_dict(consolidated_checkpoint)
for m in loaded_model.modules():
if isinstance(m, FullyShardedDataParallel):
m._reset_lazy_init()
# Verify that the model saved and the model loaded give the same results
with torch.no_grad():
before_checkpoint_loss = criterion(model(input), target).item()
after_checkpoint_loss = criterion(loaded_model(input), target).item()
assert before_checkpoint_loss == after_checkpoint_loss
@skip_if_single_gpu
@pytest.mark.parametrize("embedding_size", [128, 129])
@pytest.mark.parametrize("flatten_parameters", [True, False])
def test_consolidation(embedding_size: int, flatten_parameters: bool):
world_size = 2
with in_temporary_directory():
with temp_files_ctx(num=1) as temp_files:
mp.spawn(_worker, (temp_files[0], world_size, embedding_size, flatten_parameters), nprocs=world_size)
@skip_if_single_gpu
class TestConsolidatedWeights(DistributedTest):
@parameterized.expand(
[[True], [False]],
name_func=rename_test,
)
def test_consolidate_weights(self, transformer):
config = {"mixed_precision": True, "flatten_parameters": True, "compute_dtype": torch.float32}
world_size = min(torch.cuda.device_count(), 4)
if USE_TEMPFILE:
with tempfile.TemporaryDirectory() as d:
paths = [os.path.join(d, f"checkpoint_{rank}.pt") for rank in range(world_size)]
test_fn = functools.partial(
self._test_consolidate_weights, config, transformer=transformer, paths=paths
)
spawn_and_init(test_fn, world_sizes=[world_size])
else:
paths = [f"checkpoint_{rank}.pt" for rank in range(world_size)]
test_fn = functools.partial(self._test_consolidate_weights, config, transformer=transformer, paths=paths)
spawn_and_init(test_fn, world_sizes=[world_size])
@classmethod
def _test_consolidate_weights(self, config, rank, group, paths=None, transformer=False):
"""FSDP.gather_full_optim_state_dict() should return something very similar to optimizer.state_dict()"""
# Establish reference behavior.
if transformer:
fsdp = self.get_wrapped_model(group, config=config).cuda()
else:
fsdp = FullyShardedDataParallel(MixtureOfExperts(group, wrapper_config=config)).cuda()
optim = Adam(
fsdp.parameters(),
lr=0.01,
)
optim.zero_grad()
with torch.cuda.amp.autocast(enabled=True):
x = fsdp.module.get_input(torch.device("cuda"))
output = fsdp(*x)
loss = fsdp.module.get_loss(x, output).to("cuda")
fsdp.module.run_backward(loss)
optim.step()
# each worker saves a checkpoint with local_state_dict
cp_data = {
"weights": {k: v.cpu() for k, v in fsdp.local_state_dict().items()},
"meta": fsdp.local_metadata_dict(),
}
torch.save(cp_data, paths[fsdp.rank])
full_model_state_dict = fsdp.state_dict()
torch.distributed.barrier()
if fsdp.rank > 0:
return
all_checkpoints = [torch.load(p) for p in paths]
consolidated_checkpoint = FullyShardedDataParallel.consolidate_shard_weights(
shard_weights=[c["weights"] for c in all_checkpoints],
shard_metadata=[c["meta"] for c in all_checkpoints],
)
full_model_extra = set(full_model_state_dict).difference(set(consolidated_checkpoint))
consolidated_extra = set(consolidated_checkpoint).difference(set(full_model_state_dict))
msg = f"full model extra keys: {full_model_extra}, consolidated extra {consolidated_extra}"
        assert set(full_model_state_dict.keys()) == set(consolidated_checkpoint.keys()), msg
        for k in full_model_state_dict.keys():
            assert consolidated_checkpoint[k].shape == full_model_state_dict[k].shape
def test_consolidate_missing_params():
"""This tests that fairseq experts, which are saved independently from the rest of the model, can be consolidated."""
desired_path = "decoder.layers.1.moe_layer.experts.0"
shard_metadata = {
"param_metadata": [
{
"fsdp_path": "",
"params": {
"flat_param_0": {"names": ["missing"], "shapes": [(12, 4)], "numels": [12 * 4], "padding": 0}
},
"no_broadcast_optim_state": False,
"shared_param_info": [],
},
{
"fsdp_path": desired_path,
"params": {
"flat_param_0": {
"names": ["fc1.weight", "fc1.bias", "fc2.weight", "fc2.bias"],
"shapes": [(4, 4), (4,), (4, 4), (4,)],
"numels": [16, 4, 16, 4],
"padding": 0,
}
},
"no_broadcast_optim_state": True,
"shared_param_info": [],
},
],
"buffer_names": ["missing.buffer"],
}
shard_weights = {"decoder.layers.1.moe_layer.experts.0.flat_param_0": torch.randn(40, dtype=torch.float16)}
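    # The flat param above has 16 + 4 + 16 + 4 = 40 elements, matching the `numels` and
    # `shapes` metadata of the expert, so consolidation should recover exactly 4 tensors.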
consolidated_weights = FullyShardedDataParallel.consolidate_shard_weights(
[shard_weights], [shard_metadata], strict=False
)
assert len(consolidated_weights) == 4
for k in consolidated_weights:
        assert k.startswith(desired_path), f"{k} doesn't start with {desired_path}"
|
from collections import OrderedDict
from dataclasses import dataclass
import tempfile
import unittest
import torch
from torch import nn
from fairscale.fair_dev.testing.testing import dist_init
from fairscale.nn import FullyShardedDataParallel as FSDP
from fairscale.nn import auto_wrap, enable_wrap
def wrap_transformer_only(module, recurse, **kwargs):
if recurse:
return True
else:
return isinstance(module, nn.Transformer)
class ModelOutput(OrderedDict):
    # Partially adapted from the HF transformers lib in order to simulate its ModelOutput behavior
def to_tuple(self):
return tuple(self[k] for k in self.keys())
def __post_init__(self):
class_fields = getattr(self, "__dataclass_fields__")
for field in class_fields:
v = getattr(self, field)
if v is not None:
self[field] = v
def __getitem__(self, k):
if isinstance(k, str):
inner_dict = {k: v for (k, v) in self.items()}
return inner_dict[k]
else:
return self.to_tuple()[k]
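# Illustration of the simulated behavior: for `out = TransformerOutput(output=t)`,
# both `out["output"]` and `out[0]` return `t`, which is why the LM head below can
# index the transformer output with an integer key.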
@dataclass
class TransformerOutput(ModelOutput):
output: torch.FloatTensor = None
class TransformerWithCustomOutput(nn.Transformer): # type: ignore[name-defined]
def forward(self, *args, **kwargs):
output = super().forward(*args, **kwargs)
return TransformerOutput(output=output)
class TransformerWithLMHead(nn.Module):
def __init__(self, d_vocab=100, d_model=16):
super().__init__()
self.d_vocab = d_vocab
self.d_model = d_model
self.embed_tokens = nn.Embedding(d_vocab, d_model)
self.transformer = TransformerWithCustomOutput(
d_model, num_encoder_layers=2, num_decoder_layers=2, dim_feedforward=64
)
self.output_proj = nn.Linear(d_model, d_vocab)
def generate_random_sequences(self, seq_len=20, batch_size=2):
source_seq = torch.randint(high=self.d_vocab, size=(seq_len, batch_size))
target_seq = torch.randint(high=self.d_vocab, size=(seq_len, batch_size))
return source_seq, target_seq
def forward(self, source_seq, target_seq):
source_embeddings = self.embed_tokens(source_seq)
target_embeddings = self.embed_tokens(target_seq)
output = self.transformer(source_embeddings, target_embeddings)
        # Use an integer key here, just like the Hugging Face transformers lib does
return self.output_proj(output[0])
class TestHFTransformersAutoWrap(unittest.TestCase):
def setUp(self) -> None:
if not torch.cuda.is_available():
raise unittest.SkipTest("CUDA not available, skipping test")
torch.cuda.set_device(0)
_, filename = tempfile.mkstemp()
_, filename_rpc = tempfile.mkstemp()
dist_init(0, 1, filename, filename_rpc)
self.device = torch.device("cuda")
print("Build model ...")
self.model = TransformerWithLMHead()
self.model.to(self.device)
def test_auto_wrap_hf_model(self):
with enable_wrap(wrapper_cls=FSDP, auto_wrap_policy=wrap_transformer_only):
self.model = auto_wrap(self.model)
self.model = FSDP(self.model)
self.assertTrue(isinstance(self.model.transformer, FSDP), "Transformer should have been wrapped with FSDP")
source_seq, target_seq = self.model.generate_random_sequences()
source_seq = source_seq.to(self.device)
target_seq = target_seq.to(self.device)
print("Evaluating model ...")
# This should not fail
self.model(source_seq, target_seq)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with GPU memory usage. """
import contextlib
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
import torch.optim as optim
from fairscale.fair_dev.testing.testing import dist_init, dump_all_tensors, skip_if_single_gpu, teardown, temp_files_ctx
from fairscale.internal import torch_version
from fairscale.internal.parallel import get_process_group_cached
from fairscale.nn import checkpoint_wrapper
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.nn.data_parallel import auto_wrap_bn
def to_fsdp(module, fsdp_config):
return FSDP(module, process_group=get_process_group_cached(), **fsdp_config)
def get_cur_mem(rank, result, prefix):
"""Collect memory allocated values in a result dict in MB"""
result[prefix] = round(torch.cuda.memory_allocated() / 1024 / 1024)
class Model(nn.Module):
def __init__(self, hidden_dim):
super().__init__()
        # TODO (Min): for both fast and memory efficient conv kernels, we should be using
        # AMP/fp16 + channel_last input format. Otherwise, cudnn internally converts to
        # channel_last when the weights are fp16. Leaving this knowledge here so that a
        # future test can cover it.
self.stem = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
self.blocks = nn.Sequential(
nn.Conv2d(64, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
)
self.head = nn.Linear(hidden_dim, 10)
def forward(self, x):
return self.head(self.blocks(self.stem(x)))
def create_model(with_fsdp, with_checkpoint, model_hidden_dim, fsdp_config):
model = Model(model_hidden_dim)
if with_fsdp:
model.stem = auto_wrap_bn(model.stem, single_rank_pg=False)
model.blocks = auto_wrap_bn(model.blocks, single_rank_pg=False)
if with_checkpoint:
model.blocks = checkpoint_wrapper(model.blocks)
model.stem = to_fsdp(model.stem, fsdp_config)
model.blocks = to_fsdp(model.blocks, fsdp_config)
model.head = to_fsdp(model.head, fsdp_config)
else:
if with_checkpoint:
model.blocks = checkpoint_wrapper(model.blocks)
return model
def _distributed_worker(
gpu_id, world_size, with_fsdp, with_checkpoint, filename, filename_rpc, expected, model_hidden_dim, fsdp_config
):
torch.cuda.set_device(gpu_id)
rank = gpu_id
result = dist_init(rank, world_size, filename, filename_rpc)
assert result, "Dist init failed"
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
    # Note that FSDP auto-casts the input in AMP mode, so we don't need to call half() here.
batch = torch.randn(size=(2, 3, 224, 224)).cuda()
model = create_model(with_fsdp, with_checkpoint, model_hidden_dim, fsdp_config)
model = model.cuda()
if with_fsdp:
model = to_fsdp(model, fsdp_config)
else:
model = DistributedDataParallel(model, device_ids=[gpu_id], bucket_cap_mb=500)
# We enable momentum so that after the first iteration, the optimizer state is added
# to the total memory used.
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
# Set AMP context if needed.
context = contextlib.suppress()
if "mixed_precision" in fsdp_config and fsdp_config["mixed_precision"]:
context = torch.cuda.amp.autocast(enabled=True)
    # We have observed that sometimes after the 3rd iteration, the 4th one can fail (not on this
    # test but on much bigger scale tests). We run 4 iterations here just in case it happens.
iterations = 4
results = {} # results of memory stats
for iteration in range(iterations):
get_cur_mem(gpu_id, results, f"iter {iteration}: start")
with context:
out = model(batch)
get_cur_mem(gpu_id, results, f"iter {iteration}: after fwd")
out = sum(o.sum() for o in out[0])
fake_loss = criterion(out, torch.tensor(0.0).cuda())
get_cur_mem(gpu_id, results, f"iter {iteration}: after loss")
fake_loss.backward()
get_cur_mem(gpu_id, results, f"iter {iteration}: after bwd")
optimizer.step()
get_cur_mem(gpu_id, results, f"iter {iteration}: after step")
        # It is important to use `set_to_none` below, not optimizer.zero_grad(), to reclaim memory.
if torch_version() >= (1, 7, 0):
model.zero_grad(set_to_none=True)
else:
for p in model.parameters():
p.grad = None
get_cur_mem(gpu_id, results, f"iter {iteration}: done")
dump_all_tensors(gpu_id)
print(results)
def cmp(results, expected):
ret = ""
assert results.keys() == expected.keys(), f"{list(results.keys())} vs. {list(expected.keys())}"
for k, v in results.items():
exp = expected[k]
if abs(exp - v) > 1: # allow 1MB rounding differences
ret += f"{k}: got {v}, expected {exp}\n"
return ret
output = cmp(results, expected)
assert not output, output
teardown()
@skip_if_single_gpu
@pytest.mark.timeout(120)
@pytest.mark.parametrize("ckpt", ["no_ckpt", "ckpt"])
@pytest.mark.parametrize("fsdp", ["ddp", "fsdp", "fsdp_amp_default", "fsdp_amp_compute_dtype32"])
@pytest.mark.skipif(
torch_version() >= (1, 14, 0),
reason="Tests broke in Pytorch pre-release version 1.14",
)
def test_fsdp_memory(fsdp, ckpt):
expected = {
("ddp", "no_ckpt"): {
"iter 0: start": 9,
"iter 0: after fwd": 346,
"iter 0: after loss": 346,
"iter 0: after bwd": 14,
"iter 0: after step": 17,
"iter 0: done": 13,
"iter 1: start": 13,
"iter 1: after fwd": 350,
"iter 1: after loss": 350,
"iter 1: after bwd": 17,
"iter 1: after step": 17,
"iter 1: done": 13,
"iter 2: start": 13,
"iter 2: after fwd": 350,
"iter 2: after loss": 350,
"iter 2: after bwd": 17,
"iter 2: after step": 17,
"iter 2: done": 13,
"iter 3: start": 13,
"iter 3: after fwd": 350,
"iter 3: after loss": 350,
"iter 3: after bwd": 17,
"iter 3: after step": 17,
"iter 3: done": 13,
},
("fsdp", "no_ckpt"): {
"iter 0: start": 3,
"iter 0: after fwd": 340,
"iter 0: after loss": 340,
"iter 0: after bwd": 16,
"iter 0: after step": 18,
"iter 0: done": 5,
"iter 1: start": 5,
"iter 1: after fwd": 342,
"iter 1: after loss": 342,
"iter 1: after bwd": 18,
"iter 1: after step": 18,
"iter 1: done": 5,
"iter 2: start": 5,
"iter 2: after fwd": 342,
"iter 2: after loss": 342,
"iter 2: after bwd": 18,
"iter 2: after step": 18,
"iter 2: done": 5,
"iter 3: start": 5,
"iter 3: after fwd": 342,
"iter 3: after loss": 342,
"iter 3: after bwd": 18,
"iter 3: after step": 18,
"iter 3: done": 5,
},
("fsdp_amp_default", "no_ckpt"): {
"iter 0: start": 28,
"iter 0: after fwd": 630,
"iter 0: after loss": 630,
"iter 0: after bwd": 67,
"iter 0: after step": 93,
"iter 0: done": 54,
"iter 1: start": 54,
"iter 1: after fwd": 657,
"iter 1: after loss": 657,
"iter 1: after bwd": 93,
"iter 1: after step": 93,
"iter 1: done": 54,
"iter 2: start": 54,
"iter 2: after fwd": 657,
"iter 2: after loss": 657,
"iter 2: after bwd": 93,
"iter 2: after step": 93,
"iter 2: done": 54,
"iter 3: start": 54,
"iter 3: after fwd": 657,
"iter 3: after loss": 657,
"iter 3: after bwd": 93,
"iter 3: after step": 93,
"iter 3: done": 54,
},
("fsdp_amp_compute_dtype32", "no_ckpt"): {
"iter 0: start": 28,
"iter 0: after fwd": 657,
"iter 0: after loss": 657,
"iter 0: after bwd": 67,
"iter 0: after step": 93,
"iter 0: done": 54,
"iter 1: start": 54,
"iter 1: after fwd": 684,
"iter 1: after loss": 684,
"iter 1: after bwd": 93,
"iter 1: after step": 93,
"iter 1: done": 54,
"iter 2: start": 54,
"iter 2: after fwd": 684,
"iter 2: after loss": 684,
"iter 2: after bwd": 93,
"iter 2: after step": 93,
"iter 2: done": 54,
"iter 3: start": 54,
"iter 3: after fwd": 684,
"iter 3: after loss": 684,
"iter 3: after bwd": 93,
"iter 3: after step": 93,
"iter 3: done": 54,
},
("ddp", "ckpt"): {
"iter 0: start": 9,
"iter 0: after fwd": 57,
"iter 0: after loss": 57,
"iter 0: after bwd": 14,
"iter 0: after step": 17,
"iter 0: done": 13,
"iter 1: start": 13,
"iter 1: after fwd": 61,
"iter 1: after loss": 61,
"iter 1: after bwd": 17,
"iter 1: after step": 17,
"iter 1: done": 13,
"iter 2: start": 13,
"iter 2: after fwd": 61,
"iter 2: after loss": 61,
"iter 2: after bwd": 17,
"iter 2: after step": 17,
"iter 2: done": 13,
"iter 3: start": 13,
"iter 3: after fwd": 61,
"iter 3: after loss": 61,
"iter 3: after bwd": 17,
"iter 3: after step": 17,
"iter 3: done": 13,
},
("fsdp", "ckpt"): {
"iter 0: start": 3,
"iter 0: after fwd": 51,
"iter 0: after loss": 51,
"iter 0: after bwd": 16,
"iter 0: after step": 18,
"iter 0: done": 5,
"iter 1: start": 5,
"iter 1: after fwd": 53,
"iter 1: after loss": 53,
"iter 1: after bwd": 18,
"iter 1: after step": 18,
"iter 1: done": 5,
"iter 2: start": 5,
"iter 2: after fwd": 53,
"iter 2: after loss": 53,
"iter 2: after bwd": 18,
"iter 2: after step": 18,
"iter 2: done": 5,
"iter 3: start": 5,
"iter 3: after fwd": 53,
"iter 3: after loss": 53,
"iter 3: after bwd": 18,
"iter 3: after step": 18,
"iter 3: done": 5,
},
("fsdp_amp_default", "ckpt"): {
"iter 0: start": 28,
"iter 0: after fwd": 52,
"iter 0: after loss": 52,
"iter 0: after bwd": 67,
"iter 0: after step": 93,
"iter 0: done": 54,
"iter 1: start": 54,
"iter 1: after fwd": 79,
"iter 1: after loss": 79,
"iter 1: after bwd": 93,
"iter 1: after step": 93,
"iter 1: done": 54,
"iter 2: start": 54,
"iter 2: after fwd": 79,
"iter 2: after loss": 79,
"iter 2: after bwd": 93,
"iter 2: after step": 93,
"iter 2: done": 54,
"iter 3: start": 54,
"iter 3: after fwd": 79,
"iter 3: after loss": 79,
"iter 3: after bwd": 93,
"iter 3: after step": 93,
"iter 3: done": 54,
},
("fsdp_amp_compute_dtype32", "ckpt"): {
"iter 0: start": 28,
"iter 0: after fwd": 52,
"iter 0: after loss": 52,
"iter 0: after bwd": 67,
"iter 0: after step": 93,
"iter 0: done": 54,
"iter 1: start": 54,
"iter 1: after fwd": 79,
"iter 1: after loss": 79,
"iter 1: after bwd": 93,
"iter 1: after step": 93,
"iter 1: done": 54,
"iter 2: start": 54,
"iter 2: after fwd": 79,
"iter 2: after loss": 79,
"iter 2: after bwd": 93,
"iter 2: after step": 93,
"iter 2: done": 54,
"iter 3: start": 54,
"iter 3: after fwd": 79,
"iter 3: after loss": 79,
"iter 3: after bwd": 93,
"iter 3: after step": 93,
"iter 3: done": 54,
},
}[(fsdp, ckpt)]
# Compute the FSDP config.
fsdp_config = {}
# Set mixed precision.
if "amp" in fsdp:
fsdp_config["mixed_precision"] = True
# When compute_dtype is FP32, make sure we use clear_autocast_cache.
# Setting fp32_reduce_scatter and verbose for more code coverage.
if "compute_dtype32" in fsdp:
fsdp_config["compute_dtype"] = torch.float32
fsdp_config["fp32_reduce_scatter"] = True
fsdp_config["clear_autocast_cache"] = True
fsdp_config["verbose"] = True
    # Use a bigger hidden dimension for AMP to increase the model size so that bugs in
    # handling params will show up, but we don't do that in the base case to keep the
    # test fast.
# - hidden_dim 128: model size ~4MB
# - hidden_dim 512: model size ~55MB
# - hidden_dim 1024: model size ~200MB (seems to be too big for CI tests though)
model_hidden_dim = 128
if "amp" in fsdp:
model_hidden_dim = 512
# Get the fsdp and checkpoint flags.
with_fsdp = "fsdp" in fsdp
with_ckpt = ckpt == "ckpt"
world_size = 2
with temp_files_ctx(num=2) as temp_files:
mp.spawn(
_distributed_worker,
(world_size, with_fsdp, with_ckpt, temp_files[0], temp_files[1], expected, model_hidden_dim, fsdp_config),
nprocs=world_size,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with regnet-like model. """
import contextlib
from itertools import product
import random
import tempfile
import pytest
import torch
from torch.cuda.amp import GradScaler
import torch.multiprocessing as mp
from torch.nn import (
AdaptiveAvgPool2d,
BatchNorm2d,
Conv2d,
CrossEntropyLoss,
Linear,
Module,
ReLU,
Sequential,
Sigmoid,
SyncBatchNorm,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from fairscale.fair_dev.testing.testing import (
dist_init,
objects_are_equal,
rmf,
skip_if_single_gpu,
state_dict_norm,
teardown,
torch_cuda_version,
)
from fairscale.internal import torch_version
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.nn.data_parallel import TrainingState, auto_wrap_bn
if torch_version() >= (1, 8, 0):
from fairscale.optim.grad_scaler import ShardedGradScaler
# Const test params.
# Reduce iterations to 1 for debugging.
# Change world_size to 8 on beefy machines for better test coverage.
_world_size = 2
_iterations = 5
# Cover different ReLU flavors. Different workers may have different values since
# this is a file level global. This is intentional to cover different behaviors.
_relu_inplace = True
if random.randint(0, 1) == 0:
_relu_inplace = False
# TODO (Min): test apex BN when available in the future.
try:
import apex
apex_bn_converter = apex.parallel.convert_syncbn_model
except ImportError:
apex_bn_converter = None
pytorch_bn_converter = SyncBatchNorm.convert_sync_batchnorm # type: ignore
_single_rank_pg = False
class ResBlock(Module):
"""Conv block in regnet with residual connection."""
def __init__(self, width_in, width_out):
super().__init__()
self.proj = Conv2d(width_in, width_out, (1, 1), (2, 2), bias=False)
self.bn = BatchNorm2d(width_out)
self.f = Sequential(
Sequential( # block a
Conv2d(width_in, width_out, (1, 1), (1, 1), bias=False),
BatchNorm2d(width_out),
ReLU(_relu_inplace),
),
Sequential( # block b
Conv2d(width_out, width_out, (3, 3), (2, 2), (1, 1), groups=2, bias=False),
BatchNorm2d(width_out),
ReLU(_relu_inplace),
),
Sequential( # block se
AdaptiveAvgPool2d((1, 1)),
Sequential(
Conv2d(width_out, 2, (1, 1), (1, 1), bias=False),
ReLU(_relu_inplace),
Conv2d(2, width_out, (1, 1), (1, 1), bias=False),
Sigmoid(),
),
),
Conv2d(width_out, width_out, (1, 1), (1, 1), bias=False), # block c
BatchNorm2d(width_out), # final_bn
)
self.relu = ReLU()
self.need_fsdp_wrap = True
def forward(self, x):
x = self.bn(self.proj(x)) + self.f(x)
return self.relu(x)
class Model(Module):
"""SSL model with trunk and head."""
def __init__(self, conv_bias, linear_bias):
super().__init__()
print(f"relu inplace: {_relu_inplace}, conv bias: {conv_bias}, linear bias: {linear_bias}")
self.trunk = Sequential()
self.trunk.need_fsdp_wrap = True # Set a flag for later wrapping.
stem = Sequential(Conv2d(2, 4, (3, 3), (2, 2), (1, 1), bias=conv_bias), BatchNorm2d(4), ReLU(_relu_inplace))
any_stage_block1_0 = ResBlock(4, 8)
self.trunk.add_module("stem", stem)
self.trunk.add_module("any_stage_block1", Sequential(any_stage_block1_0))
self.head = Sequential(
Sequential(Linear(16, 16, bias=linear_bias), ReLU(), Linear(16, 8, bias=linear_bias)), # projection_head
Linear(8, 15, bias=False), # prototypes0
)
def forward(self, x):
x = self.trunk(x).reshape(-1)
x = self.head(x)
return x
# We get a bit fancy here. Since the scope is `module`, this is run only
# once no matter how many FSDP test variations are requested to run
# against the DDP reference. For example, a single DDP
# reference run is needed for both flatten and non-flatten param FSDP.
#
# Note, this runs DDP twice with and without mixed precision and asserts
# the resulting weights are different.
#
# This fixture captures and returns:
#
# - model state_dict before training
# - model data inputs
# - model state_dict after training
@pytest.fixture(scope="module")
def ddp_ref():
    # Cover different bias flavors. Pick them randomly instead of parameterizing them to
    # reduce the test runtime; otherwise, we would cover all cases exhaustively.
conv_bias = True
if random.randint(0, 1) == 0:
conv_bias = False
linear_bias = True
if random.randint(0, 1) == 0:
linear_bias = False
# Get a reference model state
model = Model(conv_bias, linear_bias)
state_before = model.state_dict()
# Get reference inputs per rank.
world_size = _world_size
iterations = _iterations
print(f"Getting DDP reference for world_size {world_size} and iterations {iterations}")
inputs = [[] for i in range(world_size)]
for rank in range(world_size):
for i in range(iterations):
inputs[rank].append(torch.rand(2, 2, 2, 2))
    # Run reference DDP training 4 times: full precision and mixed precision, each with and without sync_bn.
state_after = {}
for precision, sync_bn in product(["full", "mixed"], ["none", "pytorch"]):
temp_file_name = tempfile.mkstemp()[1]
unused = tempfile.mkstemp()[1]
rank_0_output = tempfile.mkstemp()[1]
try:
fsdp_config = None # This means we use DDP in _distributed_worker.
mp.spawn(
_distributed_worker,
args=(
world_size,
fsdp_config,
None,
precision == "mixed",
temp_file_name,
unused,
state_before,
inputs,
rank_0_output,
None,
sync_bn,
conv_bias,
linear_bias,
),
nprocs=world_size,
join=True,
)
state_after[(precision, sync_bn)] = torch.load(rank_0_output)
finally:
rmf(temp_file_name)
rmf(unused)
rmf(rank_0_output)
# Sanity check DDP's final states.
states = list(state_after.values())
for state in states[1:]:
assert state_dict_norm(states[0]) != state_dict_norm(state)
return state_before, inputs, conv_bias, linear_bias, state_after
# A fixture to get tempfiles and ensure they are cleaned up.
@pytest.fixture()
def temp_files():
temp_file_name = tempfile.mkstemp()[1]
unused = tempfile.mkstemp()[1]
yield temp_file_name, unused
# temp files could have been removed, so we use rmf.
rmf(temp_file_name)
rmf(unused)
def _distributed_worker(
rank,
world_size,
fsdp_config,
fsdp_wrap_bn,
ddp_mixed_precision,
tempfile_name,
unused,
state_before,
inputs,
rank_0_output,
state_after,
sync_bn,
conv_bias,
linear_bias,
):
torch.backends.cudnn.deterministic = True
result = dist_init(rank, world_size, tempfile_name, unused)
assert result, "Dist init failed"
ddp = True
if fsdp_config:
ddp = False
assert isinstance(fsdp_config, dict), str(fsdp_config)
if fsdp_config["mixed_precision"]:
# To match DDP in AMP -O1, we need fp32 reduce scatter.
fsdp_config["fp32_reduce_scatter"] = True
model = Model(conv_bias, linear_bias)
model.load_state_dict(state_before)
model = model.cuda()
class DummyScaler:
def scale(self, loss):
return loss
def step(self, optim):
optim.step()
def update(self):
pass
scaler = DummyScaler()
if ddp:
if sync_bn == "pytorch":
model = pytorch_bn_converter(model)
model = DDP(model, device_ids=[rank], broadcast_buffers=True)
if ddp_mixed_precision:
scaler = GradScaler()
else:
        # Note: different ranks may wrap in a different order due to different random
        # seeds, but the results should be the same.
if random.randint(0, 1) == 0:
print(f"auto_wrap_bn {fsdp_wrap_bn}, then sync_bn {sync_bn}")
if fsdp_wrap_bn:
model = auto_wrap_bn(model, _single_rank_pg)
if sync_bn == "pytorch":
model = pytorch_bn_converter(model)
else:
print(f"sync_bn {sync_bn}, then auto_wrap_bn {fsdp_wrap_bn}")
if sync_bn == "pytorch":
model = pytorch_bn_converter(model)
if fsdp_wrap_bn:
model = auto_wrap_bn(model, _single_rank_pg)
model = FSDP(model, **fsdp_config).cuda()
if fsdp_config["mixed_precision"]:
scaler = ShardedGradScaler()
# Print the model for verification.
if rank == 0:
print(model)
optim = SGD(model.parameters(), lr=0.1)
loss_func = CrossEntropyLoss()
for in_data in inputs[rank]:
in_data = in_data.cuda()
context = contextlib.suppress()
if ddp and ddp_mixed_precision:
in_data = in_data.half()
context = torch.cuda.amp.autocast(enabled=True)
if not ddp and fsdp_config["mixed_precision"]:
context = torch.cuda.amp.autocast(enabled=True)
with context:
out = model(in_data)
fake_label = torch.zeros(1, dtype=torch.long).cuda()
loss = loss_func(out.unsqueeze(0), fake_label)
scaler.scale(loss).backward()
scaler.step(optim)
scaler.update()
optim.zero_grad()
if ddp:
# Save the rank 0 state_dict to the output file.
if rank == 0:
state_after = model.module.cpu().state_dict()
torch.save(state_after, rank_0_output)
else:
model.assert_state(TrainingState.IDLE)
# Ensure final state equals to the state_after.
fsdp_state = model.state_dict()
# Move tensors to CPU to compare numerics.
for k, v in fsdp_state.items():
fsdp_state[k] = v.cpu()
# Change False to True to enable this when you want to debug the mismatch.
if False and rank == 0:
def dump(d):
for k, v in d.items():
print(k, v)
dump(state_after)
dump(fsdp_state)
# If sync_bn is used, all ranks should have the same state, so we can compare with
# rank 0 state on every rank. Otherwise, only compare rank 0 with rank 0.
if sync_bn != "none" or rank == 0:
assert objects_are_equal(state_after, fsdp_state, raise_exception=True)
teardown()
# We use strings for precision and flatten params instead of bool to
# make the pytest output more readable.
@pytest.mark.skip("broken at head")
@skip_if_single_gpu
@pytest.mark.parametrize("precision", ["full", "mixed"])
@pytest.mark.parametrize("flatten", ["flatten", "no_flatten"])
@pytest.mark.parametrize("sync_bn", ["none", "pytorch"])
def test_regnet(temp_files, ddp_ref, precision, flatten, sync_bn):
if torch_version() < (1, 8, 0):
pytest.skip("pytorch version >= 1.8.0 required")
state_before, inputs, conv_bias, linear_bias, state_after = ddp_ref
state_after = state_after[(precision, sync_bn)]
fsdp_config = {}
fsdp_config["mixed_precision"] = precision == "mixed"
fsdp_config["flatten_parameters"] = flatten == "flatten"
    # When linear bias is True, DDP's AMP O1 and FSDP's default AMP O1.5 are different,
    # so we force FSDP to use AMP O1 here by setting compute_dtype to float32.
if linear_bias:
fsdp_config["compute_dtype"] = torch.float32
if fsdp_config["mixed_precision"] and torch_cuda_version() < (11, 0):
pytest.skip("Only CUDA 11 is supported with AMP equivalency")
# Wrap BN half of the time.
wrap_bn = True
if random.randint(0, 1) == 0:
wrap_bn = False
    # However, always wrap BN in mixed precision + sync_bn mode, regardless of compute_dtype,
    # to avoid errors from sync_bn wrapping.
if fsdp_config["mixed_precision"] and sync_bn != "none":
wrap_bn = True
# When BN is not wrapped (i.e. not in full precision), FSDP's compute_dtype needs to
# be fp32 to match DDP (otherwise, numerical errors happen on BN's running_mean/running_var
# buffers).
if fsdp_config["mixed_precision"] and not wrap_bn:
fsdp_config["compute_dtype"] = torch.float32
world_size = _world_size
mp.spawn(
_distributed_worker,
args=(
world_size,
fsdp_config,
wrap_bn,
None,
temp_files[0],
temp_files[1],
state_before,
inputs,
None,
state_after,
sync_bn,
conv_bias,
linear_bias,
),
nprocs=world_size,
join=True,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with shared weights between wrappers. """
from copy import deepcopy
import pytest
import torch
import torch.multiprocessing as mp
from torch.nn import Linear, Module
from torch.optim import SGD
from fairscale.fair_dev.testing.testing import (
dist_init,
objects_are_equal,
skip_if_single_gpu,
teardown,
temp_files_ctx,
)
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
class Model(Module):
def __init__(self, with_fsdp=False, inner_flat=False, sharing=None):
super().__init__()
self.l0 = Linear(4, 4, bias=True).cuda()
self.l1 = Linear(4, 4, bias=True).cuda()
self.l2 = Linear(4, 4, bias=True).cuda()
self.l3 = Linear(4, 4, bias=True).cuda()
        # Share the weights. The layer must have at least 1 param that is not
        # shared. Therefore, we use bias=True and test sharing either the
        # weight or the bias.
if sharing == "share_only_weights":
self.l1.weight = self.l3.weight
elif sharing == "share_only_bias":
self.l1.bias = self.l3.bias
else:
assert sharing is None or sharing == "share_none"
if with_fsdp:
            # Shared layers must be unflattened.
self.l1 = FSDP(self.l1, flatten_parameters=False)
self.l2 = FSDP(self.l2, flatten_parameters=inner_flat)
self.l3 = FSDP(self.l3, flatten_parameters=False)
if sharing in ["share_only_weights"]:
self.l3.append_shared_param(self.l1.module.weight)
if sharing in ["share_only_bias"]:
self.l3.append_shared_param(self.l1.module.bias)
def forward(self, x):
x = self.l0(x)
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
return x
# A fixture to get tempfiles and ensure they are cleaned up.
@pytest.fixture()
def temp_files():
# dist_init needs 2 files + 3 files for before state, after state, in_data.
with temp_files_ctx(5) as files:
yield files
@skip_if_single_gpu
@pytest.mark.parametrize("outer_flat", ["outer_flat", "outer_nonflat"])
@pytest.mark.parametrize("inner_flat", ["inner_flat", "inner_nonflat"])
@pytest.mark.parametrize("sharing", ["share_none", "share_only_weights", "share_only_bias"])
def test_shared_weight(temp_files, outer_flat, inner_flat, sharing):
"""Test FSDP with a model with shared weights."""
outer_flat = outer_flat == "outer_flat"
inner_flat = inner_flat == "inner_flat"
world_size = 2
# Get reference results.
model = Model(sharing=sharing)
sd_before = deepcopy(model.state_dict())
in_data = torch.rand(1, 4).cuda()
_train(model, in_data, world_size)
sd_after = deepcopy(model.state_dict())
# Before and after state should not be equal.
assert not objects_are_equal(sd_before, sd_after)
# Save data
torch.save(sd_before, temp_files[2])
torch.save(sd_after, temp_files[3])
torch.save(in_data, temp_files[4])
# Run FSDP
mp.spawn(
_dist_worker,
(world_size, temp_files, outer_flat, inner_flat, sharing),
nprocs=world_size,
)
def _dist_worker(rank, world_size, files, outer_flat, inner_flat, sharing):
# Get data from files.
file1, file2, sd_before, sd_after, in_data = files
sd_before = torch.load(sd_before, map_location=lambda storage, loc: storage.cuda(rank))
sd_after = torch.load(sd_after, map_location=lambda storage, loc: storage.cuda(rank))
in_data = torch.load(in_data, map_location=lambda storage, loc: storage.cuda(rank))
result = dist_init(rank=rank, world_size=world_size, filename=file1, filename_rpc=file2)
assert result, "Dist init failed"
fsdp_model = FSDP(Model(with_fsdp=True, inner_flat=inner_flat, sharing=sharing), flatten_parameters=outer_flat)
fsdp_model.load_state_dict(sd_before)
_train(fsdp_model, in_data)
objects_are_equal(sd_after, fsdp_model.state_dict(), raise_exception=True)
teardown()
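# _train below is used both for the single-process reference run (steps_per_iter=world_size,
# repeating the same batch and averaging the gradients to mimic what the FSDP ranks compute
# collectively) and for the per-rank FSDP run (steps_per_iter=1).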
def _train(model, in_data, steps_per_iter=1):
optim = SGD(model.parameters(), lr=0.1)
for _ in range(3):
# Simulate multiple ranks.
for _ in range(steps_per_iter):
out = model(in_data)
out.sum().backward()
        # Simulate averaging gradients across ranks.
if steps_per_iter > 1:
with torch.no_grad():
for p in model.parameters():
p.grad /= steps_per_iter
optim.step()
model.zero_grad(set_to_none=True)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with nested wrapping multiple times. """
import tempfile
import pytest
import torch
import torch.multiprocessing as mp
from torch.nn import Linear, Module, Sequential
from torch.optim import SGD
from fairscale.fair_dev.testing.testing import dist_init, skip_if_no_cuda, teardown
from fairscale.internal import torch_version
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.nn.data_parallel import TrainingState
def _test_func(rank, world_size, fsdp_config, tempfile_name, unused):
result = dist_init(rank, world_size, tempfile_name, unused)
assert result, "Dist init failed"
assert isinstance(fsdp_config, dict), str(fsdp_config)
class InnerModel(Module):
def __init__(self):
super().__init__()
self.layers = Sequential(
FSDP(Linear(5, 5), **fsdp_config),
)
def forward(self, x):
return self.layers(x)
inner_model = InnerModel()
model = FSDP(inner_model, **fsdp_config).cuda()
optim = SGD(model.parameters(), lr=0.1)
for i in range(3):
input = torch.rand((1, 5), dtype=torch.float).cuda()
input.requires_grad = True
output = model(input)
output.sum().backward()
optim.step()
optim.zero_grad()
input = torch.rand((1, 5), dtype=torch.float).cuda()
output = model(input)
model.assert_state(TrainingState.IDLE)
    # Wrap the inner model a second time to verify that rewrapping works.
rewrapped_model = FSDP(inner_model, **fsdp_config).cuda()
rewrapped_output = rewrapped_model(input)
assert torch.allclose(output, rewrapped_output)
teardown()
# We use strings for precision and flatten instead of bool to
# make the pytest output more readable.
@skip_if_no_cuda
@pytest.mark.parametrize("world_size", [1, 2] if torch.cuda.device_count() > 1 else [1])
@pytest.mark.parametrize("precision", ["full", "mixed"])
@pytest.mark.parametrize("flatten", ["flatten", "no_flatten"])
def test(world_size, precision, flatten):
"""
This test simulates wrapping the module after training to run inference.
This is required in cases where later in a session, the model is wrapped again in FSDP but
contains nested FSDP wrappers within the module.
"""
if torch_version() < (1, 6, 0):
pytest.skip("older pytorch doesn't support reduce_scatter")
temp_file_name = tempfile.mkstemp()[1]
unused = tempfile.mkstemp()[1]
fsdp_config = {
"mixed_precision": precision == "mixed",
"flatten_parameters": flatten == "flatten",
}
mp.spawn(
_test_func,
args=(world_size, fsdp_config, temp_file_name, unused),
nprocs=world_size,
join=True,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
import unittest
from parameterized import parameterized
import pytest
import torch
from torch import nn
from fairscale.fair_dev.testing.testing import dist_init, objects_are_equal, skip_if_cuda, teardown, temp_files_ctx
from fairscale.internal import torch_version
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from .test_fsdp import (
CONFIG_OPTIONS,
DistributedTest,
NestedWrappedModule,
TransformerWithSharedParams,
rename_test,
spawn_and_init,
)
@pytest.mark.skipif(torch_version() < (1, 8, 0), reason="pytorch version >= 1.8.0 required")
class TestLocalStateDict(DistributedTest):
@parameterized.expand([[True, True], [False, False]], name_func=rename_test)
def test_load_local_state_dict(self, flatten_params, mixed_precision):
test_fn = functools.partial(
self._load_local_and_train, {"flatten_parameters": flatten_params, "mixed_precision": mixed_precision}
)
spawn_and_init(test_fn)
@classmethod
def _load_local_and_train(self, config, rank, group, d_model=16, d_vocab=23):
"""Check that local_state_dict can be saved and loaded for a given worker, and that training updates it"""
model = self.get_wrapped_model(
group, cuda_first=False, config=config, d_vocab=d_vocab, d_model=d_model, add_bn=False
        )  # Set add_bn=True here to show that BN doesn't get updated
state_1 = model.local_state_dict()
state_before_training = {k: v.cpu().clone() for k, v in state_1.items()}
assert len(state_1) > 0
model.load_local_state_dict(state_1)
weight_key = "flat_param_0" if model.flatten_parameters else "embed_tokens.weight"
state_1_weight = state_1[weight_key]
assert state_1_weight.dtype == torch.float32, f"got dtype {state_1_weight.dtype} expected torch.float32"
if not model.flatten_parameters:
# The weight will be sharded since we access module.state_dict directly
state_1_module_weight = model.module.state_dict()[weight_key]
torch.testing.assert_allclose(state_1_weight, state_1_module_weight)
torch.testing.assert_allclose(state_1_weight, model.module.embed_tokens.weight)
        # Increase the number of epochs from 1 to 6 for ShardedGradScaler to work properly.
        # The test fails for num_epochs < 6 since the updates are skipped due to the gradient being inf.
self._train_for_several_steps(model, 6, model.mixed_precision)
state_2 = model.local_state_dict()
state_after_training = {k: v.cpu().clone() for k, v in state_2.items()}
model.load_local_state_dict(state_2)
assert state_1.keys() == state_2.keys()
# Assert that parameters were updated since before training
unchanged = []
unwrapped_model = model.module.module
buffers = {name for name, _ in unwrapped_model.named_buffers()}
for k in state_1:
if (state_before_training[k] == state_after_training[k]).all() and (k not in buffers):
unchanged.append(k)
if unchanged:
raise AssertionError(f"params {unchanged} not changed after training")
@pytest.mark.skipif(torch_version() < (1, 8, 0), reason="pytorch version >= 1.8.0 required")
class TestSaveLoadStateDict(DistributedTest):
@parameterized.expand([[False], [True]], name_func=rename_test)
def test_calling_state_dict_twice_mixed_precision(self, mixed_precision):
test_fn = functools.partial(
self._test_calling_state_dict_twice, {"flatten_parameters": False, "mixed_precision": mixed_precision}
)
spawn_and_init(test_fn)
@classmethod
def _test_calling_state_dict_twice(self, config, rank, group, **model_kwargs):
ddp_model = self.get_wrapped_model(group, cuda_first=False, config=config, **model_kwargs)
autocast = ddp_model.mixed_precision
self._train_for_several_steps(ddp_model, 1, autocast)
ddp_model.state_dict()
ddp_model.state_dict() # second call
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_state_dict_after_forward(self, config):
test_fn = functools.partial(self._test_module_state_dict, config)
spawn_and_init(test_fn)
@parameterized.expand([[False], [True]], name_func=rename_test)
def test_state_dict_before_forward(self, mixed_precision):
test_fn = functools.partial(
self._test_state_dict_before_forward, {"flatten_parameters": False, "mixed_precision": mixed_precision}
)
spawn_and_init(test_fn)
@classmethod
def _test_state_dict_before_forward(cls, config, rank, group):
ddp_model = cls.get_wrapped_model(group, cuda_first=False, config=config)
sd = ddp_model.state_dict()
for param_name in ("embed_tokens.weight", "vocab_bias"):
wt = sd[param_name]
assert wt.dtype == torch.float32, f"got dtype {wt.dtype} for {param_name}, expected torch.float32"
cls._train_for_several_steps(ddp_model, 1, ddp_model.mixed_precision)
@classmethod
def _test_module_state_dict(cls, config, rank, group):
ddp_model = cls.get_wrapped_model(group, cuda_first=False, config=config)
autocast = ddp_model.mixed_precision
cls._train_for_several_steps(ddp_model, 2, autocast)
state_1 = ddp_model.state_dict()
# You must make a new FSDP instance to use module.load_state_dict
unwrapped_model = TransformerWithSharedParams(group)
unwrapped_model.load_state_dict(state_1)
new_ddp_model = FSDP(unwrapped_model, group, **config).cuda()
cls._train_for_several_steps(new_ddp_model, 2, autocast)
try:
ddp_model.load_state_dict(new_ddp_model.state_dict())
assert False, "ddp_model.load_state_dict(new_ddp_model.state_dict()) succeeded"
except Exception:
pass
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_nested_wrapped_model(self, config):
test_fn = functools.partial(self._test_nested_wrapped_model, config=config)
spawn_and_init(test_fn)
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_nested_wrapped_model_local_state_dict(self, config):
test_fn = functools.partial(self._test_nested_wrapped_model_local_state_dict, config=config)
spawn_and_init(test_fn)
@classmethod
def _test_nested_wrapped_model(cls, rank, group, config=None):
# Get reference state dict without any nested FSDP instances.
model = NestedWrappedModule(group, None).cuda()
model = nn.parallel.DistributedDataParallel(model, device_ids=[rank], output_device=rank, process_group=group)
cls._train_for_several_steps(model, 2, autocast=config["mixed_precision"])
ref_state_dict = {k: v.clone() for k, v in model.module.state_dict().items()}
# Create a nested FSDP-wrapped instance.
if config["mixed_precision"]:
config["compute_dtype"] = torch.float32
model = NestedWrappedModule(group, config)
model = FSDP(model, group, **config).cuda()
cls._train_for_several_steps(model, 2, autocast=config["mixed_precision"])
# Round-trip state dict save/load/save.
state_dict = {k: v.clone() for k, v in model.state_dict().items()}
model.load_state_dict(state_dict)
state_dict = model.state_dict()
assert ref_state_dict.keys() == state_dict.keys(), f"{ref_state_dict.keys()} != {state_dict.keys()}"
for key in ref_state_dict.keys():
assert objects_are_equal(
ref_state_dict[key], state_dict[key], raise_exception=False
), f"{key}, {ref_state_dict[key]} != {state_dict[key]}"
@classmethod
def _test_nested_wrapped_model_local_state_dict(cls, rank, group, config=None, local=None):
# Create a nested FSDP-wrapped instance.
model = NestedWrappedModule(group, config)
model = FSDP(model, group, **config).cuda()
cls._train_for_several_steps(model, 2, autocast=config["mixed_precision"])
# Round trip state dict save/load/save.
ref_state_dict = {k: v.clone() for k, v in model.local_state_dict().items()}
model.load_local_state_dict(ref_state_dict)
state_dict = model.local_state_dict()
assert ref_state_dict.keys() == state_dict.keys(), f"{ref_state_dict.keys()} != {state_dict.keys()}"
for key in ref_state_dict.keys():
assert objects_are_equal(
ref_state_dict[key], state_dict[key], raise_exception=False
), f"{key}, {ref_state_dict[key]} != {state_dict[key]}"
@pytest.mark.skipif(torch_version() < (1, 8, 0), reason="pytorch version >= 1.8.0 required")
class TestStateDictDeviceDtype(DistributedTest):
@parameterized.expand([[False, False], [True, False], [True, True]], name_func=rename_test)
def test_state_dict_device(self, mixed_precision, cpu_offload):
test_fn = functools.partial(
self._test_state_dict_device, {"cpu_offload": cpu_offload, "mixed_precision": mixed_precision}
)
spawn_and_init(test_fn)
@parameterized.expand([[False, False], [True, False], [True, True], [False, True]], name_func=rename_test)
def test_state_dict_device_cuda(self, mixed_precision, cpu_offload):
test_fn = functools.partial(
self._test_state_dict_device,
{"cpu_offload": cpu_offload, "mixed_precision": mixed_precision, "state_dict_device": torch.device("cuda")},
)
spawn_and_init(test_fn)
@parameterized.expand([[False, False], [True, False], [True, True], [False, True]], name_func=rename_test)
def test_state_dict_device_cpu(self, mixed_precision, cpu_offload):
test_fn = functools.partial(
self._test_state_dict_device,
{"cpu_offload": cpu_offload, "mixed_precision": mixed_precision, "state_dict_device": torch.device("cpu")},
)
spawn_and_init(test_fn)
def test_state_dict_device_pure_fp16(self):
test_fn = functools.partial(
self._test_state_dict_device,
{"cpu_offload": False, "mixed_precision": False, "compute_dtype": torch.float16},
# pure_fp16 is similar to the --memory-efficient-fp16 option in fairseq
pure_fp16=True,
)
spawn_and_init(test_fn)
@classmethod
def _test_state_dict_device(self, config, rank, group, pure_fp16=False, **model_kwargs):
model = TransformerWithSharedParams(group, **model_kwargs)
if pure_fp16:
assert not config["mixed_precision"]
model = model.half()
fsdp_model = FSDP(model, group, **config)
if not config["cpu_offload"]:
fsdp_model = fsdp_model.cuda()
autocast = fsdp_model.mixed_precision or pure_fp16
self._train_for_several_steps(fsdp_model, 1, autocast)
state_dict = fsdp_model.state_dict()
state_dict_device = config.get("state_dict_device")
for k, v in state_dict.items():
if config["cpu_offload"] or (state_dict_device is not None and state_dict_device.type == "cpu"):
assert v.device.type == "cpu", v.device.type
else:
assert v.device.type == "cuda", v.device.type
expected_dtype = torch.float16 if pure_fp16 else torch.float32
for k, v in state_dict.items():
if not torch.is_floating_point(v):
continue
assert v.dtype == expected_dtype, f"{v.dtype} != {expected_dtype}"
@skip_if_cuda
def test_local_state_dict_calls_state_dict_recursion():
"""Testing the case of infinite recursive when FSDP is subclassed"""
class TestModule(FSDP):
def __init__(self):
super().__init__(module=nn.Linear(100, 100))
def state_dict(self, *args, **kwargs):
return self.local_state_dict(*args, **kwargs)
rank = 0
world_size = 1
with temp_files_ctx(2) as temp_files:
result = dist_init(rank, world_size, temp_files[0], temp_files[1])
assert result, "Dist init failed"
m = TestModule()
d = m.state_dict()
teardown()
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP and ensure expected overlapping between all_gather and forward. """
from statistics import mean
import time
from unittest.mock import patch
import pytest
import torch
from torch.cuda import Event
import torch.multiprocessing as mp
import torch.nn as nn
from fairscale.fair_dev.testing.testing import (
dist_init,
get_cycles_per_ms,
skip_if_single_gpu,
teardown,
temp_files_ctx,
)
from fairscale.internal import torch_version
from fairscale.nn import enable_wrap, wrap
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
class Layer(nn.Module):
def __init__(self, compute_cycles, has_params: bool):
super().__init__()
self.sleep_cycles = compute_cycles
self.optional_param = None
if has_params:
self.optional_param = nn.Parameter(torch.rand(1))
def forward(self, x):
# Get 2 events.
self.e1 = Event(enable_timing=True)
self.e2 = Event(enable_timing=True)
# Record the fake forward compute time.
self.e1.record()
if self.sleep_cycles > 0:
torch.cuda._sleep(self.sleep_cycles)
if self.optional_param is not None:
x = x + self.optional_param # force the param to be part of the graph
self.e2.record()
return x
def get_time(self):
# return the recorded duration.
return self.e1.elapsed_time(self.e2)
def _create_model(fsdp_config, compute_cycles, has_params: bool):
with enable_wrap(wrapper_cls=FSDP, **fsdp_config):
model = wrap(
nn.Sequential(
wrap(Layer(compute_cycles, has_params)),
wrap(Layer(compute_cycles, has_params)),
wrap(Layer(compute_cycles, has_params)),
wrap(Layer(compute_cycles, has_params)),
)
).cuda()
return model
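# With enable_wrap, each Layer above and the outer Sequential become separate FSDP
# instances, so, roughly, each wrapped layer that owns a parameter issues its own
# all_gather during forward; the timing below measures how well those all_gathers
# overlap with compute.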
class Min10:
def __init__(self):
self.data = []
def add(self, new_data):
if len(self.data) < 10:
self.data.append(new_data)
else:
self.data = sorted(self.data)
if new_data < self.data[-1]:
self.data[-1] = new_data
def avg(self):
return mean(self.data)
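# Min10 keeps only the 10 smallest samples seen so far, so avg() over 20 recorded
# iterations returns the mean of the 10 fastest ones, filtering out slow outliers
# caused by nondeterministic system events.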
def _distributed_worker(
gpu_id,
world_size,
fsdp_config,
tempfile,
tempfile_rpc,
):
torch.cuda.set_device(gpu_id)
rank = gpu_id
result = dist_init(rank, world_size, tempfile, tempfile_rpc)
assert result, "Dist init failed"
# Save the original torch.distributed.all_gather function since we will
# patch it to include an artificial delay.
orig_all_gather = torch.distributed.all_gather
orig_all_gather_base = (
torch.distributed._all_gather_base if hasattr(torch.distributed, "_all_gather_base") else None
)
def run(compute_cycles, all_gather_cycles):
has_params = all_gather_cycles > 0
model = _create_model(fsdp_config, compute_cycles, has_params)
        # Get the input and set its requires_grad to True because
        # we have a fake compute in the forward pass.
batch = torch.rand(1).cuda()
batch.requires_grad = True
        # We run 20 iterations but only collect timing data from the 10 smallest
        # data points because nondeterministic system events can disturb the timing.
cpu_iter = Min10()
cpu_wait = Min10()
gpu_compute = Min10()
gpu_total = Min10()
for _ in range(20):
# Get two events for measuring the overall time.
e1 = Event(enable_timing=True)
e2 = Event(enable_timing=True)
cpu_start = time.process_time()
all_gather_called = False
all_gather_base_called = False
def _delayed_all_gather(*args, **kwargs):
nonlocal all_gather_called
all_gather_called = True
torch.cuda._sleep(all_gather_cycles)
return orig_all_gather(*args, **kwargs)
def _delayed_all_gather_base(*args, **kwargs):
nonlocal all_gather_base_called
all_gather_base_called = True
torch.cuda._sleep(all_gather_cycles)
assert orig_all_gather_base
return orig_all_gather_base(*args, **kwargs)
method_string_all_gather_base = "torch.distributed._all_gather_base"
            if not hasattr(torch.distributed, "_all_gather_base"):
                # The method doesn't exist in this torch version; patch an unrelated,
                # always-present name so the base-variant mock is never invoked.
                method_string_all_gather_base = "math.nan"
# forward pass
#
# Even though both e1 & e2 are on the compute stream, since
# compute depends on all_gather, e2-e1 includes all_gather time.
e1.record()
with patch("torch.distributed.all_gather", _delayed_all_gather):
with patch(method_string_all_gather_base, _delayed_all_gather_base):
out = model(batch)
if has_params and world_size > 1:
assert all_gather_called or all_gather_base_called
else:
assert not all_gather_called and not all_gather_base_called
e2.record()
# backward pass
out.backward()
if torch_version() >= (1, 7, 0):
model.zero_grad(set_to_none=True)
else:
for p in model.parameters():
p.grad = None
cpu_iter_time = time.process_time() - cpu_start
# wait for gpu
out.item()
cpu_wait_for_gpu_time = time.process_time() - cpu_start - cpu_iter_time
# get sum of the compute time
times = []
for mod in model.modules():
if not isinstance(mod, Layer):
continue
times.append(mod.get_time())
# get gpu compute + all_gather time
overall_gpu_time = e1.elapsed_time(e2)
cpu_iter.add(cpu_iter_time)
cpu_wait.add(cpu_wait_for_gpu_time)
gpu_compute.add(sum(times))
gpu_total.add(overall_gpu_time)
del model
return {
"cpu_iter": cpu_iter.avg(),
"cpu_wait": cpu_wait.avg(),
"gpu_compute": gpu_compute.avg(),
"gpu_total": gpu_total.avg(),
}
sleep_cycles = int(100 * get_cycles_per_ms())
e1 = run(0, 0) # no compute, no all-gather
e2 = run(0, sleep_cycles) # no compute, only all-gather
e3 = run(sleep_cycles, 0) # only compute, no all-gather
e4 = run(sleep_cycles, sleep_cycles) # both compute and all-gather
debug_string = f"\nrank{rank}:\n e1: {e1}\n e2: {e2}\n e3: {e3}\n e4: {e4}"
print(debug_string)
    # Check the cpu/gpu timing. CPU should run ahead of GPU. Therefore, the cpu-gpu
    # wait should be long, except when there is no real work on GPU.
    #
    # If the assertions below fail, we likely have an unwanted cpu-gpu sync in the forward/backward pass.
short = [e1["cpu_iter"], e2["cpu_iter"], e3["cpu_iter"], e4["cpu_iter"], e1["cpu_wait"]]
long = [e3["cpu_wait"], e4["cpu_wait"]]
if world_size == 1:
short.append(e2["cpu_wait"]) # all gather should not be happening.
else:
long.append(e2["cpu_wait"]) # all gather should happen and prolong the cpu-gpu wait.
for s in short:
for l in long:
            # 5X longer is a safe margin, since the GPU work timing is around 100X
            # that of the CPU.
assert s * 5 < l, f"{s} * 5 < {l} in " + debug_string
# Check the GPU timing.
short = [e1["gpu_compute"], e1["gpu_total"], e2["gpu_compute"]]
long = [e3["gpu_compute"], e3["gpu_total"], e4["gpu_compute"], e4["gpu_total"]]
if world_size == 1:
short.append(e2["gpu_total"]) # all gather should not be happening.
else:
long.append(e2["gpu_total"]) # all gather should happen and prolong the cpu-gpu wait.
for s in short:
for l in long:
# 10X longer is a safe margin, since the time is around 100X longer
# when there is work on GPU vs. no work.
assert s * 10 < l, f"{s} * 10 < {l} in " + debug_string
# Check the GPU overlapping when there is all-gather.
if world_size > 1:
compute_only = e3["gpu_compute"]
all_gather_only = e2["gpu_total"]
both = e4["gpu_total"]
assert compute_only + all_gather_only > 1.1 * both, (
f"{compute_only} + {all_gather_only} > 1.1 * {both} in " + debug_string
)
teardown()
@skip_if_single_gpu
@pytest.mark.parametrize("world_size", [1, 2])
@pytest.mark.parametrize("flatten", ["flatten", "no_flatten"])
@pytest.mark.parametrize("mixed", ["mixed", "full"])
def test_forward_overlap(world_size, flatten, mixed):
fsdp_config = {
"flatten_parameters": flatten == "flatten",
"mixed_precision": mixed == "mixed",
}
with temp_files_ctx(2) as temp_files:
mp.spawn(
_distributed_worker,
(world_size, fsdp_config, temp_files[0], temp_files[1]),
nprocs=world_size,
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
import functools
from time import time
import unittest
from parameterized import parameterized
import torch
from torch import nn
from torch.optim import SGD, Adadelta, Adam # type: ignore
from fairscale.fair_dev.testing.testing import dist_init, objects_are_equal, spawn_for_all_world_sizes
from fairscale.internal.params import recursive_copy_to_device
from fairscale.nn.data_parallel import FullyShardedDataParallel, get_fsdp_instances
from fairscale.nn.data_parallel.fsdp_optim_utils import is_singleton_tensor
from .test_fsdp import (
DistributedTest,
DummyProcessGroup,
MixtureOfExperts,
TransformerWithSharedParams,
rename_test,
spawn_and_init,
)
def all_tensors_numel_except_for_step(dct):
"""Compute the sum of numel from all tensors from a dict, except when the key is `step`."""
ret = 0
for k, v in dct.items():
if k != "step" and torch.is_tensor(v):
ret += v.numel()
return ret
def assert_equal(a, b):
assert a == b, f"{a} != {b}"
def spawn_and_init_multiple_groups(fn, args=None, **spawn_kwargs):
if args is None:
args = ()
run_fn = functools.partial(init_and_run, fn, args)
spawn_for_all_world_sizes(run_fn, **spawn_kwargs)
def _find_my_group_index(grouped_ranks):
"""Return the index corresponding to the MoE group of the current process."""
my_rank = torch.distributed.get_rank()
for i, group in enumerate(grouped_ranks):
if my_rank in group:
return i
raise RuntimeError(f"Unable to find process rank {my_rank} in the set of grouped ranks {grouped_ranks}.")
def get_moe_group(moe_expert_count=2):
"""Return a process group for initializing a MoE layer."""
if torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
# If there are at least as many experts as ranks in the world.
if world_size <= moe_expert_count:
assert moe_expert_count % world_size == 0
moe_groups = [[i] for i in range(world_size)]
# If you have a larger world size than experts.
else:
assert world_size % moe_expert_count == 0
ranks_per_group = world_size // moe_expert_count
moe_groups = [[i + j * moe_expert_count for j in range(ranks_per_group)] for i in range(moe_expert_count)]
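# For example, with world_size=4 and moe_expert_count=2, this yields groups [[0, 2], [1, 3]].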
moe_pgs = [torch.distributed.new_group(g) for g in moe_groups]
# Find the index in the set of moe_groups which contains the current rank.
my_group_idx = _find_my_group_index(moe_groups)
return moe_pgs[my_group_idx]
else:
return torch.distributed.new_group([torch.distributed.get_rank()])
def init_and_run(fn, args, rank, world_size, filename, filename_rpc):
"""Initialize and run the unit test for testing replicated MoE groups."""
dist_init(rank, world_size, filename, filename_rpc)
torch.cuda.set_device(rank)
group = torch.distributed.new_group()
# Specify the moe_group used to initialize the MoE layers with.
fn(rank, group, *args, expert_group=get_moe_group())
class TestOptimizerUtils(DistributedTest):
@parameterized.expand(
[[functools.partial(SGD, momentum=0.9), True], [SGD, False], [Adam, False], [Adadelta, True], [Adam, True]],
name_func=rename_test,
)
def test_consolidate_optimizer(self, optim_fn, transformer):
config = {"mixed_precision": True, "flatten_parameters": True}
config["compute_dtype"] = torch.float32
test_fn = functools.partial(
self._test_consolidated_optimizer, config, optim_fn=optim_fn, transformer=transformer
)
spawn_and_init(test_fn, world_sizes=[min(torch.cuda.device_count(), 4)])
@parameterized.expand(
[[SGD, False], [Adam, False]],
name_func=rename_test,
)
def test_consolidate_optimizer_diff_world_size(self, optim_fn, transformer):
if torch.cuda.device_count() < 4:
raise unittest.SkipTest("This test requires at least 4 GPUs.")
config = {"mixed_precision": True, "flatten_parameters": True}
config["compute_dtype"] = torch.float32
test_fn = functools.partial(self._test_consolidated_optimizer, config, optim_fn=Adam, transformer=transformer)
spawn_and_init_multiple_groups(test_fn, world_sizes=[min(torch.cuda.device_count(), 4)])
@classmethod
def _test_consolidated_optimizer(
self, config, rank, group, optim_fn=torch.optim.SGD, transformer=False, expert_group=None
):
"""FSDP.gather_full_optim_state_dict() should return something very similar to optimizer.state_dict()"""
# Establish reference behavior.
if transformer:
unwrapped_model = TransformerWithSharedParams(group, wrapper_config=config).cuda()
fsdp = self.get_wrapped_model(group, config=config).cuda()
else:
unwrapped_model = MixtureOfExperts(group, wrapper_config=None, expert_group=expert_group).cuda()
fsdp = FullyShardedDataParallel(
MixtureOfExperts(group, wrapper_config=config, expert_group=expert_group)
).cuda()
try:
fsdp_optim = optim_fn(
fsdp.parameters(),
lr=0.01,
)
optim_unwrapped = optim_fn(unwrapped_model.parameters(), lr=0.01)
except TypeError: # Adadelta
fsdp_optim = optim_fn(fsdp.parameters())
optim_unwrapped = optim_fn(unwrapped_model.parameters())
fsdp_optim.zero_grad()
optim_unwrapped.zero_grad()
with torch.cuda.amp.autocast(enabled=True):
x = fsdp.module.get_input(torch.device("cuda"))
output = fsdp(*x)
loss = fsdp.module.get_loss(x, output).to("cuda")
fsdp.module.run_backward(loss)
fsdp_optim.step()
output = unwrapped_model(*x)
loss = unwrapped_model.get_loss(x, output)
unwrapped_model.run_backward(loss)
optim_unwrapped.step()
unwrapped_sd = optim_unwrapped.state_dict()
if not transformer and not expert_group:
no_broadcast_children = [x for x in get_fsdp_instances(fsdp) if x.no_broadcast_optim_state]
assert len(no_broadcast_children) == 1, f"Expected 1 FSDP instance with no_broadcast_optim_state, got {len(no_broadcast_children)}"
assert get_fsdp_instances(fsdp)[-1].no_broadcast_optim_state
torch.cuda.empty_cache()
cuda_gb_before = torch.cuda.memory_stats(fsdp.rank)["allocated_bytes.all.current"] / 1024**3
tstart = time()
sd = fsdp.gather_full_optim_state_dict(fsdp_optim, recipient_rank=0)
duration = time() - tstart
assert duration < fsdp.world_size, f"gather optim state took {duration} seconds, suspect change in _consolidate"
cuda_gb_after = torch.cuda.memory_stats(fsdp.rank)["allocated_bytes.all.current"] / 1024**3
mem_usg_gb = cuda_gb_after - cuda_gb_before
assert mem_usg_gb == 0, f"gather_full_optim_state_dict used {mem_usg_gb:.2f} CUDA GB, max allowed is 0"
assert cuda_gb_after > 0, "got 0 memory usage, logging is broken"
if fsdp.rank > 0:
assert sd is None
return
# Assert that the whole gathered state dict lives on CPU.
for k, v in sd["state"].items():
for buffer_name, t in v.items():
if torch.is_tensor(t):
msg = f"got device {t.device} for {k}: {buffer_name}. expected CPU"
assert t.device == torch.device("cpu"), msg
if expert_group:
sd_state = recursive_copy_to_device(sd["state"], non_blocking=False, device="cpu")
orig_state = recursive_copy_to_device(unwrapped_sd["state"], non_blocking=False, device="cpu")
assert_equal(len(sd_state.keys()), len(orig_state.keys()))
assert_equal(
sum([all_tensors_numel_except_for_step(v) for k, v in sd_state.items()]),
sum([all_tensors_numel_except_for_step(v) for k, v in orig_state.items()]),
)
return
assert "uncollected_local_ids" in sd
sd_copy = copy.deepcopy(sd)
unflat_state = sd_copy["state"]
shard_sd = fsdp.get_shard_from_optim_state_dict(sd_copy)
shard_sd = recursive_copy_to_device(shard_sd, non_blocking=False, device="cpu")
state_after_get_shard = sd_copy["state"]
# sd is changed in-place in case there are extra states.
assert not objects_are_equal(unflat_state, state_after_get_shard)
del sd_copy
assert_equal(len(sd["state"]), len(unwrapped_sd["state"]))
assert_equal(len(sd["param_groups"][0]["params"]), len(unwrapped_sd["param_groups"][0]["params"]))
assert_equal(
sum([all_tensors_numel_except_for_step(v) for k, v in sd["state"].items()]),
sum([all_tensors_numel_except_for_step(v) for k, v in unwrapped_sd["state"].items()]),
)
original_shard_sd = fsdp_optim.state_dict()
assert_equal(len(shard_sd["state"]), len(original_shard_sd["state"]))
assert_equal(shard_sd.keys(), original_shard_sd.keys())
original_shard_sd = recursive_copy_to_device(original_shard_sd, non_blocking=False, device="cpu")
# Before asserting that the dicts are equal, we check keys individually to allow nice tracebacks.
assert_equal(
[all_tensors_numel_except_for_step(v) for k, v in shard_sd["state"].items()],
[all_tensors_numel_except_for_step(v) for k, v in original_shard_sd["state"].items()],
)
assert_equal(
[v for k, v in shard_sd["param_groups"][0].items()],
[v for k, v in original_shard_sd["param_groups"][0].items()],
)
objects_are_equal(shard_sd["state"], original_shard_sd["state"], raise_exception=True)
objects_are_equal({k: shard_sd[k] for k in original_shard_sd}, original_shard_sd, raise_exception=True)
@parameterized.expand(
[(True,), (False,)],
name_func=rename_test,
)
def test_model_with_unused_params(self, wrap_l2):
"""Test handling of model with unused params by gather_full_optim_state_dict()"""
test_fn = functools.partial(self._test_model_with_unused_params, wrap_l2=wrap_l2)
spawn_and_init(test_fn, world_sizes=[2])
@classmethod
def _test_model_with_unused_params(self, rank, pg, wrap_l2):
model = ModelWithUnusedParams(wrap_l2).cuda()
data = torch.rand(4).cuda().requires_grad_(True)
model = FullyShardedDataParallel(model)
optim = SGD(model.parameters(), momentum=0.9, lr=0.1)
out = model(data).sum()
out.backward()
optim.step()
model.zero_grad(set_to_none=True)
sd = model.gather_full_optim_state_dict(optim)
if rank == 0:
shard_sd = model.get_shard_from_optim_state_dict(sd)
orig_sd = optim.state_dict()
orig_sd = recursive_copy_to_device(orig_sd, non_blocking=False, device="cpu")
objects_are_equal(shard_sd, orig_sd, raise_exception=True)
else:
assert sd is None, sd
def test_named_params_ordering(self):
"""Test assumption of consolidate_optimizer_state_dict"""
group = DummyProcessGroup(0, 1)
model = TransformerWithSharedParams(group)
named_pars = [p for n, p in model.named_parameters()]
for i, p in enumerate(model.parameters()):
objects_are_equal(p, named_pars[i], raise_exception=True)
def test_is_singleton_tensor(self):
"""Test is_singleton_tensor function"""
assert is_singleton_tensor(torch.tensor(4.0))
assert not is_singleton_tensor(torch.tensor([4.0]))
assert not is_singleton_tensor(torch.tensor([4.0, 5.0]))
assert not is_singleton_tensor([4.0])
assert not is_singleton_tensor(4.0)
class ModelWithUnusedParams(nn.Module):
def __init__(self, wrap_l2):
super().__init__()
self.l = nn.Linear(4, 4)
# The unused param must be wrapped; otherwise, due to flattening, it
# would always be considered used.
self.not_trained = nn.Linear(4, 4).requires_grad_(False)
self.not_trained = FullyShardedDataParallel(self.not_trained)
# Optionally test a used param that comes after the unused one by
# wrapping it.
self.l2 = nn.Linear(4, 4)
if wrap_l2:
# When wrapping happens, the unused param will be in the middle
# of the param list (for optimizer state dict), not at the
# end. This way, we can test the handling code in more corner
# cases.
self.l2 = FullyShardedDataParallel(self.l2)
def forward(self, x):
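# Run the frozen branch under no_grad so its parameters never receive gradients
# and thus get no optimizer state, exercising the unused-params handling.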
with torch.no_grad():
y = self.not_trained(x)
return self.l2(self.l(x)) - y
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test FSDP with multiple forward pass + checkpoint. """
import contextlib
import pickle
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
import torch.optim as optim
from fairscale.fair_dev.testing.testing import dist_init, skip_if_single_gpu, teardown, temp_files_ctx
from fairscale.internal import torch_version
from fairscale.nn import checkpoint_wrapper
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.nn.data_parallel import auto_wrap_bn
from fairscale.nn.wrap import enable_wrap, wrap
class Model(nn.Module):
"""Model to test FSDP(checkpoint())."""
def __init__(self):
super().__init__()
self.block1 = nn.Sequential(nn.Conv2d(3, 4, kernel_size=3), nn.BatchNorm2d(4), nn.ReLU(inplace=True))
self.block2 = nn.Sequential(
nn.Conv2d(4, 8, kernel_size=3),
nn.BatchNorm2d(8),
nn.ReLU(inplace=True),
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Flatten(),
)
self.head = nn.Linear(8, 10)
def forward(self, x):
if isinstance(x, torch.Tensor):
return self.head(self.block2(self.block1(x)))
elif isinstance(x, list):
ys = [self.head(self.block2(self.block1(e))) for e in x]
return torch.cat(ys, dim=0)
class Model2(nn.Module):
"""Model to test FSDP(checkpoint(), checkpoint())."""
def __init__(self):
super().__init__()
self.block1 = nn.Sequential(nn.Conv2d(3, 4, kernel_size=3), nn.BatchNorm2d(4), nn.ReLU(inplace=True))
self.block2 = nn.Sequential(nn.Conv2d(4, 4, kernel_size=3), nn.BatchNorm2d(4), nn.ReLU(inplace=False))
self.block3 = nn.Sequential(nn.Conv2d(4, 8, kernel_size=3), nn.BatchNorm2d(8), nn.ReLU(inplace=True))
self.head = nn.Sequential(nn.AdaptiveAvgPool2d(output_size=(1, 1)), nn.Flatten(), nn.Linear(8, 10))
def forward(self, x):
if isinstance(x, torch.Tensor):
return self.head(self.block3(self.block2(self.block1(x))))
elif isinstance(x, list):
ys = [self.head(self.block3(self.block2(self.block1(e)))) for e in x]
return torch.cat(ys, dim=0)
def _create_model(
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
bucket_cap_mb,
):
model = Model2() if with_model2 else Model()
fsdp_config = None
if with_sync_bn:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
fsdp_config = {
"mixed_precision": False,
"flatten_parameters": False,
"reshard_after_forward": False,
"bucket_cap_mb": 0,
"force_input_to_fp32": True, # SyncBN needs this.
}
if with_fsdp and wrap_bn:
model.block1 = auto_wrap_bn(model.block1, single_rank_pg=False, fsdp_config=fsdp_config)
model.block2 = auto_wrap_bn(model.block2, single_rank_pg=False, fsdp_config=fsdp_config)
if with_model2:
model.block3 = auto_wrap_bn(model.block3, single_rank_pg=False, fsdp_config=fsdp_config)
if with_checkpoint:
model.block2 = checkpoint_wrapper(model.block2)
if with_model2:
model.block3 = checkpoint_wrapper(model.block3)
if with_fsdp:
with enable_wrap(
wrapper_cls=FSDP,
flatten_parameters=flatten,
mixed_precision=mixed_precision,
compute_dtype=torch.float32,
fp32_reduce_scatter=fp32_reduce_scatter,
bucket_cap_mb=bucket_cap_mb,
):
model.block1 = wrap(model.block1)
model.block2 = wrap(model.block2)
if with_model2:
model.block3 = wrap(model.block3)
model.head = wrap(model.head)
return model
def _distributed_worker(
gpu_id,
world_size,
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
files,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
bucket_cap_mb,
):
filename, filename_rpc = files[:2]
filename_loss = files[2:]
torch.cuda.set_device(gpu_id)
rank = gpu_id
result = dist_init(rank, world_size, filename, filename_rpc)
assert result, "Dist init failed"
# Set this to False when debugging, since error messages are clearer with cuDNN disabled.
torch.backends.cudnn.enabled = True
# these make things deterministic.
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Ensure we have multiple forward passes.
batch = [
torch.randn(size=(2, 3, 16, 16)).cuda(),
torch.randn(size=(2, 3, 9, 9)).cuda(),
torch.randn(size=(2, 3, 9, 9)).cuda(),
]
if mixed_precision and not with_fsdp:
batch = [x.half() for x in batch]
model = _create_model(
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
bucket_cap_mb,
)
model = model.cuda()
if with_fsdp:
model = FSDP(
model,
flatten_parameters=flatten,
mixed_precision=mixed_precision,
compute_dtype=torch.float32,
fp32_reduce_scatter=fp32_reduce_scatter,
bucket_cap_mb=bucket_cap_mb,
)
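# Divide gradients by 2 only after reduction (no pre-divide), so the scaling
# matches the manual DDP all_reduce + div_(2.0) path below.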
model.set_gradient_divide_factors(1.0, 2.0, True)
no_sync_context = contextlib.suppress()
else:
# With DDP, we need no_sync and manual gradient reduction below because
# it can't handle multiple forward passes + checkpointing otherwise.
model = DistributedDataParallel(model, device_ids=[gpu_id])
no_sync_context = model.no_sync()
mp_context = contextlib.suppress()
if mixed_precision:
mp_context = torch.cuda.amp.autocast(enabled=True)
if gpu_id == 0:
print(model)
target = torch.tensor([0, 1, 2, 3, 4, 5], dtype=torch.long).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
losses = {}
i = 0
with no_sync_context:
for iteration in range(3):
with mp_context:
out = model(batch)
loss = criterion(out, target)
print("Loss", iteration, ":", loss.item())
losses[f"iter_{i}"] = loss
i += 1
optimizer.zero_grad()
loss.backward()
# Manual grad reduction, no autocast.
if not with_fsdp:
for p in model.parameters():
dist.all_reduce(p.grad.data)
p.grad.data.div_(2.0)
# Stepping, no autocast
optimizer.step()
# Due to dist.all_reduce code block above with ddp.no_sync, we seem to hit a bug
# in DDP where tensor.cpu() and torch.save() calls both hang. FSDP is not affected.
# Therefore, we have to compare losses here instead of states.
with open(filename_loss[rank], "wb") as f:
pickle.dump(losses, f)
teardown()
_result_cache = {}
def _get_cached_results(
world_size,
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
bucket_cap_mb,
):
"""Cache the training to save time. For DDP, flatten, wrap_bn etc. doesn't matter, so
the results can be cached.
"""
if not with_fsdp:
flatten = None
wrap_bn = None
fp32_reduce_scatter = None
key = (
world_size,
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
bucket_cap_mb,
)
global _result_cache
if key not in _result_cache:
# Get 2 + world_size files: 2 for dist_init and one per rank to save the losses.
with temp_files_ctx(num=2 + world_size) as temp_files:
mp.spawn(
_distributed_worker,
(
world_size,
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
temp_files,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
bucket_cap_mb,
),
nprocs=world_size,
)
final_losses = {}
for rank in range(world_size):
with open(temp_files[2 + rank], "rb") as f:
for iter_key, loss in pickle.load(f).items():
final_losses[f"rank_{rank}_{iter_key}"] = loss
_result_cache[key] = final_losses
return _result_cache[key]
@skip_if_single_gpu
@pytest.mark.parametrize("precision", ["full", "mixed"])
@pytest.mark.parametrize("flatten", ["flatten", "no_flatten"])
@pytest.mark.parametrize("wrap_bn", ["auto_wrap_bn", "no_auto_wrap_bn"])
@pytest.mark.parametrize("model_type", ["model1", "model2"])
@pytest.mark.parametrize("bn_type", ["bn", "sync_bn"])
@pytest.mark.skipif(
torch_version() >= (1, 14, 0),
reason="Tests broke in Pytorch pre-release version 1.14",
)
def test_multiple_forward_checkpoint(precision, flatten, wrap_bn, model_type, bn_type):
mixed_precision = precision == "mixed"
flatten = flatten == "flatten"
wrap_bn = wrap_bn == "auto_wrap_bn"
fp32_reduce_scatter = True if mixed_precision else None
with_model2 = model_type == "model2"
with_sync_bn = bn_type == "sync_bn"
if torch_version() >= (1, 7, 0) and torch_version() < (1, 8, 0) and with_sync_bn:
# SyncBN is buggy in 1.7, errors like:
# E File "/home/circleci/venv/lib/python3.8/site-packages/torch/nn/modules/_functions.py", line 13, in forward
# E dtype=running_mean.dtype,
# E AttributeError: 'NoneType' object has no attribute 'dtype'
pytest.skip("SyncBatchNorm in 1.7 is buggy")
if with_sync_bn and not wrap_bn:
pytest.skip("SyncBatchNorm requires auto_wrap_bn")
if torch_version() < (1, 8, 0) and flatten:
# 1.6 and 1.7 throws this error:
# RuntimeError: Trying to backward through the graph a second time, but the saved
# intermediate results have already been freed. Specify retain_graph=True when calling
# backward the first time.
pytest.skip("older pytorch throws error when flatten is used")
world_size = 2
expected_losses = None
# Ensure DDP == FSDP when modules are called multiple times per forward pass, with and without
# checkpointing, forward counters, and reducer bucketing.
#
# The bucketing check exists because the asynchronous gradient reduction it induces can interact with multiple
# forward passes in complex ways. For example, in the midst of a sharded backward pass, `parameter.grad` may only be
# `None` or an unsharded gradient tensor. The sharded tensor is then set at the end of the backwards pass. But a
# unit test with bucketing enabled might not catch violations of this invariant. For very small models, like the
# kind used in this unit test, bucketing will delay gradient reduction until after all the gradient computation is
# done. If the reduction incorrectly sets `.grad` to the _sharded_ variant, the test might not fail, since the
# gradient computations have already happened. Toggling bucketing helps verify that gradient reduction and
# computation interact correctly.
combinations = []
for with_fsdp in [False, True]:
for with_checkpoint in [False, True]:
if not with_fsdp and with_checkpoint:
continue
for with_bucketing in [False, True]:
if not with_fsdp and with_bucketing:
continue
combinations.append((with_fsdp, with_checkpoint, with_bucketing))
print("")
print("Testing the following configurations:")
for with_fsdp, with_checkpoint, with_bucketing in combinations:
print(f" fsdp {with_fsdp} ckpt {with_checkpoint} bucketing {with_bucketing}")
for with_fsdp, with_checkpoint, with_bucketing in combinations:
if with_bucketing:
bucket_cap_mb = 25
else:
bucket_cap_mb = 0
final_losses = _get_cached_results(
world_size,
with_model2,
with_sync_bn,
with_fsdp,
with_checkpoint,
mixed_precision,
flatten,
wrap_bn,
fp32_reduce_scatter,
bucket_cap_mb,
)
if expected_losses is None:
expected_losses = final_losses
else:
print(f"checking: fsdp {with_fsdp} ckpt {with_checkpoint} bucketing {with_bucketing} with ddp+no_ckpt")
def check(exp, res):
assert list(exp.keys()) == list(res.keys()), f"{list(exp.keys())} vs. {list(res.keys())}"
rtol = 1e-4
atol = 1e-5
if with_model2 and mixed_precision and torch_version() >= (1, 9, 0):
# On CI, with the longer model2, mixed precision and 1.9, even ddp vs. ddp+ckpt has
# larger errors.
rtol = 1e-3
atol = 1e-4
for key in exp.keys():
exp_loss = exp[key]
res_loss = res[key]
torch.testing.assert_allclose(exp_loss, res_loss, rtol=rtol, atol=atol)
check(expected_losses, final_losses)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from typing import Any, Callable
import pytest
import torch
from fairscale.nn.model_parallel import destroy_model_parallel
@pytest.fixture(autouse=True)
def manual_seed_zero() -> None:
torch.manual_seed(0)
def cuda_sleep_impl(seconds, cycles_per_ms):
torch.cuda._sleep(int(seconds * cycles_per_ms * 1000))
@pytest.fixture(scope="session")
def cuda_sleep() -> Callable:
# Warm-up CUDA.
torch.empty(1, device="cuda")
# From test/test_cuda.py in PyTorch.
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
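# _sleep(1000000) spins the GPU for 1e6 cycles and elapsed_time() is in milliseconds,
# so this estimates the GPU clock in cycles per millisecond.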
cycles_per_ms = 1000000 / start.elapsed_time(end)
return functools.partial(cuda_sleep_impl, cycles_per_ms=cycles_per_ms)
def pytest_report_header() -> str:
return f"torch: {torch.__version__}"
def pytest_runtest_setup(item: Any) -> None:
print("setup mpi function called")
def pytest_runtest_teardown(item: Any) -> None:
if "OMPI_COMM_WORLD_RANK" in os.environ:
destroy_model_parallel()
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
try:
torch.distributed.rpc.shutdown()
except Exception:
pass
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from fairscale.nn import Pipe
def test_simple_linears():
def sum_grad(parameters):
return sum([p.grad.sum() for p in parameters if p.grad is not None])
def zero_grad(parameters):
for p in parameters:
p.grad = None
inputs = torch.rand(8, 1)
model = nn.Sequential(
nn.Linear(1, 2),
nn.Linear(2, 4),
nn.Linear(4, 2),
nn.Linear(2, 1),
)
# Without Pipe
outputs = model(inputs)
loss = outputs.mean()
loss.backward()
grad_without_pipe = sum_grad(model.parameters())
zero_grad(model.parameters())
# With Pipe
model = Pipe(model, [2, 2], devices=["cpu", "cpu"], chunks=4)
outputs = model(inputs)
loss = outputs.mean()
loss.backward()
grad_with_pipe = sum_grad(model.parameters())
# Both grads should be identical.
assert torch.allclose(grad_with_pipe, grad_without_pipe)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from fairscale.nn.pipe import Pipe
def test_inplace_on_requires_grad():
model = nn.Sequential(nn.Linear(1, 1), nn.ReLU(inplace=True))
model = Pipe(model, [1, 1], devices=["cpu", "cpu"], checkpoint="always")
x = torch.rand(1)
y = model(x)
message = r"a leaf Variable that requires grad .* used in an in-place operation."
with pytest.raises(RuntimeError, match=message):
y.backward()
@pytest.mark.xfail(strict=True)
def test_inplace_on_not_requires_grad():
# In-place operation on a tensor not requiring grad doesn't cause a
# RuntimeError. Currently, we cannot detect this case.
model = nn.Sequential(nn.ReLU(inplace=True))
model = Pipe(model, [1], devices=["cpu"], checkpoint="always")
x = torch.rand(1)
y = model(x)
del model
message = r"a leaf Variable that requires grad .* used in an in-place operation."
with pytest.raises(RuntimeError, match=message):
y.backward()
@pytest.mark.xfail(strict=True)
def test_inplace_incorrect_grad():
class M(nn.Module):
def forward(self, foo_bar):
# 'foo' requires grad but 'bar' does not. In-place operation on
# 'bar' won't cause a RuntimeError.
foo, bar = foo_bar
# add_(1) is not idempotent, in contrast to relu_(). If it is
# executed multiple times, it accumulates each difference onto
# 'bar'.
bar.add_(1)
# 'bar' is still captured by checkpointing. 'foo' will get
# incorrect grad.
return foo * bar
model = nn.Sequential(M())
model = Pipe(model, [1], devices=["cpu"], checkpoint="always")
foo = torch.tensor([1.0], requires_grad=True)
bar = torch.tensor([1.0])
output = model((foo, bar))
del model
output.backward()
# The gradient of 'foo' should be 2, but it is actually 3 because
# bar.add_(1) was executed twice due to checkpointing.
assert foo.grad.item() == 2.0
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from fairscale.nn.pipe.copy import Copy, Wait
from fairscale.nn.pipe.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def _test_copy_wait(prev_stream, next_stream, cuda_sleep=None):
device = get_device(prev_stream)
with use_stream(prev_stream):
if is_cuda(prev_stream):
cuda_sleep(0.5)
x = torch.ones(100, device=device, requires_grad=True)
(y,) = Copy.apply(prev_stream, next_stream, x)
(y,) = Wait.apply(prev_stream, next_stream, x)
with use_stream(next_stream):
assert torch.allclose(y.sum(), torch.tensor(100.0, device=device))
y.norm().backward()
with use_stream(prev_stream):
assert torch.allclose(x.grad.sum(), torch.tensor(10.0, device=device))
def test_copy_wait_cpu_cpu():
prev_stream = CPUStream
next_stream = CPUStream
_test_copy_wait(prev_stream, next_stream)
@skip_if_no_cuda
def test_copy_wait_cpu_cuda(cuda_sleep):
prev_stream = CPUStream
next_stream = current_stream(torch.device("cuda"))
_test_copy_wait(prev_stream, next_stream, cuda_sleep)
@skip_if_no_cuda
def test_copy_wait_cuda_cpu(cuda_sleep):
prev_stream = current_stream(torch.device("cuda"))
next_stream = CPUStream
_test_copy_wait(prev_stream, next_stream, cuda_sleep)
@skip_if_no_cuda
def test_copy_wait_cuda_cuda(cuda_sleep):
prev_stream = current_stream(torch.device("cuda"))
next_stream = new_stream(torch.device("cuda"))
_test_copy_wait(prev_stream, next_stream, cuda_sleep)
def test_wait_multiple_tensors():
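# Wait applied to several tensors at once should produce outputs that share a
# single backward node, as the asserts below check.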
a = torch.rand(1, requires_grad=True)
b = torch.rand(1, requires_grad=True)
a, b = Wait.apply(CPUStream, CPUStream, a, b)
assert a.grad_fn is b.grad_fn
assert a.grad_fn.__class__ is Wait._backward_cls
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests/__init__.py lets pytest import the application without a custom sys.path or PYTHONPATH.
# See also: https://docs.pytest.org/en/latest/goodpractices.html
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import weakref
import pytest
import torch
from fairscale.nn.pipe.dependency import Fork, Join, fork, join
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_fork_join():
logs = []
class Log(torch.autograd.Function):
@staticmethod
def forward(ctx, number, tensor):
ctx.number = number
return tensor.detach()
@staticmethod
def backward(ctx, grad):
logs.append(ctx.number)
return None, grad
a = torch.rand(1, device="cpu", requires_grad=True)
b = torch.rand(1, device="cuda", requires_grad=True)
a = Log.apply(1, a)
a, phony = fork(a)
b = join(a, phony)
b = Log.apply(2, b)
b = b.to("cpu")
(a + b).backward()
assert logs == [2, 1]
def test_fork_join_enable_grad():
x = torch.rand(1, requires_grad=True)
with torch.enable_grad():
x2, p = fork(x)
assert p.requires_grad
assert x2 is not x
x = x2
assert x.requires_grad
assert p.requires_grad
assert x.grad_fn.__class__ is Fork._backward_cls
assert p.grad_fn.__class__ is Fork._backward_cls
with torch.enable_grad():
x2 = join(x, p)
assert x2 is not x
x = x2
assert x.requires_grad
assert x.grad_fn.__class__ is Join._backward_cls
def test_fork_join_no_grad(monkeypatch):
def do_not_apply(*args):
raise AssertionError("Function.apply called")
monkeypatch.setattr("torch.autograd.Function.apply", do_not_apply)
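# With grad disabled, fork/join should be pass-throughs that never build autograd
# nodes, so Function.apply must not be called and the original tensors come back.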
x = torch.rand(1, requires_grad=True)
with torch.no_grad():
x2, p = fork(x)
assert not p.requires_grad
assert x2 is x
x = x2
with torch.no_grad():
x2 = join(x, p)
assert x2 is x
x = x2
def test_fork_leak():
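# The autograd context captured during backward should not be kept alive;
# the weakref taken in backward must be dead once the graph is released.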
leak = None
class F(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad):
nonlocal leak
leak = weakref.ref(ctx)
return grad
x = torch.rand(1, requires_grad=True)
x = F.apply(x)
x, phony = fork(x)
x = join(x, phony)
x.backward()
del x, phony
assert leak() is None
def test_join_when_fork_not_requires_grad():
x = torch.rand(2, 1)
a, b = x.chunk(2)
assert not a.requires_grad
a, p = fork(a)
assert not a.requires_grad
assert not p.requires_grad
assert not b.requires_grad
b = join(b, p)
assert not b.requires_grad
def test_join_when_fork_requires_grad():
x = torch.rand(2, 1)
a, b = x.chunk(2)
a.requires_grad_()
assert a.requires_grad
a, p = fork(a)
assert a.requires_grad
assert p.requires_grad
assert not b.requires_grad
b = join(b, p)
assert b.requires_grad
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
import torch.cuda
from fairscale.nn.pipe.microbatch import Batch, check, gather, scatter
def test_batch_atomic():
x = torch.tensor(42)
b = Batch(x, 0)
assert b.atomic
assert b.tensor is x
with pytest.raises(AttributeError):
b.tensors
assert list(b) == [x]
assert len(b) == 1
assert b[0] is x
def test_batch_non_atomic():
x, y = torch.tensor(42), torch.tensor(21)
b = Batch((x, y), 0)
assert not b.atomic
with pytest.raises(AttributeError):
b.tensor
assert b.tensors == (x, y)
assert list(b) == [x, y]
assert len(b) == 2
assert b[0] is x
assert b[1] is y
def test_batch_call():
a = Batch(torch.tensor(42), 0)
b = Batch((torch.tensor(42), torch.tensor(21)), 0)
def f(x):
return x
assert a.call(f).atomic
assert not b.call(f).atomic
def test_batch_setitem_by_index():
a = Batch(torch.tensor(42), 0)
b = Batch((torch.tensor(42), torch.tensor(21)), 0)
a[0] = torch.tensor(0)
b[0] = torch.tensor(0)
assert a.atomic
assert a[0].item() == 0
assert not b.atomic
assert len(b) == 2
assert b[0].item() == 0
assert b[1].item() == 21
def test_batch_setitem_by_slice():
a = Batch(torch.tensor(42), 0)
b = Batch((torch.tensor(42), torch.tensor(21)), 0)
a[:] = (torch.tensor(0),)
b[:] = (torch.tensor(0),)
assert a.atomic
assert a[0].item() == 0
assert not b.atomic
assert len(b) == 1
assert b[0].item() == 0
def test_check():
check(torch.tensor(42))
check((torch.tensor(4), torch.tensor(2)))
with pytest.raises(TypeError):
check(42)
with pytest.raises(TypeError):
check("str")
with pytest.raises(TypeError):
check((torch.tensor(4), 2))
def test_gather_tensors():
a = torch.zeros(1, 1)
b = torch.zeros(1, 1)
ab = gather([Batch(a, 0), Batch(b, 0)])
assert ab.size() == (2, 1)
def test_gather_tuples():
a = (torch.zeros(1, 1), torch.zeros(2, 2))
b = (torch.zeros(1, 1), torch.zeros(2, 2))
ab = gather([Batch(a, 0), Batch(b, 0)])
assert isinstance(ab, tuple)
assert ab[0].size() == (2, 1)
assert ab[1].size() == (4, 2)
def test_scatter_tensor():
ab = torch.zeros(2, 1)
a, b = scatter(ab, chunks=2)
assert a.tensor.size() == (1, 1)
assert b.tensor.size() == (1, 1)
def test_scatter_tuple():
ab = (torch.zeros(2, 1), torch.zeros(4, 2))
a, b = scatter(ab, chunks=2)
assert a.tensors[0].size() == (1, 1)
assert b.tensors[0].size() == (1, 1)
assert a.tensors[1].size() == (2, 2)
assert b.tensors[1].size() == (2, 2)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from itertools import chain
import pytest
import torch
from torch import nn, optim
from fairscale.nn.pipe.batchnorm import DeferredBatchNorm
CHUNKS = 4
def tilt_dist(input):
# Tilt variance by channel.
rgb = input.transpose(0, 1)
rgb[0] *= 1
rgb[1] *= 10
rgb[2] *= 100
# Tilt mean by single batch.
for i, single in enumerate(input):
single += 2**i
return input
def chunked_forward(model, input, chunks=CHUNKS):
output_chunks = []
for chunk in input.chunk(chunks):
output_chunks.append(model(chunk))
return torch.cat(output_chunks)
@pytest.mark.parametrize("chunks", [1, 4])
@pytest.mark.parametrize("input_requires_grad", [True, False])
def test_transparency(chunks, input_requires_grad):
bn = nn.BatchNorm2d(3)
dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=chunks)
input1 = torch.rand(16, 3, 224, 224)
input1 = tilt_dist(input1)
input2 = input1.clone()
input1.requires_grad = input_requires_grad
input2.requires_grad = input_requires_grad
output1 = chunked_forward(bn, input1, chunks=chunks)
output2 = chunked_forward(dbn, input2, chunks=chunks)
assert torch.allclose(output1, output2, atol=1e-4)
output1.mean().backward()
output2.mean().backward()
assert torch.allclose(bn.weight.grad, dbn.weight.grad, atol=1e-4)
if input_requires_grad:
assert input1.grad is not None
assert input2.grad is not None
assert torch.allclose(input1.grad, input2.grad, atol=1e-4)
@pytest.mark.parametrize("momentum", [0.1, None])
def test_running_stats(momentum):
bn = nn.BatchNorm2d(3, momentum=momentum)
dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)
input = torch.rand(16, 3, 224, 224)
input = tilt_dist(input)
bn(input)
chunked_forward(dbn, input)
assert torch.allclose(bn.running_mean, dbn.running_mean, atol=1e-4)
assert torch.allclose(bn.running_var, dbn.running_var, atol=1e-4)
def test_convert_deferred_batch_norm():
bn = nn.BatchNorm2d(3, track_running_stats=False)
bn = DeferredBatchNorm.convert_deferred_batch_norm(bn, chunks=CHUNKS)
assert type(bn) is nn.BatchNorm2d # because of track_running_stats=False
dbn = DeferredBatchNorm(3, chunks=CHUNKS)
dbn_again = DeferredBatchNorm.convert_deferred_batch_norm(dbn, chunks=CHUNKS)
assert dbn is dbn_again
dbn_again = DeferredBatchNorm.convert_deferred_batch_norm(dbn, chunks=CHUNKS + 1)
assert dbn is not dbn_again # because of different chunks
def test_eval():
bn = nn.BatchNorm2d(3)
dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)
input = torch.rand(16, 3, 224, 224)
input = tilt_dist(input)
bn(input)
chunked_forward(dbn, input)
bn.eval()
dbn.eval()
assert torch.allclose(bn(input), dbn(input), atol=1e-4)
def test_optimize():
bn = nn.BatchNorm2d(3)
dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)
opt = optim.SGD(chain(bn.parameters(), dbn.parameters()), lr=1.0)
for i in range(5):
input = torch.rand(16, 3, 224, 224)
input = tilt_dist(input)
# train
y = bn(input)
a = y.sum()
a.backward()
y = chunked_forward(dbn, input)
b = y.sum()
b.backward()
opt.step()
# eval
bn.eval()
dbn.eval()
with torch.no_grad():
assert torch.allclose(bn(input), dbn(input), atol=1e-1 * (10**i))
def test_conv_bn():
bn = nn.Sequential(nn.Conv2d(3, 3, 1), nn.BatchNorm2d(3))
dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)
input = torch.rand(16, 3, 224, 224)
input = tilt_dist(input)
opt = optim.SGD(chain(bn.parameters(), dbn.parameters()), lr=0.1)
# 1st step
a = bn(input)
b = chunked_forward(dbn, input)
# Outputs are different. (per-mini-batch vs. per-micro-batch)
assert not torch.allclose(a, b)
a.sum().backward()
b.sum().backward()
opt.step()
opt.zero_grad()
# Conv layers are also trained differently because of their different outputs.
assert not torch.allclose(bn[0].weight, dbn[0].weight)
# But BNs track identical running stats.
assert torch.allclose(bn[1].running_mean, dbn[1].running_mean, atol=1e-4)
assert torch.allclose(bn[1].running_var, dbn[1].running_var, atol=1e3)
# 2nd step
a = bn(input)
b = chunked_forward(dbn, input)
a.sum().backward()
b.sum().backward()
# BNs can't track identical running stats due to the different conv layers.
assert not torch.allclose(bn[1].running_mean, dbn[1].running_mean, atol=1e-4)
assert not torch.allclose(bn[1].running_var, dbn[1].running_var, atol=1e3)
def test_input_requiring_grad():
dbn = DeferredBatchNorm(3, chunks=CHUNKS)
input = torch.rand(16, 3, 224, 224)
input = tilt_dist(input)
input.requires_grad = True
chunked_forward(dbn, input)
assert not dbn.sum.requires_grad
assert dbn.sum.grad_fn is None
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import pytest
import torch
from fairscale.nn.pipe.microbatch import Batch
from fairscale.nn.pipe.stream import CPUStream
from fairscale.nn.pipe.worker import Task, spawn_workers
class fake_device:
"""A test double for :class:`torch.device`. Every fake device is different
with each other.
"""
type = "fake"
index = None
def test_join_running_workers():
count = 0
def counter():
nonlocal count
time.sleep(0.1)
count += 1
return Batch((), 0)
with spawn_workers([fake_device() for _ in range(10)]) as (in_queues, out_queues):
def call_in_worker(i, f):
task = Task(CPUStream, compute=f, finalize=None)
in_queues[i].put(task)
for i in range(10):
call_in_worker(i, counter)
# There's no nondeterminism because 'spawn_workers' joins all running
# workers.
assert count == 10
def test_join_running_workers_with_exception():
class ExpectedException(Exception):
pass
count = 0
def counter():
nonlocal count
time.sleep(0.1)
count += 1
return Batch((), 0)
with pytest.raises(ExpectedException):
with spawn_workers([fake_device() for _ in range(10)]) as (in_queues, out_queues):
def call_in_worker(i, f):
task = Task(CPUStream, compute=f, finalize=None)
in_queues[i].put(task)
for i in range(10):
call_in_worker(i, counter)
raise ExpectedException
# There's no nondeterminism because only 1 task can be placed in input
# queues.
assert count == 10
def test_compute_multithreading():
"""Task.compute should be executed on multiple threads."""
thread_ids = set()
def log_thread_id():
thread_id = threading.current_thread().ident
thread_ids.add(thread_id)
return Batch((), 0)
with spawn_workers([fake_device() for _ in range(2)]) as (in_queues, out_queues):
for i in range(2):
t = Task(CPUStream, compute=log_thread_id, finalize=None)
in_queues[i].put(t)
for i in range(2):
out_queues[i].get()
assert len(thread_ids) == 2
def test_compute_success():
"""Task.compute returns (True, (task, batch)) on success."""
def _42():
return Batch(torch.tensor(42), 0)
with spawn_workers([torch.device("cpu")]) as (in_queues, out_queues):
t = Task(CPUStream, compute=_42, finalize=None)
in_queues[0].put(t)
ok, (task, batch) = out_queues[0].get()
assert ok
assert task is t
assert isinstance(batch, Batch)
assert batch[0].item() == 42
def test_compute_exception():
"""Task.compute returns (False, exc_info) on failure."""
def zero_div():
0 / 0
with spawn_workers([torch.device("cpu")]) as (in_queues, out_queues):
t = Task(CPUStream, compute=zero_div, finalize=None)
in_queues[0].put(t)
ok, exc_info = out_queues[0].get()
assert not ok
assert isinstance(exc_info, tuple)
assert issubclass(exc_info[0], ZeroDivisionError)
@pytest.mark.parametrize("grad_mode", [True, False])
def test_grad_mode(grad_mode):
def detect_grad_enabled():
x = torch.rand(1, requires_grad=torch.is_grad_enabled())
return Batch(x, 0)
with torch.set_grad_enabled(grad_mode):
with spawn_workers([torch.device("cpu")]) as (in_queues, out_queues):
task = Task(CPUStream, compute=detect_grad_enabled, finalize=None)
in_queues[0].put(task)
ok, (_, batch) = out_queues[0].get()
assert ok
assert batch[0].requires_grad == grad_mode
def test_worker_per_device():
cpu = torch.device("cpu")
cpu0 = torch.device("cpu", index=0)
fake1 = fake_device()
fake2 = fake_device()
with spawn_workers([cpu, cpu, cpu0, fake1, fake2]) as (in_queues, out_queues):
assert len(in_queues) == len(out_queues) == 5
# 0: cpu, 1: cpu, 2: cpu0
assert in_queues[0] is in_queues[1] is in_queues[2]
assert out_queues[0] is out_queues[1] is out_queues[2]
# 3: fake1, 4: fake2
assert in_queues[3] is not in_queues[4]
assert out_queues[3] is not out_queues[4]
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from fairscale.nn.pipe.phony import get_phony
def test_phony_size():
p = get_phony(torch.device("cpu"), requires_grad=False)
assert p.size() == (1,)
def test_phony_requires_grad():
p1 = get_phony(torch.device("cpu"), requires_grad=True)
p2 = get_phony(torch.device("cpu"), requires_grad=False)
assert p1.requires_grad
assert not p2.requires_grad
def test_cached_phony():
p1 = get_phony(torch.device("cpu"), requires_grad=True)
p2 = get_phony(torch.device("cpu"), requires_grad=True)
assert p1 is p2
p3 = get_phony(torch.device("cpu"), requires_grad=False)
p4 = get_phony(torch.device("cpu"), requires_grad=False)
assert p3 is p4
assert p1 is not p3
def test_phony_in_autograd_function():
class Phonify(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
phony = get_phony(input.device, requires_grad=False)
return phony.detach()
x = torch.rand(1, requires_grad=True)
p1 = Phonify.apply(x)
p2 = get_phony(torch.device("cpu"), requires_grad=True)
assert p1 is not p2
assert p1.grad_fn is not None
assert p2.grad_fn is None
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
import torch.nn.functional as F
from fairscale.fair_dev.testing.testing import skip_if_single_gpu
from fairscale.nn.pipe import Pipe
def test_python_autograd_function():
# A Python autograd function might fail with this error:
#
# RuntimeError: Returning Variables sharing storage with other Variables
# that require grad is not supported in Python functions. Please submit a
# feature request if you hit this error.
#
# It doesn't look like an essential restriction, but it happens on the
# current PyTorch version. To avoid it, identity autograd functions such as
# Wait, Fork, and Join should detach the tensor before returning it.
#
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad):
return grad
class M(nn.Module):
def forward(self, input):
return Identity.apply(input)
model = nn.Sequential(M(), M())
model = Pipe(model, [1, 1], devices=["cpu", "cpu"], checkpoint="always")
x = torch.rand(42)
y = model(x)
assert torch.allclose(x, y)
def test_exception_no_hang():
# In v0.0.2, once a failed partition received a normal (non-closing) message
# for the next micro-batch, a hang occurred. The reason was that a failed
# partition didn't call in_queue.task_done() on a normal message, so the
# preceding partition was blocked at out_queue.join() for the micro-batch
# after next.
class ExpectedException(Exception):
pass
class Pass(nn.Module):
def forward(self, x):
return x
class Raise(nn.Module):
def forward(self, x):
raise ExpectedException()
model = nn.Sequential(Pass(), Pass(), Raise())
model = Pipe(model, [1, 1, 1], devices=["cpu", "cpu", "cpu"], chunks=3)
with pytest.raises(ExpectedException):
model(torch.rand(3))
@skip_if_single_gpu
def test_tuple_wait(cuda_sleep):
# In v0.0.3, Wait is applied to only the first tensor on a micro-batch.
# Under this behavior, if checkpointing was disabled, there's a possibility
# that gradient accumulations on other tensors are not synchronized
# properly to the copy stream.
class Sleep(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.detach()
@staticmethod
def backward(ctx, grad):
with torch.cuda.device(grad.device):
cuda_sleep(0.05)
return grad
class Layer1(nn.Module):
def forward(self, pair):
a, b = pair
return a * 1, b * 2, b * 3
class Layer2(nn.Module):
def forward(self, triple):
a, b, c = triple
b = Sleep.apply(b)
return a + b + c
model = nn.Sequential(Layer1(), Layer2())
model = Pipe(model, [1, 1], devices=[0, 1], chunks=32, checkpoint="never")
a = torch.rand(1024, 3, 32, 32, device=0, requires_grad=True)
b = torch.rand(1024, 3, 32, 32, device=0, requires_grad=True)
y = model((a, b))
y.norm().backward()
torch.cuda.synchronize(0)
torch.cuda.synchronize(1)
assert torch.isclose(b.grad.norm().cpu(), torch.tensor(5.000))
def test_parallel_randoms():
class Dropouts(nn.Module):
def forward(self, x):
for _ in range(100):
x = F.dropout(x, p=0.001)
return x
model = nn.Sequential(Dropouts(), Dropouts())
x = torch.rand(10, 10, requires_grad=True)
model = Pipe(model, [1, 1], devices=["cpu", "cpu"], chunks=10, checkpoint="always")
y = model(x)
y.norm().backward()
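# With checkpointing, the dropout masks must be replayed during recomputation, so the
# zero pattern of the output should match the zero pattern of the input gradient.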
assert y.to(torch.bool).tolist() == x.grad.to(torch.bool).tolist()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Testing Pipe Module Parity
"""
import contextlib
import copy
import numpy as np
import pytest
import torch
from fairscale.fair_dev.testing.testing import skip_if_single_gpu
from fairscale.nn import Pipe
def _get_model(num_inputs=2, num_hidden=20, num_outputs=2):
num_layers = torch.cuda.device_count() - 2
model = torch.nn.Sequential(
torch.nn.Linear(num_inputs, num_hidden),
*([torch.nn.Linear(num_hidden, num_hidden) for _ in range(num_layers)]),
torch.nn.Linear(num_hidden, num_outputs),
)
return model
def _check_parity(rmodel, pmodel, ropt, popt, rloss, ploss):
for pparams, rparams in zip(pmodel.parameters(), rmodel.parameters()):
assert torch.allclose(pparams.cuda(), rparams, atol=1e-2), f"Model params are different {pparams} {rparams}"
for p_pg, reg_pg in zip(popt.param_groups, ropt.param_groups):
for p_param, reg_param in zip(p_pg["params"], reg_pg["params"]):
assert torch.allclose(
p_param.cuda(), reg_param, atol=1e-2
), f"Model parameters differ between Pipe and Vanilla {p_param} {reg_param}"
for p_buf, reg_buf in zip(pmodel.buffers(), rmodel.buffers()):
assert torch.allclose(p_buf.cuda(), reg_buf, atol=1e-2), "Model buffers differ between Pipe and Vanilla."
def _get_fp16_context(use_fp16=False):
if use_fp16:
return torch.cuda.amp.autocast()
else:
return contextlib.nullcontext()
def _train(model, optimizer, use_fp16):
inputs = torch.ones(32, 2).cuda()
labels = torch.ones(32, 2)
loss_fn = torch.nn.MSELoss(reduction="sum")
model.train()
with _get_fp16_context(use_fp16):
pred = model(inputs)
loss = loss_fn(pred, labels.to(pred.device))
loss.backward()
optimizer.step()
return model, optimizer, loss
def _train_reg_model(model, use_fp16=False):
model = copy.deepcopy(model)
model = model.cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
return _train(model, optimizer, use_fp16)
def _train_pipe_model(model, use_fp16=False, checkpoint="never", chunks=1):
model = copy.deepcopy(model)
model = Pipe(
model,
balance=[1] * torch.cuda.device_count(),
devices=list(range(torch.cuda.device_count())),
chunks=chunks,
checkpoint=checkpoint,
)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
return _train(model, optimizer, use_fp16)
@skip_if_single_gpu
@pytest.mark.parametrize("use_fp16", [True, False])
@pytest.mark.parametrize("checkpoint", ["always", "except_last", "never"])
@pytest.mark.parametrize("chunks", [1, 4])
def test_correctness(use_fp16, checkpoint, chunks):
torch.manual_seed(0)
np.random.seed(0)
if use_fp16 and not hasattr(torch.cuda.amp, "custom_fwd"):
pytest.skip(f"AMP APIs are not supported in torch version {torch.__version__}")
model = _get_model()
rmodel, ropt, rloss = _train_reg_model(model)
pmodel, popt, ploss = _train_pipe_model(
model,
use_fp16=use_fp16,
checkpoint=checkpoint,
chunks=chunks,
)
_check_parity(rmodel, pmodel, ropt, popt, rloss, ploss)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import pytest
import torch
from torch import nn
from fairscale.nn.pipe.balance import balance_by_size, balance_by_time, blockpartition
from fairscale.nn.pipe.balance.profile import layerwise_sandbox
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
devices = ["cpu"]
if torch.cuda.is_available():
devices.append("cuda")
def test_blockpartition():
assert blockpartition.solve([1, 2, 3, 4, 5, 6], partitions=2) == [[1, 2, 3, 4], [5, 6]]
def test_blockpartition_zeros():
assert blockpartition.solve([0, 0], partitions=2) == [[0], [0]]
def test_blockpartition_non_positive_partitions():
with pytest.raises(ValueError):
blockpartition.solve([42], partitions=0)
with pytest.raises(ValueError):
blockpartition.solve([42], partitions=-1)
def test_blockpartition_short_sequence():
with pytest.raises(ValueError):
blockpartition.solve([], partitions=1)
with pytest.raises(ValueError):
blockpartition.solve([42], partitions=2)
@pytest.mark.parametrize("device", devices)
def test_balance_by_time(device):
class Delay(nn.Module):
def __init__(self, seconds):
super().__init__()
self.seconds = seconds
def forward(self, x):
time.sleep(self.seconds)
return x
model = nn.Sequential(*[Delay(i / 100) for i in [1, 2, 3, 4, 5, 6]])
sample = torch.rand(1)
balance = balance_by_time(2, model, sample, device=device)
assert balance == [4, 2]
def test_balance_by_time_loop_resets_input():
    # nn.Flatten was introduced in PyTorch 1.2.0.
class Flatten(nn.Module):
def forward(self, x):
return x.flatten(1)
model = nn.Sequential(nn.Conv2d(3, 2, 1), Flatten(), nn.Linear(128, 10))
sample = torch.rand(10, 3, 8, 8)
balance = balance_by_time(2, model, sample, device="cpu")
assert balance == [1, 2]
@skip_if_no_cuda
def test_balance_by_size_latent():
class Expand(nn.Module):
def __init__(self, times):
super().__init__()
self.times = times
def forward(self, x):
for i in range(self.times):
x = x + torch.rand_like(x, requires_grad=True)
return x
sample = torch.rand(10, 100, 100)
model = nn.Sequential(*[Expand(i) for i in [1, 2, 3, 4, 5, 6]])
balance = balance_by_size(2, model, sample)
assert balance == [4, 2]
model = nn.Sequential(*[Expand(i) for i in [6, 5, 4, 3, 2, 1]])
balance = balance_by_size(2, model, sample)
assert balance == [2, 4]
@skip_if_no_cuda
def test_balance_by_size_param():
model = nn.Sequential(*[nn.Linear(i + 1, i + 2) for i in range(6)])
sample = torch.rand(7, 1)
balance = balance_by_size(2, model, sample, param_scale=100)
assert balance == [4, 2]
model = nn.Sequential(*[nn.Linear(i + 2, i + 1) for i in reversed(range(6))])
sample = torch.rand(1, 7)
balance = balance_by_size(2, model, sample, param_scale=100)
assert balance == [2, 4]
@skip_if_no_cuda
def test_balance_by_size_param_scale():
class Tradeoff(nn.Module):
def __init__(self, param_size, latent_size):
super().__init__()
self.fc = nn.Linear(param_size, param_size)
self.latent_size = latent_size
def forward(self, x):
for i in range(self.latent_size):
x = x + torch.rand_like(x, requires_grad=True)
return x
model = nn.Sequential(
Tradeoff(param_size=1, latent_size=6),
Tradeoff(param_size=2, latent_size=5),
Tradeoff(param_size=3, latent_size=4),
Tradeoff(param_size=4, latent_size=3),
Tradeoff(param_size=5, latent_size=2),
Tradeoff(param_size=6, latent_size=1),
)
sample = torch.rand(1, requires_grad=True)
balance = balance_by_size(2, model, sample, param_scale=0)
assert balance == [2, 4]
balance = balance_by_size(2, model, sample, param_scale=100)
assert balance == [4, 2]
@pytest.mark.parametrize("device", devices)
def test_layerwise_sandbox(device):
model = nn.Sequential(nn.Conv2d(3, 3, 1), nn.BatchNorm2d(3))
model.eval()
for layer in layerwise_sandbox(model, torch.device(device)):
assert layer.training
assert all(p.device.type == device for p in layer.parameters())
assert all(not l.training for l in model)
assert all(p.device.type == "cpu" for p in model.parameters())
@pytest.mark.parametrize("device", devices)
def test_sandbox_during_profiling(device):
model = nn.Sequential(nn.BatchNorm2d(3))
before = {k: v.clone() for k, v in model.state_dict().items()}
sample = torch.rand(1, 3, 10, 10)
balance_by_time(1, model, sample, device=device)
after = model.state_dict()
assert before.keys() == after.keys()
for key, value in before.items():
assert torch.allclose(after[key], value), key
def test_not_training():
class AssertTraining(nn.Module):
def forward(self, x):
assert self.training
return x
model = nn.Sequential(AssertTraining())
model.eval()
assert not model.training
sample = torch.rand(1)
balance_by_time(1, model, sample, device="cpu")
assert not model.training
def test_balance_by_time_tuple():
class Twin(nn.Module):
def forward(self, x):
return x, x.detach()
class Add(nn.Module):
def forward(self, a_b):
a, b = a_b
return a + b
model = nn.Sequential(Twin(), Add())
sample = torch.rand(1, requires_grad=True)
balance_by_time(1, model, sample, device="cpu")
@skip_if_no_cuda
def test_balance_by_size_tuple():
class Twin(nn.Module):
def forward(self, x):
return x, x.detach()
class Add(nn.Module):
def forward(self, a_b):
a, b = a_b
return a + b
model = nn.Sequential(Twin(), Add())
sample = torch.rand(1, requires_grad=True)
balance_by_size(1, model, sample)
def test_already_has_grad():
model = nn.Sequential(nn.Conv2d(3, 3, 1))
sample = torch.rand(1, 3, 32, 32)
model(sample).norm().backward()
with pytest.raises(ValueError, match="some parameter already has gradient"):
balance_by_time(1, model, sample, device="cpu")
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import torch
from torch import nn
from fairscale.nn.pipe import Pipe
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_parameters():
model = nn.Sequential(nn.Linear(1, 1))
pipe = Pipe(model, balance=[1], devices=["cpu"], chunks=1)
assert list(pipe.parameters()) != []
def test_public_attrs():
class MyString:
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
model = nn.Sequential(nn.Linear(1, 1))
pipe = Pipe(model, balance=(1,), devices=("cpu",), chunks=42.000, checkpoint=MyString("always"))
assert pipe.balance == [1]
assert pipe.devices == [torch.device("cpu")]
assert pipe.chunks == 42
assert isinstance(pipe.chunks, int)
assert pipe.checkpoint == "always"
assert isinstance(pipe.checkpoint, str)
@pytest.mark.parametrize("balance", [[2], [1, 1]])
def test_sequential_like(balance):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = Pipe(model, balance, devices=["cpu", "cpu"])
assert len(model) == 2
assert list(model) == [a, b]
assert model[0] is a
assert model[1] is b
with pytest.raises(IndexError):
_ = model[2]
assert model[-1] is b
assert model[-2] is a
def test_balance_wrong_length():
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
with pytest.raises(ValueError):
Pipe(model, balance=[1])
with pytest.raises(ValueError):
Pipe(model, balance=[3])
def test_balance_less_than_1():
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
with pytest.raises(ValueError):
Pipe(model, balance=[0, 2])
with pytest.raises(ValueError):
Pipe(model, balance=[-1, 3])
def test_chunks_less_than_1():
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(ValueError):
Pipe(model, balance=[1], devices=["cpu"], chunks=0)
with pytest.raises(ValueError):
Pipe(model, balance=[1], devices=["cpu"], chunks=-1)
def test_too_few_devices():
model = nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 1), nn.Linear(1, 1), nn.Linear(1, 1))
with pytest.raises(IndexError):
# len(balance) > len(devices)
model = Pipe(model, balance=[1, 1, 1, 1], devices=["cpu"])
def test_batch_size_indivisible():
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, balance=[1], devices=["cpu"], chunks=4)
with pytest.warns(None) as record:
model(torch.rand(7, 1))
# Indivisible batch size is legal.
assert not record
def test_batch_size_small():
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, balance=[1], devices=["cpu"], chunks=4)
with pytest.warns(None) as record:
model(torch.rand(2, 1))
# Batch size smaller than chunks is legal.
assert not record
def test_checkpoint_mode():
def count_grad_fn(grad_fn, name, visited=set()):
if grad_fn in visited:
return 0
visited.add(grad_fn)
if grad_fn is None:
return 0
if grad_fn.__class__.__name__ == name:
return 1
counter = 0
for next_grad_fn, _ in grad_fn.next_functions:
counter += count_grad_fn(next_grad_fn, name, visited=visited)
return counter
model = nn.Sequential(nn.Linear(1, 1))
input = torch.rand(2, 1)
always = Pipe(model, balance=[1], devices=["cpu"], chunks=2, checkpoint="always")
except_last = Pipe(model, balance=[1], devices=["cpu"], chunks=2, checkpoint="except_last")
never = Pipe(model, balance=[1], devices=["cpu"], chunks=2, checkpoint="never")
always_output = always(input)
except_last_output = except_last(input)
never_output = never(input)
assert count_grad_fn(always_output.grad_fn, "CheckpointBackward") == 2
assert count_grad_fn(except_last_output.grad_fn, "CheckpointBackward") == 1
assert count_grad_fn(never_output.grad_fn, "CheckpointBackward") == 0
def test_checkpoint_mode_invalid():
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(ValueError, match="checkpoint is not one of 'always', 'except_last', or 'never'"):
Pipe(model, balance=[1], devices=["cpu"], chunks=2, checkpoint="INVALID_CHECKPOINT")
def test_checkpoint_mode_when_chunks_1():
model = nn.Sequential(nn.Linear(1, 1))
# All checkpoint modes are fine.
Pipe(model, balance=[1], devices=["cpu"], chunks=1, checkpoint="except_last")
Pipe(model, balance=[1], devices=["cpu"], chunks=1, checkpoint="always")
Pipe(model, balance=[1], devices=["cpu"], chunks=1, checkpoint="never")
def test_checkpoint_eval():
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, balance=[1], devices=["cpu"], chunks=2)
input = torch.rand(2, 1)
def find_grad_fn(grad_fn, name):
if grad_fn is None:
return False
if grad_fn.__class__.__name__ == name:
return True
for next_grad_fn, _ in grad_fn.next_functions:
if find_grad_fn(next_grad_fn, name):
return True
return False
model.train()
train_output = model(input)
assert find_grad_fn(train_output.grad_fn, "CheckpointBackward")
assert find_grad_fn(train_output.grad_fn, "RecomputeBackward")
model.eval()
eval_output = model(input)
assert not find_grad_fn(eval_output.grad_fn, "CheckpointBackward")
assert not find_grad_fn(eval_output.grad_fn, "RecomputeBackward")
def test_checkpoint_non_float_input():
class ForkNonFloat(nn.Module):
def forward(self, input):
return (input * 2, torch.tensor([False]))
class JoinNonFloat(nn.Module):
def forward(self, input):
return input[0] * 2
model = nn.Sequential(ForkNonFloat(), JoinNonFloat())
model = Pipe(model, balance=[1, 1], devices=["cpu", "cpu"], chunks=1, checkpoint="always")
input = torch.rand(1, requires_grad=True)
output = model(input)
output.backward()
def test_no_grad():
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, balance=[1], devices=["cpu"], chunks=2)
input = torch.rand(2, 1)
latent = None
def hook(module, input, output):
_ = module
_ = input
nonlocal latent
latent = output
partition = model.partitions[0]
partition.register_forward_hook(hook)
with torch.no_grad():
model(input)
assert latent.grad_fn is None
def test_exception():
class ExpectedException(Exception):
pass
class Raise(nn.Module):
def forward(self, *_):
raise ExpectedException()
model = nn.Sequential(Raise())
model = Pipe(model, balance=[1], devices=["cpu"], chunks=1)
with pytest.raises(ExpectedException):
model(torch.rand(1))
def test_exception_early_stop_asap():
"""Even the first partitions have finished to process, the partition before
the failed partition should be killed as soon as possible.
"""
class ExpectedException(Exception):
pass
class Pass(nn.Module):
def forward(self, x):
return x
counter = 0
class Counter(nn.Module):
def forward(self, x):
time.sleep(0.1)
nonlocal counter
counter += 1
return x
class Raise(nn.Module):
def forward(self, x):
raise ExpectedException()
model = nn.Sequential(Pass(), Pass(), Counter(), Raise())
model = Pipe(model, [1, 1, 1, 1], devices=["cpu", "cpu", "cpu", "cpu"], chunks=3)
with pytest.raises(ExpectedException):
model(torch.rand(3))
# If the early stop doesn't work, it would be 3 instead.
assert counter == 2
def test_input_pair():
class Two(nn.Module):
def __init__(self):
super().__init__()
self.fc_a = nn.Linear(1, 1)
self.fc_b = nn.Linear(1, 1)
def forward(self, a_and_b):
a, b = a_and_b
return (self.fc_a(a), self.fc_b(b))
model = nn.Sequential(Two())
model = Pipe(model, balance=[1], devices=["cpu"], chunks=2)
a = torch.rand(10, 1, requires_grad=True)
b = torch.rand(10, 1, requires_grad=True)
a_out, b_out = model((a, b))
loss = (a_out + b_out).mean()
loss.backward()
assert a.grad is not None
assert b.grad is not None
def test_input_singleton():
class One(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(1, 1)
def forward(self, only_a):
(a,) = only_a
return (self.fc(a),)
model = nn.Sequential(One())
model = Pipe(model, balance=[1], devices=["cpu"], chunks=2)
a = torch.rand(10, 1, requires_grad=True)
(a_out,) = model((a,))
loss = a_out.mean()
loss.backward()
assert all(p.grad is not None for p in model.parameters())
assert a.grad is not None
def test_input_varargs():
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, balance=[1], devices=["cpu"])
a = torch.rand(1)
b = torch.rand(1)
# TypeError: forward() takes 2 positional arguments but 3 were given
with pytest.raises(TypeError):
model(a, b)
def test_non_tensor():
class NonTensor(nn.Module):
def forward(self, _):
return "hello"
model = nn.Sequential(NonTensor())
model = Pipe(model, balance=[1], devices=["cpu"])
x = torch.rand(1)
# TypeError: expected Tensor as element 0 in argument 0, but got str
with pytest.raises(TypeError):
model(x)
# TypeError: expected Tensor to scatter, but got str
with pytest.raises(TypeError):
model("hello")
def test_non_tensor_tuple():
class NonTensorTuple(nn.Module):
def forward(self, x):
return (x, "hello")
model = nn.Sequential(NonTensorTuple())
model = Pipe(model, balance=[1], devices=["cpu"])
x = torch.rand(1)
# TypeError: CheckpointBackward.forward: expected Variable (got str) for return value 1
with pytest.raises(TypeError):
model(x)
# TypeError: expected Tensor to scatter, but got str
with pytest.raises(TypeError):
model((x, "hello"))
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_deferred_batch_norm(checkpoint):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe = Pipe(
nn.Sequential(pipe_bn), balance=[1], devices=["cpu"], chunks=2, checkpoint=checkpoint, deferred_batch_norm=True
)
x = torch.rand(4, 3, 10, 10)
pipe(x).mean().backward()
bn(x).mean().backward()
assert torch.allclose(pipe[0].running_mean, bn.running_mean, atol=1e-4)
assert torch.allclose(pipe[0].running_var, bn.running_var, atol=1e-4)
@pytest.mark.parametrize("checkpoint", ["never", "always"])
def test_deferred_batch_norm_params(checkpoint):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe = Pipe(
nn.Sequential(pipe_bn), balance=[1], devices=["cpu"], chunks=1, checkpoint=checkpoint, deferred_batch_norm=True
)
x = torch.rand(4, 3, 10, 10)
pipe(x).mean().backward()
bn(x).mean().backward()
assert pipe[0].weight.grad is not None
assert pipe[0].bias.grad is not None
assert torch.allclose(pipe[0].weight.grad, bn.weight.grad, atol=1e-4)
assert torch.allclose(pipe[0].bias.grad, bn.bias.grad, atol=1e-4)
def test_devices():
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
c = nn.Linear(1, 1)
# There are extra two devices.
devices = ["cpu", "cpu", "cpu", "cpu", "cpu"]
model = nn.Sequential(a, b, c)
model = Pipe(model, [1, 1, 1], devices=devices)
cpu = torch.device("cpu")
# Extra devices must be discarded.
assert model.devices == [cpu, cpu, cpu]
def test_partitions():
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = Pipe(model, [1, 1], devices=["cpu", "cpu"])
assert isinstance(model.partitions, nn.ModuleList)
assert isinstance(model.partitions[0], nn.Sequential)
assert isinstance(model.partitions[1], nn.Sequential)
assert "partitions.0.0.weight" in model.state_dict()
def test_deny_moving():
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = Pipe(model, [1, 1], devices=["cpu", "cpu"])
# Moving is denied.
with pytest.raises(TypeError):
model.cuda()
with pytest.raises(TypeError):
model.cpu()
with pytest.raises(TypeError):
model.to(torch.device("cuda"))
with pytest.raises(TypeError):
model.to(0)
with pytest.raises(TypeError):
model.to("cuda")
with pytest.raises(TypeError):
model.to(device=0)
with pytest.raises(TypeError):
model.to(torch.rand(1))
with pytest.raises(TypeError):
model.to(tensor=torch.rand(1))
# Casting is allowed.
model.half()
model.to(torch.double)
model.to(dtype=torch.float)
def test_empty_module():
# Empty sequential module is not illegal.
model = nn.Sequential()
model = Pipe(model, [])
assert model(torch.tensor(42)) == torch.tensor(42)
assert model((torch.tensor(42),)) == (torch.tensor(42),)
# But only tensor or tensors is legal in Pipe.
with pytest.raises(TypeError):
model(42)
def test_named_children():
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(OrderedDict([("a", a), ("b", b)]))
model = Pipe(model, [1, 1], devices=["cpu", "cpu"])
names = set(n for n, _ in model.named_modules())
assert "partitions.0.a" in names
assert "partitions.1.b" in names
    # Pipe doesn't support __getattr__ delegation: unlike nn.Sequential, Pipe needs
    # several methods in its own namespace.
with pytest.raises(AttributeError):
model.a
def test_recommend_auto_balance():
with pytest.raises(ValueError, match="fairscale.nn.pipe.balance"):
# balance is required
Pipe(nn.Sequential())
with pytest.raises(ValueError, match="fairscale.nn.pipe.balance"):
        # module and sum of balance have different length (module: 0, sum of balance: 1)
Pipe(nn.Sequential(), [1])
with pytest.raises(ValueError, match="fairscale.nn.pipe.balance"):
# module and sum of balance have different length (module: 2, sum of balance: 1)
Pipe(nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 1)), [1])
def test_verify_module_non_sequential():
with pytest.raises(TypeError, match="module must be nn.Sequential to be partitioned"):
Pipe(nn.Module(), [1])
def test_verify_module_duplicate_children():
conv = nn.Conv2d(3, 3, 1)
model = nn.Sequential(conv, conv)
with pytest.raises(ValueError, match="module with duplicate children is not supported"):
Pipe(model, [1, 1])
@skip_if_no_cuda
def test_verify_module_duplicate_parameters_on_distinct_devices():
class Surrogate(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
conv = nn.Conv2d(3, 3, 1)
model = nn.Sequential(Surrogate(conv), Surrogate(conv))
with pytest.raises(ValueError, match="module with duplicate parameters on distinct devices is not supported"):
Pipe(model, [1, 1], devices=["cpu", "cuda"])
def test_verify_module_duplicate_parameters_on_same_device():
class Surrogate(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
conv = nn.Conv2d(3, 3, 1)
model = nn.Sequential(Surrogate(conv), Surrogate(conv))
Pipe(model, [1, 1], devices=["cpu", "cpu"])
def test_forward_lockstep():
timeline = []
class DelayedLog(nn.Module):
def __init__(self, j, seconds):
super().__init__()
self.i = 0
self.j = j
self.seconds = seconds
def forward(self, x):
time.sleep(self.seconds)
timeline.append((self.i, self.j))
self.i += 1
return x
model = nn.Sequential(DelayedLog(0, seconds=0), DelayedLog(1, seconds=0.1))
model = Pipe(model, balance=[1, 1], devices=["cpu", "cpu"], chunks=3)
model(torch.rand(3, 1))
# Expected timeline: (Logs are recorded at !)
#
# Partition #0: 0! 1! 2!
# Partition #1: 000! 111! 222!
#
assert timeline == [(0, 0), (1, 0), (0, 1), (2, 0), (1, 1), (2, 1)]
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Test checkpoint and PyTorch DDP interactions.
import os
import random
import tempfile
import numpy
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn import Linear, Sequential
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.checkpoint import checkpoint as torch_checkpoint
from fairscale.fair_dev.testing.testing import skip_if_no_cuda, skip_if_single_gpu
from fairscale.nn.pipe.checkpoint import Checkpointing, Function, TensorOrTensors
from fairscale.nn.pipe.microbatch import Batch
# This test mainly checks PyTorch & checkpointing behavior; Pipe's checkpointing
# code is already tested in another file, so we can run this test less frequently.
# We use getpid() in case random is seeded to be deterministic.
run_test = False
if os.getpid() % 100 == 42:
run_test = True
skip_if_not_needed = pytest.mark.skipif(not run_test, reason="Skipping due to test frequency")
def set_random_seed(seed: int) -> None:
"""Set random seed for reproducability."""
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
def pipe_checkpoint(function: Function, input: TensorOrTensors) -> TensorOrTensors:
"""Makes a checkpoint with a simple interface like
:func:`torch.utils.checkpoint.checkpoint`. It's only used to test or debug
:class:`Checkpoint` and :class:`Recompute` without boilerplate.
"""
batch = Batch(input, 0)
chk = Checkpointing(function, batch)
batch = chk.checkpoint()
chk.recompute(batch)
return batch.tensor_or_tensors
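# Illustrative usage only (not part of the tests below): for a single-tensor input,
# `pipe_checkpoint` behaves like `torch.utils.checkpoint.checkpoint`, so a
# forward/backward round trip through an arbitrary differentiable function would be:
#
#     t = torch.rand(4, requires_grad=True)
#     y = pipe_checkpoint(lambda x: x * 2, t)
#     y.sum().backward()
#     assert t.grad is not None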
def basic(rank, checkpoint):
# get the model, wrap with DDP and fwd, bwd.
set_random_seed(31415)
model = Sequential(Linear(2000, 2000), Linear(2000, 2000))
model.to("cuda")
model = DDP(model, device_ids=[rank])
input_tensor = torch.rand((64, 2000)).cuda()
input_tensor.requires_grad = True
output_tensor = checkpoint(model, input_tensor)
for p in model.parameters():
assert p.grad is None
output_tensor.sum().backward()
norm = 0.0
for p in model.parameters():
assert p.grad is not None
norm += p.grad.norm().item()
assert numpy.allclose(norm, 78053.52978515625), norm
def weight_sharing(rank, checkpoint):
# get the model, wrap with DDP and fwd, bwd.
set_random_seed(31415)
l1 = Linear(2000, 2000)
l2 = Linear(2000, 2000)
l1.weight = l2.weight
model = Sequential(l1, l2)
model.to("cuda")
model = DDP(model, device_ids=[rank])
input_tensor = torch.rand((64, 2000)).cuda()
input_tensor.requires_grad = True
output_tensor = checkpoint(model, input_tensor)
output_tensor.sum().backward()
norm = 0.0
for p in model.parameters():
assert p.grad is not None
norm += p.grad.norm().item()
assert numpy.allclose(norm, 57004.34228515625), norm
def checkpoint_half(rank, checkpoint):
# get the model, wrap with DDP and fwd, bwd.
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.l1 = Linear(2000, 2000)
self.l2 = Linear(2000, 2000)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
return x
set_random_seed(31415)
model = M()
model.to("cuda")
model = DDP(model, device_ids=[rank])
input_tensor = torch.rand((64, 2000)).cuda()
output_tensor = model(input_tensor)
output_tensor.sum().backward()
norm = 0.0
for p in model.parameters():
assert p.grad is not None
norm += p.grad.norm().item()
assert numpy.allclose(norm, 78053.52978515625), norm
def unused_param(rank, checkpoint):
# get the model, wrap with DDP and fwd, bwd.
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
            # The size 2000 is important. Without a bigger size, it doesn't trigger the RuntimeError!
self.l1 = Linear(2000, 2000)
self.l2 = Linear(2000, 2000)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
return x
model = M()
model.to("cuda")
model = DDP(model, device_ids=[rank], find_unused_parameters=True)
input_tensor = torch.rand((64, 2000)).cuda()
output_tensor = model(input_tensor)
try:
output_tensor.sum().backward()
except RuntimeError:
return
assert 0
def checkpoint_twice(rank, checkpoint):
# get the model, wrap with DDP and fwd, bwd.
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
            # The size 2000 is important. Without a bigger size, it doesn't trigger the RuntimeError!
self.l1 = Linear(2000, 2000)
self.l2 = Linear(2000, 2000)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
x = checkpoint(self.l2, x)
return x
model = M()
model.to("cuda")
model = DDP(model, device_ids=[rank])
input_tensor = torch.rand((64, 2000)).cuda()
output_tensor = model(input_tensor)
try:
output_tensor.sum().backward()
except RuntimeError:
return
assert 0
def run(rank, world_size, temp_file_name, checkpoint, test_func):
# setup
url = "file://" + temp_file_name
dist.init_process_group(init_method=url, backend=dist.Backend.NCCL, rank=rank, world_size=world_size)
torch.cuda.set_device(rank)
# actual test
test_func(rank, checkpoint)
# cleanup
dist.destroy_process_group()
@skip_if_not_needed
@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("checkpoint", [pipe_checkpoint, torch_checkpoint])
@pytest.mark.parametrize("test_func", [basic, weight_sharing, checkpoint_half, unused_param, checkpoint_twice])
def test_basic_ddp(checkpoint, test_func):
temp_file_name = tempfile.mkstemp()[1]
world_size = 2
mp.spawn(run, args=(world_size, temp_file_name, checkpoint, test_func), nprocs=world_size, join=True)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fairscale.nn.pipe.pipeline import clock_cycles
def test_clock_cycles():
assert list(clock_cycles(1, 1)) == [[(0, 0)]]
assert list(clock_cycles(1, 3)) == [[(0, 0)], [(0, 1)], [(0, 2)]]
assert list(clock_cycles(3, 1)) == [[(0, 0)], [(1, 0)], [(2, 0)]]
assert list(clock_cycles(3, 3)) == [ # noqa
[(0, 0)],
[(1, 0), (0, 1)],
[(2, 0), (1, 1), (0, 2)],
[(2, 1), (1, 2)],
[(2, 2)],
]
assert list(clock_cycles(4, 2)) == [ # noqa
[(0, 0)],
[(1, 0), (0, 1)],
[(2, 0), (1, 1)],
[(3, 0), (2, 1)],
[(3, 1)],
]
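# The expected schedules above follow the GPipe "diagonal" pattern: at clock tick k,
# every (microbatch i, partition j) pair with i + j == k is active, listed with the
# microbatch index decreasing. A minimal reference sketch of that rule (an
# illustration under that assumption, not fairscale's implementation):
def _reference_clock_cycles(m, n):
    """Yield, per clock tick, the (microbatch, partition) pairs that run concurrently."""
    for k in range(m + n - 1):
        yield [(i, k - i) for i in range(min(k, m - 1), max(0, k - n + 1) - 1, -1)]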
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import torch
from torch import nn
import torch.cuda
from fairscale.nn.pipe.checkpoint import Checkpointing, Function, TensorOrTensors, is_checkpointing, is_recomputing
from fairscale.nn.pipe.dependency import fork, join
from fairscale.nn.pipe.microbatch import Batch
devices = ["cpu"]
if torch.cuda.is_available():
devices.append("cuda")
def make_checkpoint(function: Function, input: TensorOrTensors, index: int) -> TensorOrTensors:
"""Makes a checkpoint with a simple interface like
:func:`torch.utils.checkpoint.checkpoint`. It's only used to test or debug
:class:`Checkpoint` and :class:`Recompute` without boilerplate.
"""
batch = Batch(input, index)
chk = Checkpointing(function, batch)
batch = chk.checkpoint()
chk.recompute(batch)
return batch.tensor_or_tensors
@pytest.mark.parametrize("device", devices)
def test_serial_checkpoints(device):
# Copied from https://github.com/pytorch/pytorch/pull/18568.
timeline = []
class Log(torch.autograd.Function):
@staticmethod
def forward(ctx, name, x):
ctx.name = name
timeline.append(f"{name}:forward")
return x.detach()
@staticmethod
def backward(ctx, grad_output):
name = ctx.name
timeline.append(f"{name}:backward")
return None, grad_output
a = torch.rand(1, device=device, requires_grad=True)
b = torch.rand(1, device=device, requires_grad=True)
# Increase the next function sequence number.
_ = a + 1 + 2 + 3 + 4 + 5
a = make_checkpoint(partial(Log.apply, "a"), a, 0)
a, phony = fork(a)
b = join(b, phony)
b = make_checkpoint(partial(Log.apply, "b"), b, 0)
c = torch.cat((a, b))
out = c.sum()
# +--> {a} --Checkpoint(Log)--> {a}
# {out} --Sum--> {c} --Cat ^-----------------------------+
# +--> {b} --Checkpoint(Log)--> {b} --First--> {b}
out.backward()
assert timeline == ["a:forward", "b:forward", "b:forward", "b:backward", "a:forward", "a:backward"]
# |----------------------| |-----------------------| |-----------------------|
# forward pass Checkpoint(Log[b]) Checkpoint(Log[a])
def test_not_requires_grad():
x = Batch(torch.rand(1, requires_grad=False), 0)
assert not x[0].requires_grad
def f(x):
return x * 2
chk = Checkpointing(f, x)
x = chk.checkpoint()
assert x[0].requires_grad
chk.recompute(x)
assert x[0].requires_grad
x.tensor.backward()
def test_not_requires_grad_with_parameter():
x = torch.rand(1, requires_grad=False)
a = torch.rand(1, requires_grad=True)
def f(x):
return x * a
y = make_checkpoint(f, x, 0)
y.backward()
assert a.grad is not None
@pytest.mark.parametrize("device", devices)
def test_random_in_checkpoint(device):
dropout = nn.Dropout(p=0.5)
torch.manual_seed(0)
x = torch.randn(3, 3, device=device, requires_grad=True)
y = dropout(x)
y.norm().backward()
torch.manual_seed(0)
chk_x = torch.randn(3, 3, device=device, requires_grad=True)
chk_y = make_checkpoint(dropout, chk_x, 0)
chk_y.norm().backward()
assert torch.allclose(x.grad, chk_x.grad)
def test_detect_checkpointing_recomputing():
logs = []
class Detect(nn.Module):
def forward(self, input):
logs.append((is_checkpointing(), is_recomputing()))
return input
model = Detect()
input = torch.rand(1, requires_grad=True)
output = make_checkpoint(model, input, 0)
output.backward()
assert logs == [(True, False), (False, True)]
def test_detect_checkpointing_recomputing_without_checkpoint():
logs = []
class Detect(nn.Module):
def forward(self, input):
logs.append((is_checkpointing(), is_recomputing()))
return input
model = Detect()
input = torch.rand(1, requires_grad=True)
output = model(input)
output.backward()
assert logs == [(False, False)]
def test_non_grad_output():
class ForkNonGrad(nn.Module):
def forward(self, input):
return (input * 2, torch.rand(1))
model = ForkNonGrad()
input = torch.rand(1, requires_grad=True)
output = make_checkpoint(model, input, 0)
output[0].backward()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from fairscale.nn.pipe.stream import (
CPUStream,
current_stream,
default_stream,
get_device,
is_cuda,
new_stream,
record_stream,
use_device,
use_stream,
wait_stream,
)
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestNewStream:
def test_new_stream_cpu(self):
stream = new_stream(torch.device("cpu"))
assert stream is CPUStream
@skip_if_no_cuda
def test_new_stream_cuda(self):
stream = new_stream(torch.device("cuda"))
assert isinstance(stream, torch.cuda.Stream)
assert stream != torch.cuda.default_stream()
class TestCurrentStream:
def test_current_stream_cpu(self):
stream = current_stream(torch.device("cpu"))
assert stream is CPUStream
@skip_if_no_cuda
def test_current_stream_cuda(self):
stream = current_stream(torch.device("cuda"))
assert isinstance(stream, torch.cuda.Stream)
assert stream == torch.cuda.current_stream()
class TestDefaultStream:
def test_default_stream_cpu(self):
stream = default_stream(torch.device("cpu"))
assert stream is CPUStream
@skip_if_no_cuda
def test_default_stream_cuda(self):
stream = default_stream(torch.device("cuda"))
assert isinstance(stream, torch.cuda.Stream)
assert stream == torch.cuda.default_stream()
class TestUseDevice:
def test_use_device_cpu(self):
with use_device(torch.device("cpu")):
pass
@skip_if_no_cuda
def test_use_device_cuda(self):
with use_device(torch.device("cuda")):
pass
class TestUseStream:
def test_use_stream_cpu(self):
with use_stream(CPUStream):
pass
@skip_if_no_cuda
def test_use_stream_cuda(self):
stream = new_stream(torch.device("cuda"))
with use_stream(stream):
assert current_stream(torch.device("cuda")) == stream
class TestGetDevice:
def test_get_device_cpu(self):
assert get_device(CPUStream).type == "cpu"
@skip_if_no_cuda
def test_get_device_cuda(self):
stream = current_stream(torch.device("cuda"))
assert get_device(stream).type == "cuda"
class TestWaitStream:
def _test_wait_stream(self, source, target, cuda_sleep=None):
with use_stream(target):
if is_cuda(target):
cuda_sleep(0.5)
x = torch.ones(100, 100, device=get_device(target))
wait_stream(source, target)
with use_stream(source):
assert x.sum().item() == 10000
def test_wait_stream_cpu_cpu(self):
source = CPUStream
target = CPUStream
self._test_wait_stream(source, target)
@skip_if_no_cuda
def test_wait_stream_cpu_cuda(self, cuda_sleep):
source = CPUStream
target = new_stream(torch.device("cuda"))
self._test_wait_stream(source, target, cuda_sleep)
@skip_if_no_cuda
def test_wait_stream_cuda_cpu(self, cuda_sleep):
source = new_stream(torch.device("cuda"))
target = CPUStream
self._test_wait_stream(source, target, cuda_sleep)
@skip_if_no_cuda
def test_wait_stream_cuda_cuda(self, cuda_sleep):
source = current_stream(torch.device("cuda"))
target = new_stream(torch.device("cuda"))
self._test_wait_stream(source, target, cuda_sleep)
class TestRecordStream:
def test_record_stream_cpu(self):
# It should silently ignore CPU tensors.
x = torch.rand(1, device=torch.device("cpu"))
record_stream(x, CPUStream)
@skip_if_no_cuda
def test_record_stream_cuda(self, cuda_sleep):
# This test detects unexpected block reallocation. For reliable test,
# the stream to allocate tensors is isolated. The allocator will not
# reuse free blocks which were allocated from another stream.
stream_alloc = new_stream(torch.device("cuda"))
with torch.cuda.stream(stream_alloc):
x = torch.rand(1, device=torch.device("cuda"))
stream = new_stream(torch.device("cuda"))
record_stream(x, stream)
with use_stream(stream):
cuda_sleep(0.5)
        # 'x' is deleted from Python's perspective, but the block backing 'x' is still
        # required by 'stream'. 'y' shouldn't be allocated into that block.
data_ptr = x.data_ptr()
del x
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
y = torch.rand(1, device=torch.device("cuda"))
assert y.data_ptr() != data_ptr
# Pause Python until 'stream' finishes tasks queued. Now the block of
# 'x' is free to be reallocated.
wait_stream(CPUStream, stream)
with torch.cuda.stream(stream_alloc):
z = torch.rand(1, device=torch.device("cuda"))
assert z.data_ptr() == data_ptr
@skip_if_no_cuda
def test_record_stream_shifted_view(self, cuda_sleep):
# Issue: https://github.com/pytorch/pytorch/issues/27366
stream_alloc = new_stream(torch.device("cuda"))
with torch.cuda.stream(stream_alloc):
x = torch.rand(2, device=torch.device("cuda"))
y = x[1:]
assert y.data_ptr() > x.data_ptr()
stream = new_stream(torch.device("cuda"))
with use_stream(stream):
cuda_sleep(0.5)
record_stream(y, stream)
data_ptr = x.data_ptr()
del x, y
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
z = torch.rand(2, device=torch.device("cuda"))
assert z.data_ptr() != data_ptr
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from fairscale.nn.pipe.skip import pop, skippable, stash
from fairscale.nn.pipe.skip.tracker import SkipTracker, use_skip_tracker
@pytest.fixture(autouse=True)
def skip_tracker():
skip_tracker = SkipTracker()
with use_skip_tracker(skip_tracker):
yield skip_tracker
def test_stash(skip_tracker):
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2
l1 = Stash()
assert len(skip_tracker.tensors) == 0
with use_skip_tracker(skip_tracker):
l1(torch.tensor(42))
assert len(skip_tracker.tensors) == 1
def test_pop():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo
l1 = Stash()
l2 = Pop()
output = l2(l1(torch.tensor(42)))
assert output.item() == 42
def test_declare_but_not_use():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
return input * 2
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
return input * 3
l1 = Stash()
l2 = Pop()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
with pytest.raises(RuntimeError):
l2(torch.tensor(42))
def test_stash_not_declared():
@skippable()
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2
l1 = Stash()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
def test_pop_not_declared():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2
@skippable()
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo
l1 = Stash()
l2 = Pop()
latent = l1(torch.tensor(42))
with pytest.raises(RuntimeError):
l2(latent)
def test_pop_not_stashed():
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
yield pop("foo")
l1 = Pop()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
def test_stash_none():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", None)
return input * 2
l1 = Stash()
l1(torch.tensor(42))
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from queue import Queue
import threading
import pytest
import torch
from torch import nn
from fairscale.nn.pipe.checkpoint import enable_checkpointing, enable_recomputing
from fairscale.nn.pipe.microbatch import Batch
from fairscale.nn.pipe.skip import pop, skippable, stash
from fairscale.nn.pipe.skip.layout import SkipLayout
from fairscale.nn.pipe.skip.tracker import SkipTracker, SkipTrackerThroughPotals, current_skip_tracker
def test_default_skip_tracker():
q = Queue()
def f():
q.put(current_skip_tracker())
t = threading.Thread(target=f)
t.start()
t.join()
skip_tracker = q.get()
assert type(skip_tracker) is SkipTracker
assert type(skip_tracker) is not SkipTrackerThroughPotals
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_default_skip_tracker_by_data_parallel():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo
model = nn.Sequential(Stash(), Pop())
model = nn.DataParallel(model, device_ids=[0, 0], output_device=0)
input = torch.rand(10, device=0)
output = model(input)
assert torch.allclose(output, input)
def test_reuse_portal():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout, 0)
batch = Batch(torch.tensor([1.0]), 0)
a = torch.tensor([2.0])
b = torch.tensor([2.0])
skip_tracker.save(batch, None, "test", a)
portal = skip_tracker.portals[(None, "test")]
skip_tracker.save(batch, None, "test", b)
assert portal is skip_tracker.portals[(None, "test")]
def test_no_copy_no_portal():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "copy"): (0, 1), (None, "not_copy"): (0, 0)})
skip_tracker = SkipTrackerThroughPotals(skip_layout, 0)
batch = Batch(torch.tensor([1.0]), 0)
a = torch.tensor([2.0])
b = torch.tensor([2.0])
skip_tracker.save(batch, None, "copy", a)
skip_tracker.save(batch, None, "not_copy", b)
assert (None, "copy") in skip_tracker.portals
assert (None, "copy") not in skip_tracker.tensors
assert (None, "not_copy") in skip_tracker.tensors
assert (None, "not_copy") not in skip_tracker.portals
def test_tensor_life_without_checkpointing():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout, 0)
batch = Batch(torch.tensor([1.0]), 0)
tensor = torch.tensor([2.0])
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 1
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 0
def test_tensor_life_with_checkpointing():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout, 0)
batch = Batch(torch.tensor([1.0]), 0)
tensor = torch.tensor([2.0])
with enable_checkpointing():
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 2
with enable_checkpointing():
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 1
with enable_recomputing():
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 0
with enable_recomputing():
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 0
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from torch import nn
from fairscale.nn.pipe.skip import Namespace, skippable, verify_skippables
def test_matching():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
verify_skippables(nn.Sequential(Layer1(), Layer2()))
def test_stash_not_pop():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1()))
assert "no module declared 'foo' as poppable but stashed" in str(e.value)
def test_pop_unknown():
@skippable(pop=["foo"])
class Layer1(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1()))
assert "'0' declared 'foo' as poppable but it was not stashed" in str(e.value)
def test_stash_again():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(stash=["foo"])
class Layer2(nn.Module):
pass
@skippable(pop=["foo"])
class Layer3(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3()))
assert "'1' redeclared 'foo' as stashable" in str(e.value)
def test_pop_again():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
@skippable(pop=["foo"])
class Layer3(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3()))
assert "'2' redeclared 'foo' as poppable" in str(e.value)
def test_stash_pop_together_different_names():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"], stash=["bar"])
class Layer2(nn.Module):
pass
@skippable(pop=["bar"])
class Layer3(nn.Module):
pass
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3()))
def test_stash_pop_together_same_name():
@skippable(stash=["foo"], pop=["foo"])
class Layer1(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1()))
assert "'0' declared 'foo' both as stashable and as poppable" in str(e.value)
def test_double_stash_pop():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
@skippable(stash=["foo"])
class Layer3(nn.Module):
pass
@skippable(pop=["foo"])
class Layer4(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3(), Layer4()))
assert "'2' redeclared 'foo' as stashable" in str(e.value)
assert "'3' redeclared 'foo' as poppable" in str(e.value)
def test_double_stash_pop_but_isolated():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
@skippable(stash=["foo"])
class Layer3(nn.Module):
pass
@skippable(pop=["foo"])
class Layer4(nn.Module):
pass
ns1 = Namespace()
ns2 = Namespace()
verify_skippables(
nn.Sequential(
Layer1().isolate(ns1),
Layer2().isolate(ns1),
Layer3().isolate(ns2),
Layer4().isolate(ns2),
)
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from fairscale.nn.pipe.dependency import fork, join
from fairscale.nn.pipe.skip.portal import Portal
from fairscale.nn.pipe.stream import default_stream
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_copy_returns_on_next_device():
portal = Portal(torch.rand(1), tensor_life=1, index=0)
prev_stream = default_stream(torch.device("cpu"))
next_stream = default_stream(torch.device("cuda"))
phony = torch.zeros(0, requires_grad=True)
assert phony.device.type == "cpu"
phony = portal.copy(prev_stream, next_stream, phony)
assert phony.device.type == "cuda"
def test_blue_orange():
tensor1 = torch.rand(1, requires_grad=True)
tensor2 = torch.rand(1, requires_grad=True)
    # Same as: output = tensor1 * 2 + tensor2
#
# +----------------------+
# | |
# tensor2 -- PortalBlue -+ +- PortalOrange -+
# | | |
# tensor1 ------------ Join -- Fork --- Mul --- Add -- output
#
main = tensor1
portal = Portal(tensor2, tensor_life=2, index=0)
phony = portal.blue()
main = join(main, phony)
main, phony = fork(main)
sub = portal.orange(phony)
output = main * 2 + sub
output.backward()
assert torch.allclose(tensor1.grad, torch.tensor([2.0]))
assert torch.allclose(tensor2.grad, torch.tensor([1.0]))
def test_blue_orange_not_requires_grad():
tensor1 = torch.rand(1, requires_grad=True)
tensor2 = torch.rand(1)
    # Same as: output = tensor1 * 2 + tensor2
#
# +----------------------+
# | |
# tensor2 -- PortalBlue -+ +- PortalOrange -+
# | | |
# tensor1 ------------ Join -- Fork --- Mul --- Add -- output
#
main = tensor1
portal = Portal(tensor2, tensor_life=2, index=0)
phony = portal.blue()
main = join(main, phony)
main, phony = fork(main)
sub = portal.orange(phony)
output = main * 2 + sub
output.backward()
assert torch.allclose(tensor1.grad, torch.tensor([2.0]))
assert tensor2.grad is None
def test_use_grad():
tensor = torch.rand(1, requires_grad=True)
portal = Portal(tensor, tensor_life=1, index=0)
portal.put_grad(tensor)
assert portal.use_grad() is tensor
# Gradient in a portal is ephemeral.
with pytest.raises(RuntimeError):
portal.use_grad()
class TestTensorLife:
@pytest.fixture
def new_portal(self):
portal = None
def new_portal(tensor_life):
nonlocal portal
tensor = torch.rand(1, requires_grad=True)
portal = Portal(tensor, tensor_life, 0)
return portal, tensor
yield new_portal
# A test using this fixture must exhaust the tensor in the portal.
with pytest.raises(RuntimeError):
portal.check_tensor_life()
assert portal.tensor is None
def test_tensor_life_0(self, new_portal):
portal, tensor = new_portal(0)
assert portal.tensor is None
def test_tensor_life_1(self, new_portal):
portal, tensor = new_portal(1)
assert portal.tensor is tensor
portal.blue()
def test_tensor_life_2(self, new_portal):
portal, tensor = new_portal(2)
assert portal.tensor is tensor
phony = portal.blue()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
def test_tensor_life_3(self, new_portal):
portal, tensor = new_portal(3)
assert portal.tensor is tensor
phony = portal.blue()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
def test_tensor_life_4(self, new_portal):
portal, tensor = new_portal(4)
assert portal.tensor is tensor
phony = portal.blue()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
portal.blue()
def test_tensor_life_3_plus_1(self, new_portal):
portal, tensor = new_portal(3)
assert portal.tensor is tensor
phony = portal.blue()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
assert portal.orange(phony).data_ptr() == tensor.data_ptr()
another_tensor = torch.rand(1, requires_grad=True)
portal.put_tensor(another_tensor, tensor_life=1)
portal.blue()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from fairscale.nn.pipe import Pipe, is_checkpointing, is_recomputing
from fairscale.nn.pipe.skip import pop, skippable, stash
from fairscale.nn.pipe.skip.tracker import current_skip_tracker
@skippable(stash=["skip"])
class Stash(nn.Module):
def forward(self, input):
yield stash("skip", input)
return input
@skippable(pop=["skip"])
class Pop(nn.Module):
def forward(self, input):
skip = yield pop("skip")
return input + skip
@pytest.mark.parametrize("train", [True, False], ids=["train", "eval"])
@pytest.mark.parametrize("checkpoint", ["always", "except_last", "never"])
def test_delete_portal_tensor(train, checkpoint):
# Without checkpointing:
# +- Stash --+ +--- Pop ----+ - - - layers
# | 2,blue,1 |--| 1,orange,0 | - - - tensor_life and portal function
# +----------+ +------------+
#
# With checkpointing:
# +- Stash --+ +--- Pop ----+ +--- Pop'----+ +- Stash'--+
# | 3,blue,2 |--| 2,orange,1 |--| 1,orange,0 |--| 1,blue,0 |
# +----------+ +------------+ +------------+ +----------+
def portal_tensor_life_is(tensor_life, skip_tracker=None):
if skip_tracker is None:
skip_tracker = current_skip_tracker()
# Get the current portal.
portal = list(skip_tracker.portals.values())[0]
if tensor_life == 0:
return portal.tensor_life == 0 and portal.tensor is None
else:
return portal.tensor_life == tensor_life and portal.tensor is not None
# Check the portal tensor after 'Stash'.
stash_ = Stash()
@stash_.register_forward_hook
def check_portal_tensor_after_stash(*_):
if is_checkpointing():
assert portal_tensor_life_is(2)
elif is_recomputing():
assert portal_tensor_life_is(0)
else:
assert portal_tensor_life_is(1)
pop_ = Pop()
@pop_.register_forward_hook
def check_portal_tensor_after_pop(*_):
if is_checkpointing():
assert portal_tensor_life_is(1)
elif is_recomputing():
assert portal_tensor_life_is(0)
else:
assert portal_tensor_life_is(0)
class NoPortalTensorAtBackward(nn.Module):
class F(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.skip_tracker = current_skip_tracker()
return input.detach()
@staticmethod
def backward(ctx, grad):
assert portal_tensor_life_is(0, skip_tracker=ctx.skip_tracker)
return grad
def forward(self, input):
return self.F.apply(input)
model = nn.Sequential(NoPortalTensorAtBackward(), stash_, pop_)
model = Pipe(model, balance=[2, 1], devices=["cpu", "cpu"], chunks=2, checkpoint=checkpoint)
input = torch.rand(10, requires_grad=True)
if train:
model.train()
output = model(input)
output.norm().backward()
else:
model.eval()
with torch.no_grad():
model(input)
@pytest.mark.parametrize("train", [True, False], ids=["train", "eval"])
def test_no_portal_without_pipe(train, monkeypatch):
def deny(*args, **kwargs):
raise AssertionError("tried to create Portal without Pipe")
monkeypatch.setattr("fairscale.nn.pipe.skip.portal.Portal.__init__", deny)
model = nn.Sequential(Stash(), Pop())
input = torch.rand(10, requires_grad=True)
if train:
model.train()
output = model(input)
output.norm().backward()
else:
model.eval()
with torch.no_grad():
model(input)
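# Illustrative sketch (added; not part of the original tests): the module-level Stash/Pop
# pair above forms a skip connection that crosses Pipe partitions. The layer sizes and
# batch shape below are made up for illustration.
def _example_skip_connection_sketch():
    model = nn.Sequential(Stash(), nn.Linear(8, 8), Pop())
    pipe = Pipe(model, balance=[2, 1], devices=["cpu", "cpu"], chunks=2, checkpoint="never")
    # Pop adds the stashed input back onto the Linear output, so shapes are unchanged.
    out = pipe(torch.rand(4, 8, requires_grad=True))
    out.norm().backward()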
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from torch import nn
from fairscale.nn.pipe.skip import Namespace, skippable, stash
def test_namespace_difference():
ns1 = Namespace()
ns2 = Namespace()
assert ns1 != ns2
def test_namespace_copy():
ns = Namespace()
assert copy.copy(ns) == ns
assert copy.copy(ns) is not ns
def test_skippable_repr():
@skippable(stash=["hello"])
class Hello(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
yield stash("hello", x)
return self.conv(x)
m = Hello()
assert (
repr(m)
== """
@skippable(Hello(
(conv): Conv2d(1, 1, kernel_size=(1, 1), stride=(1, 1))
))
""".strip()
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
from fairscale.nn.pipe.skip import Namespace, pop, skippable, stash
from fairscale.nn.pipe.skip.layout import inspect_skip_layout
class Pass(nn.Module):
def forward(self, input):
return input
@skippable(stash=["foo"])
class StashFoo(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input
@skippable(pop=["foo"])
class PopFoo(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return input + foo
@skippable(stash=["bar"])
class StashBar(nn.Module):
def forward(self, input):
yield stash("bar", input)
return input
@skippable(pop=["bar"])
class PopBar(nn.Module):
def forward(self, input):
bar = yield pop("bar")
return input + bar
def test_no_skippables():
p1 = nn.Sequential(Pass())
p2 = nn.Sequential(Pass())
layout = inspect_skip_layout([p1, p2])
policy = [list(layout.copy_policy(i)) for i in range(2)]
assert policy == [[], []]
def test_inner_partition():
p1 = nn.Sequential(StashFoo(), PopFoo())
p2 = nn.Sequential(Pass())
layout = inspect_skip_layout([p1, p2])
policy = [list(layout.copy_policy(i)) for i in range(2)]
assert policy == [[], []]
def test_adjoining_partitions():
p1 = nn.Sequential(StashFoo())
p2 = nn.Sequential(PopFoo())
layout = inspect_skip_layout([p1, p2])
policy = [list(layout.copy_policy(i)) for i in range(2)]
assert policy == [[], [(0, None, "foo")]]
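# Note (added for clarity; based on the asserts in this file): each copy_policy(i) entry is a
# (source_partition_index, namespace, skip_name) tuple, i.e. partition 1 above must copy the
# "foo" tensor stashed by partition 0 in the default (None) namespace.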
def test_far_partitions():
p1 = nn.Sequential(StashFoo())
p2 = nn.Sequential(Pass())
p3 = nn.Sequential(PopFoo())
layout = inspect_skip_layout([p1, p2, p3])
policy = [list(layout.copy_policy(i)) for i in range(3)]
assert policy == [[], [], [(0, None, "foo")]]
def test_pop_2_from_different_partitions():
p1 = nn.Sequential(StashFoo())
p2 = nn.Sequential(StashBar())
p3 = nn.Sequential(PopBar(), PopFoo())
layout = inspect_skip_layout([p1, p2, p3])
policy = [list(layout.copy_policy(i)) for i in range(3)]
# p3 pops 'bar' before 'foo', but the plan is sorted by source partition index.
assert policy == [[], [], [(0, None, "foo"), (1, None, "bar")]]
def test_namespace():
ns1 = Namespace()
ns2 = Namespace()
p1 = nn.Sequential(StashFoo().isolate(ns1))
p2 = nn.Sequential(StashFoo().isolate(ns2))
p3 = nn.Sequential(PopFoo().isolate(ns2), PopFoo().isolate(ns1))
layout = inspect_skip_layout([p1, p2, p3])
policy = [list(layout.copy_policy(i)) for i in range(3)]
# p3 pops ns2's 'foo' before ns1's 'foo', but the plan is sorted by source partition index.
assert policy == [[], [], [(0, ns1, "foo"), (1, ns2, "foo")]]
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from fairscale.fair_dev.testing.testing import skip_if_single_gpu
from fairscale.nn.pipe import Pipe
from fairscale.nn.pipe.skip import pop, skippable, stash
from fairscale.nn.pipe.skip.portal import PortalBlue, PortalCopy, PortalOrange
@skip_if_single_gpu
@pytest.mark.parametrize("balance", [[3], [1, 2], [2, 1], [1, 1, 1]], ids=["3", "1:2", "2:1", "1:1:1"])
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_1to3(balance, checkpoint):
if torch.cuda.device_count() < len(balance):
pytest.skip("at least %d cuda devices required" % len(balance))
@skippable(stash=["1to3"])
class Layer1(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 3, 1)
def forward(self, input):
yield stash("1to3", input)
output = self.conv(input)
return output
class Layer2(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 3, 1)
def forward(self, input):
output = self.conv(input)
return output
@skippable(pop=["1to3"])
class Layer3(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 3, 1)
def forward(self, input):
skip_1to3 = yield pop("1to3")
output = self.conv(input) + skip_1to3
return output
model = nn.Sequential(Layer1(), Layer2(), Layer3())
model = Pipe(model, balance, chunks=3, checkpoint=checkpoint)
in_device = model.devices[0]
out_device = model.devices[-1]
input = torch.rand(30, 3, 224, 224, device=in_device, requires_grad=True)
output = model(input)
loss = output.mean()
loss.backward()
assert torch.allclose(output.norm(), torch.tensor(1039.0, device=out_device), atol=5e-1)
assert torch.allclose(input.grad.norm(), torch.tensor(0.0004533053, device=in_device))
def test_none_skip():
@skippable(stash=["none"])
class Stash(nn.Module):
def forward(self, input):
yield stash("none", None)
return input
@skippable(pop=["none"])
class Pop(nn.Module):
def forward(self, input):
none = yield pop("none")
assert none is None
return input
model = nn.Sequential(Stash(), Pop())
model = Pipe(model, [1, 1], devices=["cpu", "cpu"], chunks=5)
input = torch.rand(10, requires_grad=True)
output = model(input)
def assert_grad_fn_is_not_portal(grad_fn, visited=set()):
if grad_fn in visited or grad_fn is None:
return
assert not isinstance(grad_fn, PortalBlue._backward_cls)
assert not isinstance(grad_fn, PortalCopy._backward_cls)
assert not isinstance(grad_fn, PortalOrange._backward_cls)
visited.add(grad_fn)
for next_grad_fn, _ in grad_fn.next_functions:
assert_grad_fn_is_not_portal(next_grad_fn, visited)
assert_grad_fn_is_not_portal(output.grad_fn)
output.sum().backward()
assert input.grad.mean().item() == 1
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test checkpoint_wrapper with normalization layers. """
import pytest
import torch
from torch.nn import BatchNorm2d, LayerNorm, Linear, Sequential
from torch.optim import SGD
from fairscale.fair_dev.testing.testing import objects_are_equal
from fairscale.internal import torch_version
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
NORM_TYPES = [LayerNorm, BatchNorm2d]
MP_TYPES = ["fp32", "fp16", "call_half"]
def get_model(norm_type, checkpointed, mixed_precision):
assert norm_type in NORM_TYPES, norm_type
assert checkpointed in [True, False], checkpointed
assert mixed_precision in MP_TYPES
model = Sequential(Linear(3, 2), norm_type(2))
if mixed_precision == "fp16":
# Set param.data and buffers as fp16
for p in model.parameters():
p.data = p.data.half()
for m in model:
for n, b in m.named_buffers():
setattr(m, n, b.half())
elif mixed_precision == "call_half":
model.half()
if checkpointed:
model = checkpoint_wrapper(model)
return model
@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.parametrize("norm_type", NORM_TYPES)
@pytest.mark.parametrize("mixed_precision", MP_TYPES)
def test_norm(device, norm_type, mixed_precision):
"""Test checkpoint_wrapper with different norm layers."""
if device == "cuda" and not torch.cuda.is_available():
pytest.skip("Skip due to lack of GPU")
# Get input, ref, checkpoint models and make them equal.
in_data = torch.rand(2, 2, 3, 3).to(device)
m_ref = get_model(norm_type, False, mixed_precision).to(device)
m_cpt = get_model(norm_type, True, mixed_precision).to(device)
m_cpt.load_state_dict(m_ref.state_dict())
if torch_version() >= (1, 6, 0):
# This assert fails on 1.5.1.
assert objects_are_equal(m_ref.state_dict(), m_cpt.state_dict())
if mixed_precision != "fp32":
in_data = in_data.half()
# Needed due to checkpointing.
in_data.requires_grad = True
for model in (m_ref, m_cpt):
optim = SGD(model.parameters(), lr=0.1)
if device == "cpu" and mixed_precision != "fp32":
# Got: RuntimeError: "batch_norm"/"layer_norm" not implemented for 'Half'.
with pytest.raises(RuntimeError):
out = model(in_data)
return
else:
# Everything else works.
out = model(in_data)
out.sum().backward()
optim.step()
if torch_version() >= (1, 6, 0):
assert objects_are_equal(m_ref.state_dict(), m_cpt.state_dict())
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Test fairscale.nn.misc.checkpoint_activations API."""
import pytest
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint as torch_checkpoint_wrapper
from fairscale.fair_dev.testing.testing import skip_if_no_cuda
from fairscale.internal import torch_version
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper, disable_checkpointing
from fairscale.nn.misc import FlattenParamsWrapper
from fairscale.nn.misc import checkpoint_wrapper as deprecated_checkpoint_wrapper
def get_cuda_mem_allocated():
"""Helper to get cuda memory allocated if possible."""
if torch.cuda.is_available():
return torch.cuda.memory_allocated()
else:
return 0
def get_loss_and_gnorm(model, input):
"""Helper to run a forward/backward pass and return results in a dict."""
ret = {}
ret["mem_0"] = get_cuda_mem_allocated()
ret["mem_peak"] = 0
if ret["mem_0"] > 0:
torch.cuda.reset_peak_memory_stats()
model.zero_grad()
loss = model(input).sum()
ret["mem_after_fwd"] = get_cuda_mem_allocated()
loss.backward()
ret["mem_after_bwd"] = get_cuda_mem_allocated()
gnorm = torch.norm(torch.stack([torch.norm(p.grad.detach()) for p in model.parameters()]))
ret["loss"] = loss.item()
ret["gnorm"] = gnorm.item()
if ret["mem_0"] > 0:
ret["mem_peak"] = torch.cuda.max_memory_allocated()
return ret
class BasicModel(nn.Module):
"""Basic model with a single FFN being checkpointed.
Used for extensive checks: equivalence with non-checkpoint, torch-checkpoint, etc.
"""
def __init__(self, use_pytorch_checkpoint=False, use_fairscale_checkpoint=False, **kwargs):
super().__init__()
torch.manual_seed(0) # make sure weights are deterministic.
assert not (
use_pytorch_checkpoint and use_fairscale_checkpoint
), "Cannot use both pytorch and fairscale checkpointing mechanisms."
self.use_pytorch_checkpoint = use_pytorch_checkpoint
self.ffn = nn.Sequential(
nn.Linear(32, 128),
# add a Dropout layer to test RNG save/restore
nn.Dropout(p=0.5),
nn.Linear(128, 32),
)
if use_fairscale_checkpoint:
self.ffn = checkpoint_wrapper(self.ffn, **kwargs)
self.out = nn.Linear(32, 1)
def forward(self, x):
if self.use_pytorch_checkpoint:
x = torch_checkpoint_wrapper(self.ffn, x)
else:
x = self.ffn(x)
return self.out(x)
@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.skipif(
torch_version() >= (1, 13, 0),
reason="mem_peak behavior changed for torch 1.13 and above",
)
def test_basic(device):
if "cuda" in device and not torch.cuda.is_available():
pytest.skip("test requires a GPU")
input = torch.rand(2, 16, 32).requires_grad_(True)
model = BasicModel().to(device)
no_cpt = get_loss_and_gnorm(model, input.to(device))
model = BasicModel(use_pytorch_checkpoint=True).to(device)
pyt_cpt = get_loss_and_gnorm(model, input.to(device))
model = BasicModel(use_fairscale_checkpoint=True).to(device)
fairscale_cpt = get_loss_and_gnorm(model, input.to(device))
model = BasicModel(use_fairscale_checkpoint=True, offload_to_cpu=True).to(device)
fairscale_cpt_offload = get_loss_and_gnorm(model, input.to(device))
# Check for correctness.
for key in "loss", "gnorm":
if not (no_cpt[key] == pyt_cpt[key] == fairscale_cpt[key] == fairscale_cpt_offload[key]):
print(no_cpt, pyt_cpt, fairscale_cpt, fairscale_cpt_offload)
assert 0
del no_cpt[key]
del pyt_cpt[key]
del fairscale_cpt[key]
del fairscale_cpt_offload[key]
# Check for memory usage for cuda only.
if "cpu" in device:
return
mem_peaks = [98816, 103424, 103424, 107520]
if torch_version() < (1, 7, 0):
# Older torch behaves slightly differently
mem_peaks = [102400, 103424, 103424, 107520]
assert no_cpt == {"mem_0": 38912, "mem_peak": mem_peaks[0], "mem_after_fwd": 64000, "mem_after_bwd": 74240}, no_cpt
assert pyt_cpt == {
"mem_0": 38912,
"mem_peak": mem_peaks[1],
"mem_after_fwd": 43520,
"mem_after_bwd": 74240,
}, pyt_cpt
assert fairscale_cpt == {
"mem_0": 38912,
"mem_peak": mem_peaks[2],
"mem_after_fwd": 43520,
"mem_after_bwd": 74240,
}, fairscale_cpt
assert fairscale_cpt_offload == {
"mem_0": 38912,
"mem_peak": mem_peaks[3],
"mem_after_fwd": 43520,
"mem_after_bwd": 74240,
}, fairscale_cpt_offload
class CpuOffloadModel(nn.Module):
"""Model used to check cpu offload memory saving"""
def __init__(self, enable_checkpoint=False, cpu_offload=False):
super().__init__()
torch.manual_seed(0) # make sure weights are deterministic.
# These numbers are picked to show cpu_offload memory saving.
# Inner (recomputed) activation sizes need to be just right
# to show the benefit.
self.layers = nn.Sequential(
nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4), nn.Linear(4, 8)),
nn.Sequential(nn.Linear(8, 4), nn.Linear(4, 4), nn.Linear(4, 4)),
nn.Sequential(nn.Linear(4, 6), nn.Linear(6, 8), nn.Linear(8, 2)),
)
if enable_checkpoint:
for i, layer in enumerate(self.layers):
# Only middle layer needs to have offloading
self.layers[i] = checkpoint_wrapper(layer, cpu_offload if i == 1 else False)
def forward(self, x):
return self.layers(x)
@skip_if_no_cuda
def test_offload_memory():
if torch_version() >= (1, 12, 0):
pytest.skip("to be fixed")
device = "cuda"
input = torch.rand(60, 24, 4).requires_grad_(True)
model = CpuOffloadModel().to(device)
base = get_loss_and_gnorm(model, input.to(device))
model = CpuOffloadModel(True).to(device)
cpt = get_loss_and_gnorm(model, input.to(device))
model = CpuOffloadModel(True, True).to(device)
offload = get_loss_and_gnorm(model, input.to(device))
for key in "loss", "gnorm":
if not (base[key] == cpt[key] == offload[key]):
# Use print to collect all debugging info.
print(base, cpt, offload)
assert 0
del base[key]
del cpt[key]
del offload[key]
ref_base = {"mem_0": 32256, "mem_peak": 334336, "mem_after_fwd": 274944, "mem_after_bwd": 41984}
ref_cpt = {"mem_0": 32256, "mem_peak": 253952, "mem_after_fwd": 101888, "mem_after_bwd": 41984}
ref_offload = {"mem_0": 32256, "mem_peak": 207872, "mem_after_fwd": 55808, "mem_after_bwd": 41984}
if not (base == ref_base and cpt == ref_cpt and offload == ref_offload):
# Use print to collect all debugging info.
print(base, cpt, offload)
assert 0
class MultiinMultioutModel(nn.Module):
"""Model used to check different inputs and outputs"""
def __init__(self, multiout=False, checkpoint_config=0):
super().__init__()
torch.manual_seed(0) # make sure weights are deterministic.
self.multiout = multiout
self.conv1 = nn.Sequential(nn.Conv2d(1, 5, 3), nn.ReLU(), nn.Conv2d(5, 5, 3))
self.conv2 = nn.Sequential(nn.Conv2d(3, 5, 3), nn.ReLU(), nn.Conv2d(5, 5, 3))
assert 0 <= checkpoint_config <= 3
if checkpoint_config & 1:
self.conv1 = checkpoint_wrapper(self.conv1)
if checkpoint_config & (1 << 1):
self.conv2 = checkpoint_wrapper(self.conv2)
def forward(self, x1, x2=None):
out1 = self.conv1(x1)
out2 = self.conv2(x2)
if self.multiout:
return out1, out2
return out1 + out2
@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.parametrize("multiout", [True, False])
@pytest.mark.parametrize("checkpoint_config", [1, 2, 3])
def test_multiin_multiout(device, multiout, checkpoint_config):
if "cuda" in device and not torch.cuda.is_available():
pytest.skip("test requires a GPU")
def train(model, in1, in2):
out = model(in1, x2=in2)
if isinstance(out, tuple):
out = torch.cat(out)
loss = out.sum()
loss.backward()
gnorm = torch.norm(torch.stack([torch.norm(p.grad.detach()) for p in model.parameters()]))
return {"loss": loss.item(), "gnorm": gnorm.item()}
in1 = torch.rand(4, 1, 32, 32).requires_grad_(True)
in2 = torch.rand(4, 3, 32, 32).requires_grad_(True)
model = MultiinMultioutModel(multiout, 0).to(device)
no_cpt = train(model, in1.to(device), in2.to(device))
model = MultiinMultioutModel(multiout, checkpoint_config).to(device)
cpt = train(model, in1.to(device), in2.to(device))
for key in ["loss", "gnorm"]:
if no_cpt[key] != cpt[key]:
print(no_cpt, cpt)
assert 0
def test_deprecated_path():
# Check if import works as before.
# from fairscale.nn.misc.checkpoint_activations import checkpoint_wrapper
from fairscale.nn import checkpoint_wrapper
ffn = nn.Sequential(
nn.Linear(32, 128),
nn.Dropout(p=0.5),
nn.Linear(128, 32),
)
ffn = checkpoint_wrapper(ffn, {})
# Check if direct import works as before.
ffn = nn.Sequential(
nn.Linear(32, 128),
nn.Dropout(p=0.5),
nn.Linear(128, 32),
)
ffn = deprecated_checkpoint_wrapper(ffn, {})
@skip_if_no_cuda
def test_list_input():
"""Test checkpointing with input argument type being a list.
Note: Testing shows that PyTorch's torch.utils.checkpoint function does not pass this test.
"""
count = 0
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Linear(2, 2)
def forward(self, x):
nonlocal count
count += 1
y = []
for i in x:
y.append(self.conv(i))
return y
model = nn.Sequential(checkpoint_wrapper(Model()), Model()).cuda()
in_data1 = torch.rand(4, 2).cuda()
in_data2 = torch.rand(4, 2).cuda()
# Forward. Count should be 2 for 2 modules.
out = model([in_data1, in_data2])
loss = sum(x.sum() for x in out)
assert count == 2, f"Incorrect count {count}"
# Backward. Adds 1 more forward call due to checkpoint.
loss.backward()
assert count == 3, f"Incorrect count {count}"
def test_checkpoint_disabling():
"""Test to check new disable_checkpoint() API added to checkpoint_wrapper."""
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.cnt = 0
self.linear = nn.Linear(2, 2)
def forward(self, x):
self.cnt += 1
y = []
for i in x:
y.append(self.linear(i))
return y
x = torch.rand(4, 2)
model1 = checkpoint_wrapper(TestModel())
model2 = checkpoint_wrapper(TestModel())
# Forward. cnt += 1
y = model1(x)
y = sum(i.sum() for i in y)
# Backward. cnt += 1
y.backward()
assert model1.cnt == 2
with disable_checkpointing():
# Forward. cnt += 1
y = model2(x)
y = sum(i.sum() for i in y)
# Backward. cnt remains same as checkpointing is disabled
y.backward()
assert model2.cnt == 1
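# Illustrative sketch (an assumed usage pattern, not an existing test): pairing
# disable_checkpointing() with torch.no_grad() skips recomputation bookkeeping entirely
# during inference on a checkpoint_wrapper-wrapped module.
def _example_inference_without_checkpointing(wrapped_model, x):
    wrapped_model.eval()
    with torch.no_grad(), disable_checkpointing():
        return wrapped_model(x)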
def test_checkpoint_requires_grad():
"""Test to check checkpointing when outputs do not require gradient."""
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.cnt = 0
self.linear = nn.Linear(2, 2)
def forward(self, x):
self.cnt += 1
return self.linear(x)
x = torch.rand(4, 2)
model = nn.Sequential(
checkpoint_wrapper(TestModel()),
checkpoint_wrapper(TestModel()),
checkpoint_wrapper(TestModel()),
checkpoint_wrapper(TestModel()),
)
model[0].requires_grad_(False)
model[1].requires_grad_(False)
model[2].requires_grad_(False)
y = model(x)
y = y.sum()
y.backward()
# Since only last model needs grad, we only run forward twice for it
assert model[0].cnt == 1
assert model[1].cnt == 1
assert model[2].cnt == 1
assert model[3].cnt == 2
# Now test with first model needing grad
model = nn.Sequential(
checkpoint_wrapper(TestModel()),
checkpoint_wrapper(TestModel()),
checkpoint_wrapper(TestModel()),
checkpoint_wrapper(TestModel()),
)
model[0].requires_grad_(True)
model[1].requires_grad_(False)
model[2].requires_grad_(False)
y = model(x)
y = y.sum()
y.backward()
# Since first model needs grad, all models need grad, so we run forward twice for all
assert model[0].cnt == 2
assert model[1].cnt == 2
assert model[2].cnt == 2
assert model[3].cnt == 2
# Stress test with multiple inputs/outputs, of which some are not Tensor
class TestModel2(nn.Module):
def __init__(self):
super().__init__()
self.cnt = 0
self.linear = nn.Linear(2, 2)
def forward(self, x, y, z):
self.cnt += 1
z = z + [self.cnt]
return self.linear(x + y), z, ["hi"]
model1 = checkpoint_wrapper(TestModel())
model2 = checkpoint_wrapper(TestModel())
model3 = checkpoint_wrapper(TestModel2())
model4 = checkpoint_wrapper(TestModel())
model1.requires_grad_(False)
model2.requires_grad_(False)
y = model4(model3(model1(x), model2(x), ["bye"])[0])
y = y.sum()
y.backward()
assert model1.cnt == 1
assert model2.cnt == 1
assert model3.cnt == 2
assert model4.cnt == 2
model1 = checkpoint_wrapper(TestModel())
model2 = checkpoint_wrapper(TestModel())
model3 = checkpoint_wrapper(TestModel2())
model4 = checkpoint_wrapper(TestModel())
model2.requires_grad_(False)
y = model4(model3(model1(x), model2(x), ["bye"])[0])
y = y.sum()
y.backward()
assert model1.cnt == 2
assert model2.cnt == 1
assert model3.cnt == 2
assert model4.cnt == 2
# Test flattened parameters
model = nn.Sequential(
FlattenParamsWrapper(checkpoint_wrapper(TestModel())),
FlattenParamsWrapper(checkpoint_wrapper(TestModel())),
FlattenParamsWrapper(checkpoint_wrapper(TestModel())),
FlattenParamsWrapper(checkpoint_wrapper(TestModel())),
)
model[0].requires_grad_(False)
model[1].requires_grad_(False)
y = model(x)
y = y.sum()
y.backward()
assert model[0].cnt == 1
assert model[1].cnt == 1
assert model[2].cnt == 2
assert model[3].cnt == 2
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from typing import Any, Callable
import pytest
import torch
from fairscale.nn.model_parallel import destroy_model_parallel
@pytest.fixture(autouse=True)
def manual_seed_zero() -> None:
torch.manual_seed(0)
def cuda_sleep_impl(seconds, cycles_per_ms):
torch.cuda._sleep(int(seconds * cycles_per_ms * 1000))
@pytest.fixture(scope="session")
def cuda_sleep() -> Callable:
# Warm-up CUDA.
torch.empty(1, device="cuda")
# From test/test_cuda.py in PyTorch.
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
return functools.partial(cuda_sleep_impl, cycles_per_ms=cycles_per_ms)
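# Worked example (added for clarity): if the 1_000_000-cycle timing run above took ~0.5 ms,
# cycles_per_ms is ~2e6, so cuda_sleep(0.05) enqueues int(0.05 * 2e6 * 1000) == 100_000_000
# busy-wait cycles, i.e. roughly 50 ms of stalling on the current CUDA stream.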
def pytest_report_header() -> str:
return f"torch: {torch.__version__}"
def pytest_runtest_setup(item: Any) -> None:
print("setup mpi function called")
def pytest_runtest_teardown(item: Any) -> None:
if "OMPI_COMM_WORLD_RANK" in os.environ:
destroy_model_parallel()
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
try:
torch.distributed.rpc.shutdown()
except Exception:
pass
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from fairscale.fair_dev.testing.testing import get_worker_map, set_random_seed, torch_spawn
from fairscale.nn.pipe import AsyncPipe
@torch_spawn([2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def simple_linears(pipe_class):
def sum_grad(parameters):
return sum([p.grad.sum() for p in parameters if p.grad is not None])
def zero_grad(parameters):
for p in parameters:
p.grad = None
set_random_seed(12345)
inputs = torch.rand(8, 1)
model = nn.Sequential(
nn.Linear(1, 2),
nn.Linear(2, 4),
nn.Linear(4, 2),
nn.Linear(2, 1),
)
# Without MultiProcessPipe
outputs = model(inputs)
loss = outputs.mean()
loss.backward()
grad_without_pipe = [
sum_grad([*model[0].parameters(), *model[1].parameters()]),
sum_grad([*model[2].parameters(), *model[3].parameters()]),
]
ref_without_pipe = [p.grad for p in model.parameters()]
zero_grad(model.parameters())
model = pipe_class(model, [2, 2], worker_map=get_worker_map(), chunks=4)
outputs = model(inputs)
if model.group.rank() == 1:
loss = outputs.mean()
loss.backward()
grad_with_pipe = sum_grad(model.partition.parameters())
# Both grads should be identical.
assert torch.allclose(grad_with_pipe, grad_without_pipe[1])
else:
model.back_helper(outputs)
grad_with_pipe = sum_grad(model.partition.parameters())
# Both grads should be identical.
assert torch.allclose(grad_with_pipe, grad_without_pipe[0])
torch.distributed.barrier()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from fairscale.fair_dev.testing.testing import get_worker_map, torch_spawn
from fairscale.nn.pipe import AsyncPipe
@torch_spawn([2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def inplace_on_requires_grad(pipe_class):
model = nn.Sequential(nn.Linear(1, 1), nn.ReLU(inplace=True))
model = pipe_class(model, [1, 1], worker_map=get_worker_map(), checkpoint="always")
x = torch.rand(1)
if pipe_class == AsyncPipe and model.group.rank() == 0:
# With AsyncPipe, model will wait forever for gradients if not eval
model.eval()
y = model(x)
message = r"a leaf Variable that requires grad .* used in an in-place operation."
if model.group.rank() == 1:
with pytest.raises(RuntimeError, match=message):
y.backward()
torch.distributed.barrier()
@torch_spawn([1])
@pytest.mark.xfail(strict=True)
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def inplace_on_not_requires_grad(pipe_class):
# In-place operation on a tensor not requiring grad doesn't cause a
# RuntimeError. Currently, we cannot detect this case.
model = nn.Sequential(nn.ReLU(inplace=True))
model = pipe_class(model, [1], worker_map=get_worker_map(), checkpoint="always")
x = torch.rand(1)
y = model(x)
del model
message = r"a leaf Variable that requires grad .* used in an in-place operation."
with pytest.raises(RuntimeError, match=message):
y.backward()
torch.distributed.barrier()
@torch_spawn([1])
@pytest.mark.xfail(strict=True)
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def inplace_incorrect_grad(pipe_class):
class M(nn.Module):
def forward(self, foo_bar):
# 'foo' requires grad but 'bar' does not. In-place operation on
# 'bar' won't cause a RuntimeError.
foo, bar = foo_bar
# add_(1) is not idempotent, in contrast to relu_(). If it is
# executed multiple times, it will accumulate each difference onto
# 'bar'.
bar.add_(1)
# 'bar' is still captured by checkpointing. 'foo' will get
# incorrect grad.
return foo * bar
model = nn.Sequential(M())
model = pipe_class(model, [1], worker_map=get_worker_map(), checkpoint="always")
foo = torch.tensor([1.0], requires_grad=True)
bar = torch.tensor([1.0])
output = model((foo, bar))
del model
output.backward()
# The gradient of 'foo' should be 2, but it is actually 3 because
# bar.add_(1) was executed twice due to checkpointing.
assert foo.grad.item() == 2.0
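# Worked arithmetic (added for clarity): d(foo * bar)/d(foo) == bar. After the forward pass
# bar is 2, so the correct grad is 2; the checkpointed recomputation runs bar.add_(1) again,
# so backward sees bar == 3 and the assert above fails, which is why the test is xfail.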
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests/__init__.py lets pytest import the application without a custom sys.path or PYTHONPATH.
# See also: https://docs.pytest.org/en/latest/goodpractices.html
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
import torch.nn.functional as F
from fairscale.fair_dev.testing.testing import get_worker_map, torch_spawn
from fairscale.nn.pipe import AsyncPipe
@torch_spawn([2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def python_autograd_function(pipe_class):
# FIXME deadlock with AsyncPipe?
# A Python autograd function might fail with this error:
#
# RuntimeError: Returning Variables sharing storage with other Variables
# that require grad is not supported in Python functions. Please submit a
# feature request if you hit this error.
#
# It doesn't look like an essential restriction. But it happens on the
# current PyTorch version. To avoid it, we should detach the tensor before
# returning by identity autograd functions, such as Wait, Fork, and Join.
torch.manual_seed(0)
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad):
return grad
class M(nn.Module):
def forward(self, input):
return Identity.apply(input)
model = nn.Sequential(M(), M())
model = pipe_class(model, [1, 1], worker_map=get_worker_map(), checkpoint="always").cuda()
model.eval()
x = torch.rand(42)
y = model(x)
if model.group.rank() == 1:
assert torch.allclose(x, y)
torch.distributed.rpc.shutdown()
torch.distributed.barrier()
@torch_spawn([3])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def exception_no_hang(pipe_class):
# In v0.0.2, once a failed partition receives a normal message
# (non-closing) for the next micro-batch, a hang occurred. The reason was
# that a failed partition didn't call in_queue.task_done() on a normal
# message. So the former partition was blocked at out_queue.join() for the
# micro-batch after the next one.
class ExpectedException(Exception):
pass
class Pass(nn.Module):
def forward(self, x):
return x
class Raise(nn.Module):
def forward(self, x):
raise ExpectedException()
model = nn.Sequential(Pass(), Pass(), Raise())
model = pipe_class(model, [1, 1, 1], worker_map=get_worker_map(), chunks=3)
model.eval()
if model.group.rank() == 2:
with pytest.raises(ExpectedException):
model(torch.rand(3))
else:
model(torch.rand(3))
torch.distributed.barrier()
@torch_spawn([2])
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="2 cuda devices required")
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def tuple_wait(cuda_sleep, pipe_class):
# In v0.0.3, Wait is applied to only the first tensor on a micro-batch.
# Under this behavior, if checkpointing was disabled, there's a possibility
# that gradient accumulations on other tensors are not synchronized
# properly to the copy stream.
class Sleep(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.detach()
@staticmethod
def backward(ctx, grad):
with torch.cuda.device(grad.device):
cuda_sleep(0.05)
return grad
class Layer1(nn.Module):
def forward(self, pair):
a, b = pair
return a * 1, b * 2, b * 3
class Layer2(nn.Module):
def forward(self, triple):
a, b, c = triple
b = Sleep.apply(b)
return a + b + c
model = nn.Sequential(Layer1(), Layer2())
model = pipe_class(
model,
[1, 1],
worker_map=get_worker_map(),
input_device=torch.cuda.current_device(),
chunks=32,
checkpoint="never",
).cuda()
a = torch.rand(1024, 3, 32, 32, device=0, requires_grad=True)
b = torch.rand(1024, 3, 32, 32, device=0, requires_grad=True)
y = model((a, b))
if model.group.rank() == 1:
y.norm().backward()
else:
model.back_helper(y)
if model.group.rank() == 0:
assert torch.isclose(b.grad.norm().cpu(), torch.tensor(5.000))
@torch_spawn([2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def parallel_randoms(pipe_class):
class Dropouts(nn.Module):
def forward(self, x):
for _ in range(100):
x = F.dropout(x, p=0.001)
return x
model = nn.Sequential(Dropouts(), Dropouts())
x = torch.rand(10, 10, requires_grad=True).cuda()
x.retain_grad()
model = pipe_class(
model,
[1, 1],
input_device=torch.cuda.current_device(),
worker_map=get_worker_map(),
chunks=10,
checkpoint="always",
).cuda()
y = model(x)
tensor_list = [torch.empty_like(x) for _ in range(2)]
if model.group.rank() == 1:
y.norm().backward()
torch.distributed.barrier()
tensor_list[model.group.rank()] = y
torch.distributed.all_gather(tensor_list, y, group=model.group)
assert tensor_list[0].to(torch.bool).tolist() == tensor_list[1].to(torch.bool).tolist()
else:
model.back_helper(y)
torch.distributed.barrier()
tensor_list[model.group.rank()] = x.grad
torch.distributed.all_gather(tensor_list, x.grad, group=model.group)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 Kakao Brain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from copy import deepcopy
import os
import time
import pytest
import torch
from torch import nn
from fairscale.fair_dev.testing.testing import get_worker_map, torch_spawn
from fairscale.internal import torch_version
from fairscale.nn.model_parallel.initialize import get_pipeline_parallel_group
from fairscale.nn.pipe import AsyncPipe
from fairscale.nn.pipe.types import LazyModule
@torch_spawn([2])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def parameters(pipe_class):
model = nn.Sequential(nn.Linear(1, 1))
pipe = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=1)
if torch.distributed.get_rank() == 0:
assert list(pipe.parameters()) != []
else:
assert list(pipe.parameters()) == []
@torch_spawn([2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def infiniband():
if torch.distributed.get_rank() == 0:
t = torch.Tensor(range(100)).cuda()
torch.distributed.broadcast(t, 0)
else:
t = torch.empty(100).cuda()
torch.distributed.broadcast(t, 0)
assert torch.equal(t, torch.Tensor(range(100)).cuda())
print(f"t on {torch.distributed.get_rank()} is {t}")
@torch_spawn([2])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" not in os.environ, reason="mpi required")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def infiniband2():
if torch.distributed.get_rank() == 0:
t = torch.Tensor(range(100)).cuda()
torch.distributed.send(t, 1, group=get_pipeline_parallel_group())
else:
t = torch.empty(100).cuda()
torch.distributed.recv(t, 0, group=get_pipeline_parallel_group())
assert torch.equal(t, torch.Tensor(range(100)).cuda())
print(f"t on {torch.distributed.get_rank()} is {t}")
@torch_spawn([2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def infiniband3():
t = torch.Tensor(range(100)).cuda()
torch.distributed.all_reduce(t, op=torch.distributed.ReduceOp.SUM)
assert torch.equal(t, torch.Tensor(range(0, 200, 2)).cuda())
@torch_spawn([2])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" not in os.environ, reason="mpi required")
def mpi():
seed = 1234
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.distributed.barrier()
tensor_size = (1024, 1024, 10)
torch.cuda.set_device(torch.distributed.get_rank()) # need to pin device or ucx gets unhappy
if torch.distributed.get_rank() == 0:
# t = torch.Tensor(range(10)).cuda(0)
t = torch.rand(*tensor_size).cuda(0)
torch.distributed.send(t, 1, tag=1234)
else:
t = torch.empty(*tensor_size).cuda(1)
torch.distributed.recv(t, 0, tag=1234)
t2 = torch.rand(*tensor_size).cuda(1)
assert torch.equal(t, t2)
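# Note (added for clarity): the equality holds because both ranks call torch.manual_seed(seed)
# with the same value, so rank 1's first torch.rand(*tensor_size) reproduces exactly the
# tensor that rank 0 generated and sent.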
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def public_attrs(pipe_class):
model = nn.Sequential(nn.Linear(1, 1))
pipe = pipe_class(
model,
balance=(1,),
worker_map=get_worker_map(),
chunks=42,
checkpoint="always",
)
assert pipe.balance == [1]
assert pipe.chunks == 42
assert isinstance(pipe.chunks, int)
assert pipe.checkpoint == "always"
assert isinstance(pipe.checkpoint, str)
@torch_spawn([2])
@pytest.mark.parametrize("balance", [[2], [1, 1]])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def sequential_like(balance, pipe_class):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = pipe_class(model, balance, worker_map=get_worker_map())
if balance == [2]:
if torch.distributed.get_rank() == 0:
assert len(model) == 2
assert list(model) == [a, b]
assert model[0] is a
assert model[1] is b
with pytest.raises(IndexError):
_ = model[2]
assert model[-1] is b
assert model[-2] is a
else:
assert len(model) == 0
assert list(model) == []
else:
assert len(model) == 1
if torch.distributed.get_rank() == 0:
assert list(model) == [a]
assert model[0] is a
assert model[-1] is a
else:
assert list(model) == [b]
assert model[0] is b
assert model[-1] is b
with pytest.raises(IndexError):
_ = model[1]
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def balance_wrong_length(pipe_class):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
with pytest.raises(ValueError):
pipe_class(model, balance=[1], worker_map=get_worker_map())
with pytest.raises(ValueError):
pipe_class(model, balance=[3], worker_map=get_worker_map())
@torch_spawn([2])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def balance_less_than_1(pipe_class):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
with pytest.raises(ValueError):
pipe_class(model, balance=[0, 2], worker_map=get_worker_map())
with pytest.raises(ValueError):
pipe_class(model, balance=[-1, 3], worker_map=get_worker_map())
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def chunks_less_than_1(pipe_class):
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(ValueError):
pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=0)
with pytest.raises(ValueError):
pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=-1)
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def too_few_devices(pipe_class):
model = nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 1), nn.Linear(1, 1), nn.Linear(1, 1))
with pytest.raises(IndexError):
# len(balance) > len(group.size())
model = pipe_class(model, balance=[1, 1, 1, 1], worker_map=get_worker_map())
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def batch_size_indivisible(pipe_class):
model = nn.Sequential(nn.Linear(1, 1))
model = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=4)
with pytest.warns(None) as record:
model(torch.rand(7, 1))
# Indivisible batch size is legal.
assert not record
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def batch_size_small(pipe_class):
model = nn.Sequential(nn.Linear(1, 1))
model = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=4)
with pytest.warns(None) as record:
model(torch.rand(2, 1))
# Batch size smaller than chunks is legal.
assert not record
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def checkpoint_mode(pipe_class):
def count_grad_fn(grad_fn, name, visited=set()):
if grad_fn in visited:
return 0
visited.add(grad_fn)
if grad_fn is None:
return 0
if grad_fn.__class__.__name__ == name:
return 1
counter = 0
for next_grad_fn, _ in grad_fn.next_functions:
counter += count_grad_fn(next_grad_fn, name, visited=visited)
return counter
model = nn.Sequential(nn.Linear(1, 1))
input = torch.rand(2, 1)
always = pipe_class(
model,
balance=[1],
worker_map=get_worker_map(),
chunks=2,
checkpoint="always",
)
except_last = pipe_class(
model,
balance=[1],
worker_map=get_worker_map(),
chunks=2,
checkpoint="except_last",
)
never = pipe_class(
model,
balance=[1],
worker_map=get_worker_map(),
chunks=2,
checkpoint="never",
)
always_output = always(input)
except_last_output = except_last(input)
never_output = never(input)
assert count_grad_fn(always_output.grad_fn, "CheckpointBackward") == 2
assert count_grad_fn(except_last_output.grad_fn, "CheckpointBackward") == 1
assert count_grad_fn(never_output.grad_fn, "CheckpointBackward") == 0
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def checkpoint_mode_invalid(pipe_class):
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(ValueError, match="checkpoint is not one of 'always', 'except_last', or 'never'"):
pipe_class(
model,
balance=[1],
worker_map=get_worker_map(),
chunks=2,
checkpoint="INVALID_CHECKPOINT",
)
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def checkpoint_mode_when_chunks_1(pipe_class):
model = nn.Sequential(nn.Linear(1, 1))
# All checkpoint modes are fine.
pipe_class(
model,
balance=[1],
worker_map=get_worker_map(),
chunks=1,
checkpoint="except_last",
)
pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=1, checkpoint="always")
pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=1, checkpoint="never")
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def checkpoint_eval(pipe_class):
model = nn.Sequential(nn.Linear(1, 1))
model = pipe_class(
model,
balance=[1],
worker_map=get_worker_map(),
chunks=2,
)
input = torch.rand(2, 1)
def find_grad_fn(grad_fn, name):
if grad_fn is None:
return False
if grad_fn.__class__.__name__ == name:
return True
for next_grad_fn, _ in grad_fn.next_functions:
if find_grad_fn(next_grad_fn, name):
return True
return False
model.train()
train_output = model(input)
assert find_grad_fn(train_output.grad_fn, "CheckpointBackward")
assert find_grad_fn(train_output.grad_fn, "RecomputeBackward")
model.eval()
eval_output = model(input)
assert not find_grad_fn(eval_output.grad_fn, "CheckpointBackward")
assert not find_grad_fn(eval_output.grad_fn, "RecomputeBackward")
@torch_spawn([2])
@pytest.mark.xfail(torch_version() < (1, 6, 0), reason="Doesn't work on torch < 1.6.0", strict=True)
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def checkpoint_non_float_input(pipe_class):
class ForkNonFloat(nn.Module):
def forward(self, input):
return (input * 2, torch.tensor([False]))
class JoinNonFloat(nn.Module):
def forward(self, input):
return input[0] * 2
model = nn.Sequential(ForkNonFloat(), JoinNonFloat())
model = pipe_class(
model,
balance=[1, 1],
worker_map=get_worker_map(),
chunks=1,
checkpoint="always",
)
input = torch.rand(1, requires_grad=True)
output = model(input)
if model.group.rank() == 1:
# with torch.autograd.detect_anomaly():
output.backward()
torch.distributed.barrier()
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def no_grad(pipe_class):
model = nn.Sequential(nn.Linear(1, 1))
model = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=2)
input = torch.rand(2, 1)
latent = None
def hook(module, input, output):
_ = module
_ = input
nonlocal latent
latent = output
partition = model.partition
partition.register_forward_hook(hook)
with torch.no_grad():
model(input)
assert latent.grad_fn is None
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def exception(pipe_class):
class ExpectedException(Exception):
pass
class Raise(nn.Module):
def forward(self, *_):
raise ExpectedException()
model = nn.Sequential(Raise())
model = pipe_class(model, balance=[1], worker_map=get_worker_map(), chunks=1)
with pytest.raises(ExpectedException):
model(torch.rand(1))
# FIXME(tom) should probably signal to all hosts in group to stop
@torch_spawn([4])
@pytest.mark.skipif(torch.cuda.is_available() and torch.cuda.device_count() < 4, reason="Not enough GPUs")
@pytest.mark.xfail(strict=True)
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def exception_early_stop_asap(pipe_class):
"""Even the first partitions have finished to process, the partition before
the failed partition hould be killed as soon as possible.
"""
class ExpectedException(Exception):
pass
class Pass(nn.Module):
def forward(self, x):
return x
counter = 0
class Counter(nn.Module):
def forward(self, x):
time.sleep(0.1)
nonlocal counter
counter += 1
return x
class Raise(nn.Module):
def forward(self, x):
raise ExpectedException()
model = nn.Sequential(Pass(), Pass(), Counter(), Raise())
model = pipe_class(model, [1, 1, 1, 1], worker_map=get_worker_map(), chunks=3)
with pytest.raises(ExpectedException):
model(torch.rand(3))
# If the early stop doesn't work, it would be 3 instead.
assert counter == 2
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def input_pair(pipe_class):
class Two(nn.Module):
def __init__(self):
super().__init__()
self.fc_a = nn.Linear(1, 1)
self.fc_b = nn.Linear(1, 1)
def forward(self, a_and_b):
a, b = a_and_b
return (self.fc_a(a), self.fc_b(b))
model = nn.Sequential(Two())
model = pipe_class(
model,
balance=[1],
worker_map=get_worker_map(),
chunks=2,
)
a = torch.rand(10, 1, requires_grad=True)
b = torch.rand(10, 1, requires_grad=True)
a_out, b_out = model((a, b))
loss = (a_out + b_out).mean()
loss.backward()
assert a.grad is not None
assert b.grad is not None
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def input_singleton(pipe_class):
class One(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(1, 1)
def forward(self, only_a):
(a,) = only_a
return (self.fc(a),)
model = nn.Sequential(One())
model = pipe_class(
model,
balance=[1],
worker_map=get_worker_map(),
chunks=2,
)
a = torch.rand(10, 1, requires_grad=True)
(a_out,) = model((a,))
loss = a_out.mean()
loss.backward()
assert all(p.grad is not None for p in model.parameters())
assert a.grad is not None
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def input_varargs(pipe_class):
model = nn.Sequential(nn.Linear(1, 1))
model = pipe_class(model, balance=[1], worker_map=get_worker_map())
a = torch.rand(1)
b = torch.rand(1)
# TypeError: forward() takes 2 positional arguments but 3 were given
with pytest.raises(TypeError):
model(a, b)
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def non_tensor(pipe_class):
class NonTensor(nn.Module):
def forward(self, _):
return "hello"
model = nn.Sequential(NonTensor())
model = pipe_class(model, balance=[1], worker_map=get_worker_map())
x = torch.rand(1)
# TypeError: expected Tensor as element 0 in argument 0, but got str
with pytest.raises(TypeError):
model(x)
# TypeError: expected Tensor to scatter, but got str
with pytest.raises(TypeError):
model("hello")
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def non_tensor_tuple(pipe_class):
class NonTensorTuple(nn.Module):
def forward(self, x):
return (x, "hello")
model = nn.Sequential(NonTensorTuple())
model = pipe_class(model, balance=[1], worker_map=get_worker_map())
x = torch.rand(1)
# TypeError: CheckpointBackward.forward: expected Variable (got str) for return value 1
with pytest.raises(TypeError):
model(x)
# TypeError: expected Tensor to scatter, but got str
with pytest.raises(TypeError):
model((x, "hello"))
@torch_spawn([1])
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
@pytest.mark.parametrize("lazy", [True, False])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def deferred_batch_norm(checkpoint, lazy, pipe_class):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe_fn = lambda: pipe_bn # noqa: E731
if lazy:
model = [LazyModule(pipe_fn)]
else:
model = nn.Sequential(pipe_bn)
pipe = pipe_class(
model,
balance=[1],
worker_map=get_worker_map(),
chunks=2,
checkpoint=checkpoint,
deferred_batch_norm=True,
)
x = torch.rand(4, 3, 10, 10)
pipe(x).mean().backward()
bn(x).mean().backward()
assert torch.allclose(pipe[0].running_mean, bn.running_mean, atol=1e-4)
assert torch.allclose(pipe[0].running_var, bn.running_var, atol=1e-4)
@torch_spawn([1])
@pytest.mark.parametrize("checkpoint", ["never", "always"])
@pytest.mark.parametrize("lazy", [True, False])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def deferred_batch_norm_params(checkpoint, lazy, pipe_class):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe_fn = lambda: pipe_bn # noqa: E731
if lazy:
model = [LazyModule(pipe_fn)]
else:
model = nn.Sequential(pipe_bn)
pipe = pipe_class(
model,
balance=[1],
worker_map=get_worker_map(),
chunks=1,
checkpoint=checkpoint,
deferred_batch_norm=True,
)
x = torch.rand(4, 3, 10, 10)
pipe(x).mean().backward()
bn(x).mean().backward()
assert pipe[0].weight.grad is not None
assert pipe[0].bias.grad is not None
assert torch.allclose(pipe[0].weight.grad, bn.weight.grad, atol=1e-4)
assert torch.allclose(pipe[0].bias.grad, bn.bias.grad, atol=1e-4)
@torch_spawn([4])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def devices(pipe_class):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
c = nn.Linear(1, 1)
# With four spawned ranks and three partitions, rank 3 is an extra rank.
model = nn.Sequential(a, b, c)
model = pipe_class(model, [1, 1, 1], worker_map=get_worker_map())
# Extra devices must be discarded.
if model.group.rank() == 3:
assert model.pipeline is None
@torch_spawn([2])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def partitions(pipe_class):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = pipe_class(model, [1, 1], worker_map=get_worker_map())
assert isinstance(model.partition, nn.Sequential)
if model.group.rank() == 0:
assert model[0].weight == a.weight
else:
assert model[0].weight == b.weight
@torch_spawn([2])
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def deny_moving(pipe_class):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = pipe_class(model, [1, 1], worker_map=get_worker_map())
model.cuda()
model.cpu()
model.to(torch.device("cuda"))
model.to(0)
model.to("cuda")
model.to(device=0)
model.to(torch.rand(1))
model.to(tensor=torch.rand(1))
# Casting is allowed.
model.half()
model.to(torch.double)
model.to(dtype=torch.float)
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def empty_module(pipe_class):
# Empty sequential module is not illegal.
model = nn.Sequential()
model = pipe_class(model, [], worker_map=get_worker_map())
assert model(torch.tensor([42])) == torch.tensor([42])
assert model((torch.tensor([42]),)) == (torch.tensor([42]),)
# But only a tensor or a tuple of tensors is legal in MultiProcessPipe.
with pytest.raises(TypeError):
model(42)
@torch_spawn([2])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
@pytest.mark.skip(reason="TODO(msb) handle named_children")
def named_children(pipe_class):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(OrderedDict([("a", a), ("b", b)]))
model = pipe_class(model, [1, 1], worker_map=get_worker_map())
names = set(n for n, _ in model.named_modules())
if model.group.rank() == 0:
assert "0.a" in names
else:
assert "0.b" in names
# MultiProcessPipe doesn't support __getattr__. Unlike nn.Sequential, MultiProcessPipe requires
# several methods in its namespace.
with pytest.raises(AttributeError):
model.a
@torch_spawn([1])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def recommend_auto_balance(pipe_class):
with pytest.raises(ValueError):
# module and sum of balance have different length (module: 0, sum of balance: 1)
pipe_class(nn.Sequential(), [1])
with pytest.raises(ValueError):
# module and sum of balance have different length (module: 2, sum of balance: 1)
pipe_class(nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 1)), [1])
@torch_spawn([2])
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def lazy_construction(pipe_class):
init_count = 0
class Custom(nn.Module):
def __init__(self):
super(Custom, self).__init__()
nonlocal init_count
init_count += 1
def forward(self, x):
return x
model = [
LazyModule(lambda: Custom()),
LazyModule(lambda: Custom()),
LazyModule(lambda: Custom()),
LazyModule(lambda: Custom()),
]
pipe = pipe_class(model, balance=[2, 2], worker_map=get_worker_map())
assert isinstance(pipe[0], Custom)
assert isinstance(pipe[1], Custom)
assert len(pipe) == 2
assert init_count == 2
@torch_spawn([2])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" in os.environ, reason="doesn't apply to mpi")
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def missing_worker_map(pipe_class):
model = nn.Sequential(nn.ReLU(), nn.ReLU())
with pytest.raises(ValueError, match="'RpcTransport' requires 'worker_map' to be set"):
pipe_class(model, [1, 1])
@torch_spawn([2])
@pytest.mark.skip(reason="currently broken")
@pytest.mark.parametrize("pipe_class", [AsyncPipe])
def verify_module_duplicate_parameters_on_distinct_partitions(pipe_class):
class Surrogate(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
conv = nn.Conv2d(3, 3, 1)
model = nn.Sequential(Surrogate(conv), Surrogate(conv))
# FIXME(tom) can't have duplicate params with separate processes
with pytest.raises(ValueError, match="module with duplicate parameters on distinct devices is not supported"):
pipe_class(model, [1, 1], worker_map=get_worker_map())
@torch_spawn([4])
def async_event_loop():
model = nn.Sequential(nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 10), nn.ReLU())
pipe = AsyncPipe(model, [1, 1, 1, 1], worker_map=get_worker_map(), chunks=10)
inputs = torch.rand(100, 10)
output = pipe(inputs)
if pipe.final_stage:
loss = output.mean()
loss.backward()
|
import copy
import os
import pytest
import torch
from torch import nn
from torch.distributed import rpc
from fairscale.fair_dev.testing.testing import get_worker_map, torch_spawn
from fairscale.internal import torch_version
from fairscale.nn.model_parallel.initialize import get_pipeline_parallel_group
from fairscale.nn.pipe import PipeRPCWrapper
def init_rpc():
os.environ["MASTER_PORT"] = "10639"
init_method = f"tcp://{os.environ['MASTER_ADDR']}:{os.environ['MASTER_PORT']}"
rpc.init_rpc(
f"Test{torch.distributed.get_rank()}",
rank=torch.distributed.get_rank(),
world_size=torch.distributed.get_world_size(),
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(init_method=init_method),
)
@torch_spawn([2])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" not in os.environ, reason="mpi required")
def basic_rpc():
init_rpc()
if torch.distributed.get_rank() != 0:
rpc.shutdown()
torch.distributed.barrier()
return
model = [nn.Linear(10, 10), nn.ReLU()]
pipe = PipeRPCWrapper(model, [1, 1], input_device=torch.cuda.current_device(), worker_map=get_worker_map())
pipe.foreach_worker(register_optimizer, include_self=True)
inputs = torch.rand(10).cuda()
output = pipe(inputs)
loss = output.mean()
loss.backward()
pipe.foreach_worker(step_optimizer, include_self=True)
pipe.eval()
rpc.shutdown()
torch.distributed.barrier()
def register_optimizer(ctx, model):
if len(list(model.parameters())) > 0:
model.optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
else:
model.optimizer = None
def step_optimizer(ctx, model):
if model.optimizer:
model.optimizer.step()
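# register_optimizer and step_optimizer are callbacks invoked on every pipeline worker via
# pipe.foreach_worker(fn, include_self=True); each invocation receives that worker's local
# partition as `model`, so parameter-less partitions (e.g. a bare nn.ReLU stage) end up with
# optimizer = None and are skipped at step time.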
def check_pipe_against_reference(balance, model_constructor, checkpoint="except_last", custom_inputs=None):
model = model_constructor()
reference_model = model_constructor()
for src, dst in zip(model, reference_model):
dst.load_state_dict(copy.deepcopy(src.state_dict()))
reference_model = nn.Sequential(*reference_model).cuda()
pipe = PipeRPCWrapper(
model,
balance,
input_device=torch.cuda.current_device(),
worker_map=get_worker_map(),
checkpoint=checkpoint,
)
pipe.foreach_worker(register_optimizer, include_self=True)
register_optimizer(None, reference_model)
inputs = torch.rand(10).cuda()
target = torch.rand(10).cuda()
cloned = inputs.clone()
output = pipe(inputs)
ref_out = reference_model(inputs)
assert torch.equal(ref_out.cpu(), output.cpu())
for out in output, ref_out:
target = target.to(out.device)
loss = nn.MSELoss()(out, target)
loss.backward()
pipe.foreach_worker(step_optimizer, include_self=True)
step_optimizer(None, reference_model.cuda())
pipe.eval()
reference_model.eval()
final_output = pipe(inputs)
final_ref = reference_model(inputs.cuda())
assert torch.equal(final_output.cpu(), final_ref.cpu())
@torch_spawn([3])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" not in os.environ, reason="mpi required")
def rpc_optimizer():
init_rpc()
if torch.distributed.get_rank() != 0:
rpc.shutdown()
torch.distributed.barrier()
return
def model_with_reuse():
reused_1 = nn.Linear(10, 10)
return [reused_1, nn.ReLU(), reused_1, nn.ReLU(), reused_1, nn.ReLU()]
check_pipe_against_reference(
[2, 2, 2],
lambda: [nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 10), nn.ReLU()],
)
check_pipe_against_reference([2, 1, 1], model_with_reuse)
rpc.shutdown()
torch.distributed.barrier()
@torch_spawn([6])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" not in os.environ, reason="mpi required")
def rpc_megatron_reuse():
from fairscale.nn.model_parallel import layers
from fairscale.nn.model_parallel.initialize import destroy_model_parallel, initialize_model_parallel
def make_model_simple():
return [
layers.ColumnParallelLinear(10, 10),
nn.ReLU(),
layers.RowParallelLinear(10, 10),
nn.ReLU(),
layers.ColumnParallelLinear(10, 10),
nn.ReLU(),
layers.RowParallelLinear(10, 10),
nn.ReLU(),
nn.Linear(10, 10),
nn.ReLU(),
]
def make_model_with_reuse():
column = layers.ColumnParallelLinear(10, 10)
row = layers.RowParallelLinear(10, 10)
return [
column,
nn.ReLU(),
row,
nn.ReLU(),
column,
nn.ReLU(),
row,
nn.ReLU(),
nn.Linear(10, 10),
nn.ReLU(),
]
destroy_model_parallel()
torch.distributed.destroy_process_group()
torch.distributed.init_process_group("gloo", rank=int(os.environ["RANK"]), world_size=int(os.environ["WORLD_SIZE"]))
initialize_model_parallel(2, 3, model_parallel_backend="nccl", pipeline_backend="mpi")
init_rpc()
if get_pipeline_parallel_group().rank() != 0:
rpc.shutdown()
torch.distributed.barrier()
return
check_pipe_against_reference([4, 4, 2], make_model_simple, "always")
check_pipe_against_reference([4, 2, 2], make_model_with_reuse)
rpc.shutdown()
torch.distributed.barrier()
@torch_spawn([3])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" not in os.environ, reason="mpi required")
def rpc_reuse_in_final_stage():
# 'reused' and 'reused2' are located on stage 2, so the backward pass for
# the final stage will need to first send gradients to stage 2, then receive
# gradients from stage 2. This tests custom logic to handle reuse of layers
# in the final stage of the pipeline.
reused = nn.Linear(10, 10)
reused2 = nn.Linear(10, 10)
model = [
nn.Linear(10, 10),
nn.ReLU(),
nn.Linear(10, 10),
reused2,
nn.ReLU(),
reused,
nn.ReLU(),
reused,
reused2,
nn.ReLU(),
reused,
nn.ReLU(),
]
balance = [2, 3, 4]
init_rpc()
if torch.distributed.get_rank() != 0:
rpc.shutdown()
torch.distributed.barrier()
return
pipe = PipeRPCWrapper(model, balance, worker_map=get_worker_map())
inputs = torch.rand(10).cuda()
target = torch.rand(10).cuda()
output = pipe(inputs)
nn.MSELoss()(output, target).backward()
output = pipe(inputs)
nn.MSELoss()(output, target).backward()
rpc.shutdown()
torch.distributed.barrier()
@torch_spawn([3])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" not in os.environ, reason="mpi required")
def rpc_multiple_tensors():
class FuseTwo(nn.Module):
def forward(self, left, right):
return left + right
class SplitTwo(nn.Module):
def forward(self, inputs):
return (inputs, 2 * inputs)
@torch_spawn([2])
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" in os.environ, reason="no mpi")
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
# TODO(msb) Fix this
@pytest.mark.skipif(torch_version() >= (1, 8, 0), reason="disabled for torch 1.8.0")
def construct_only_rank_zero():
model = [nn.Linear(10, 10), nn.ReLU()]
if torch.distributed.get_rank() == 0:
PipeRPCWrapper(model, [1, 1], worker_map=get_worker_map())
rpc.shutdown()
else:
        # Must enter the RPC loop to complete the PipeRPCWrapper constructor above
rpc.shutdown()
with pytest.raises(AssertionError):
PipeRPCWrapper(model, [1, 1], worker_map=get_worker_map())
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from fairscale.fair_dev.testing.testing import IdentityLayer, dist_init, set_random_seed, spawn_for_all_world_sizes
from fairscale.nn.model_parallel import initialize as mpu
from fairscale.nn.model_parallel.cross_entropy import vocab_parallel_cross_entropy
from fairscale.nn.model_parallel.mappings import scatter_to_model_parallel_region
def torch_cross_entropy(batch_size, seq_length, vocab_size, logits_scale, seed):
set_random_seed(seed)
identity = IdentityLayer((batch_size, seq_length, vocab_size), scale=logits_scale).cuda()
logits = identity()
target = torch.cuda.LongTensor(size=(batch_size, seq_length)).random_(0, vocab_size)
loss = F.cross_entropy(logits.view(-1, logits.size()[-1]), target.view(-1), reduction="none").view_as(target).mean()
loss.backward()
return loss, identity.weight.grad
def mpu_cross_entropy(batch_size, seq_length, vocab_size, logits_scale, seed):
set_random_seed(seed)
identity = IdentityLayer((batch_size, seq_length, vocab_size), scale=logits_scale).cuda()
logits = identity()
logits_parallel = scatter_to_model_parallel_region(logits)
target = torch.cuda.LongTensor(size=(batch_size, seq_length)).random_(0, vocab_size)
loss = vocab_parallel_cross_entropy(logits_parallel, target).mean()
loss.backward()
return loss, identity.weight.grad
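# Both helpers build identical logits from the same seed; mpu_cross_entropy merely scatters
# the vocab dimension across model-parallel ranks before computing the loss. The loss and
# d(loss)/d(logits) must therefore match the plain F.cross_entropy reference, up to the
# 1e-6 tolerance used in run_test_cross_entropy below.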
def run_test_cross_entropy(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
if torch.distributed.get_rank() == 0:
print("> testing cross entropy with model parallel size {} ...".format(model_parallel_size))
mpu.initialize_model_parallel(model_parallel_size)
model_parallel_size = mpu.get_model_parallel_world_size()
batch_size = 13
seq_length = 17
vocab_size_per_partition = 11
logits_scale = 1000.0
vocab_size = vocab_size_per_partition * model_parallel_size
seed = 1234
loss_torch, grad_torch = torch_cross_entropy(batch_size, seq_length, vocab_size, logits_scale, seed)
loss_mpu, grad_mpu = mpu_cross_entropy(batch_size, seq_length, vocab_size, logits_scale, seed)
error = loss_torch.sub_(loss_mpu).abs().max()
print(" max error in loss on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-6
error = grad_torch.sub_(grad_mpu).abs().max()
print(" max error in grad on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-6
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def test_cross_entropy():
spawn_for_all_world_sizes(run_test_cross_entropy)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
import torch.nn.init as init
from torch.nn.parameter import Parameter
from fairscale.fair_dev.testing.testing import dist_init, set_random_seed, spawn_for_all_world_sizes
from fairscale.nn.model_parallel import initialize as mpu
from fairscale.nn.model_parallel import layers
def run_test_parallel_embedding(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
if torch.distributed.get_rank() == 0:
print("> testing parallel embedding with model parallel size {} ...".format(model_parallel_size))
mpu.initialize_model_parallel(model_parallel_size)
model_parallel_size = mpu.get_model_parallel_world_size()
batch_size = 17
seq_length = 23
vocab_size = 48
hidden_size = 16
seed = 1236
set_random_seed(123)
input_data = torch.LongTensor(size=(batch_size, seq_length)).random_(0, vocab_size).cuda()
loss_weight = torch.randn([batch_size, seq_length, hidden_size]).cuda()
set_random_seed(seed)
embedding_original = torch.nn.Embedding(vocab_size, hidden_size).cuda()
output = embedding_original(input_data)
loss_original = torch.mul(output, loss_weight).sum()
loss_original.backward()
set_random_seed(seed)
embedding_parallel = layers.ParallelEmbedding(vocab_size, hidden_size, init_method=init.normal_).cuda()
output = embedding_parallel(input_data)
loss_parallel = torch.mul(output, loss_weight).sum()
loss_parallel.backward()
set_random_seed(seed)
embedding_vocab_parallel = layers.VocabParallelEmbedding(vocab_size, hidden_size, init_method=init.normal_).cuda()
output = embedding_vocab_parallel(input_data)
loss_vocab_parallel = torch.mul(output, loss_weight).sum()
loss_vocab_parallel.backward()
torch.distributed.barrier()
error = loss_parallel.sub(loss_original).abs()
print(" error in loss (parallel) on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-12, "error: {}".format(error)
torch.distributed.barrier()
error = loss_vocab_parallel.sub(loss_original).abs()
print(" error in loss (vocab parallel) on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-12, "error: {}".format(error)
weight_grad_orig = torch.split(embedding_original.weight.grad, hidden_size // model_parallel_size, 1)[
mpu.get_model_parallel_rank()
]
error = embedding_parallel.weight.grad.sub(weight_grad_orig).abs().max()
print(" error in grad (parallel) on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-12, "error: {}".format(error)
weight_grad_orig = torch.split(embedding_original.weight.grad, vocab_size // model_parallel_size, 0)[
mpu.get_model_parallel_rank()
]
error = embedding_vocab_parallel.weight.grad.sub(weight_grad_orig).abs().max()
print(" error in grad (vocab parallel) on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-12, "error: {}".format(error)
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def run_test_initialize_affine_weight(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
mpu.initialize_model_parallel(model_parallel_size)
if torch.distributed.get_rank() == 0:
print("> testing initialize_affine_weight with model parallel size: {}".format(model_parallel_size))
model_parallel_size = mpu.get_model_parallel_world_size()
seed = 12345
input_size_coeff = 13
input_size = input_size_coeff * model_parallel_size
output_size_coeff = 17
output_size = output_size_coeff * model_parallel_size
# ---------------
# Column parallel
# ---------------
weight = torch.empty(output_size_coeff, input_size)
set_random_seed(seed)
layers._initialize_affine_weight(weight, output_size, input_size, output_size_coeff, 0, torch.nn.init.normal_)
# Target.
set_random_seed(seed)
master_weight = torch.empty(output_size, input_size)
torch.nn.init.normal_(master_weight)
rank = mpu.get_model_parallel_rank()
my_weight = torch.split(master_weight, output_size_coeff, dim=0)[rank].contiguous().clone()
# Compare.
error = weight.sub(my_weight).abs().max()
torch.distributed.barrier()
print(
" column parallel max error (should be zero) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6, error
# ------------
# Row parallel
# ------------
weight = torch.empty(output_size, input_size_coeff)
set_random_seed(seed)
layers._initialize_affine_weight(weight, output_size, input_size, input_size_coeff, 1, torch.nn.init.normal_)
# Target.
set_random_seed(seed)
master_weight = torch.empty(output_size, input_size)
torch.nn.init.normal_(master_weight)
rank = mpu.get_model_parallel_rank()
my_weight = torch.split(master_weight, input_size_coeff, dim=1)[rank].contiguous().clone()
# Compare.
error = weight.sub(my_weight).abs().max()
torch.distributed.barrier()
print(
" row parallel max error (should be zero) on global rank {}: {}".format(torch.distributed.get_rank(), error)
)
assert error < 1.0e-6, error
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(" >> passed the test :-)")
class IdentityLayer2D(torch.nn.Module):
def __init__(self, m, n):
super(IdentityLayer2D, self).__init__()
self.weight = Parameter(torch.Tensor(m, n))
torch.nn.init.xavier_normal_(self.weight)
def forward(self):
return self.weight
def run_test_column_parallel_linear(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
mpu.initialize_model_parallel(model_parallel_size)
if torch.distributed.get_rank() == 0:
print("> testing ColumnParallelLinear with model parallel size: {}".format(model_parallel_size))
model_parallel_size = mpu.get_model_parallel_world_size()
seed = 12345
set_random_seed(seed)
input_size_coeff = 13
input_size = input_size_coeff * model_parallel_size
output_size_coeff = 17
output_size = output_size_coeff * model_parallel_size
batch_size = 7
# Network
identity_layer = IdentityLayer2D(batch_size, input_size).cuda()
linear_layer = layers.ColumnParallelLinear(input_size, output_size, keep_master_weight_for_test=True).cuda()
loss_weight = torch.randn([batch_size, output_size]).cuda()
# Forward
input_ = identity_layer()
output = linear_layer(input_)
loss = torch.mul(output, loss_weight).sum()
# Backward
loss.backward()
# Values.
dLdY = loss_weight
X = identity_layer.weight
A = linear_layer.master_weight.cuda()
dLdA = torch.matmul(dLdY.t(), X)
dLdb = torch.matmul(torch.ones(batch_size, 1).cuda().t(), dLdY).view(-1)
dLdX = torch.matmul(dLdY, A)
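    # Reference gradients computed above: since loss = sum(output * loss_weight), the upstream
    # gradient dLdY equals loss_weight, and for output = X @ A.t() + b:
    #   dL/dA = dLdY.t() @ X,   dL/db = ones(1, batch) @ dLdY,   dL/dX = dLdY @ A.
    # The rank-local comparison below takes only the slice of dL/dA owned by this model-parallel
    # rank (output rows for column-parallel, input columns in the row-parallel test further down).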
rank = mpu.get_model_parallel_rank()
my_dLdA = torch.split(dLdA, output_size_coeff, dim=0)[rank].contiguous().clone()
error = my_dLdA.sub(linear_layer.weight.grad).abs().max()
torch.distributed.barrier()
print(" error in dLdA on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-6, error
my_dLdb = torch.split(dLdb, output_size_coeff, dim=0)[rank].contiguous().clone()
error = my_dLdb.sub(linear_layer.bias.grad).abs().max()
torch.distributed.barrier()
print(" error in dLdb on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-6, error
error = dLdX.sub(identity_layer.weight.grad).abs().max()
torch.distributed.barrier()
print(" error in dLdX on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-6, error
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(" >> passed the test :-)")
def run_test_row_parallel_linear(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
mpu.initialize_model_parallel(model_parallel_size)
if torch.distributed.get_rank() == 0:
print("> testing RowParallelLinear with model parallel size: {}".format(model_parallel_size))
model_parallel_size = mpu.get_model_parallel_world_size()
seed = 12345
set_random_seed(seed)
input_size_coeff = 13
input_size = input_size_coeff * model_parallel_size
output_size_coeff = 17
output_size = output_size_coeff * model_parallel_size
batch_size = 7
# Network
identity_layer = IdentityLayer2D(batch_size, input_size).cuda()
linear_layer = layers.RowParallelLinear(input_size, output_size, keep_master_weight_for_test=True).cuda()
loss_weight = torch.randn([batch_size, output_size]).cuda()
# Forward
input_ = identity_layer()
output = linear_layer(input_)
loss = torch.mul(output, loss_weight).sum()
# Backward
loss.backward()
# Values.
dLdY = loss_weight
X = identity_layer.weight
A = linear_layer.master_weight.cuda()
dLdA = torch.matmul(dLdY.t(), X)
dLdb = torch.matmul(torch.ones(batch_size, 1).cuda().t(), dLdY).view(-1)
dLdX = torch.matmul(dLdY, A)
rank = mpu.get_model_parallel_rank()
my_dLdA = torch.split(dLdA, input_size_coeff, dim=1)[rank].contiguous().clone()
error = my_dLdA.sub(linear_layer.weight.grad).abs().max()
torch.distributed.barrier()
print(" error in dLdA on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-6, error
error = dLdb.sub(linear_layer.bias.grad).abs().max()
torch.distributed.barrier()
print(" error in dLdb on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-6, error
error = dLdX.sub(identity_layer.weight.grad).abs().max()
torch.distributed.barrier()
print(" error in dLdX on global rank {}: {}".format(torch.distributed.get_rank(), error))
assert error < 1.0e-6, error
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(" >> passed the test :-)")
def test_affine_weight():
spawn_for_all_world_sizes(run_test_initialize_affine_weight, deterministic=True)
def test_embedding():
spawn_for_all_world_sizes(run_test_parallel_embedding, deterministic=True)
def test_column_parallel():
spawn_for_all_world_sizes(run_test_column_parallel_linear, deterministic=True)
@pytest.mark.skipif("OMPI_COMM_WORLD_RANK" not in os.environ, reason="only works on mpi")
def test_row_parallel():
spawn_for_all_world_sizes(run_test_row_parallel_linear, deterministic=True)
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from fairscale.fair_dev.testing.testing import dist_init, spawn_for_all_world_sizes
from fairscale.nn.model_parallel import initialize as mpu
def run_test_initialize_model_parallel(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
if torch.distributed.get_rank() == 0:
print("> testing initialize_model_parallel with size {} ...".format(model_parallel_size))
model_parallel_size_ = min(model_parallel_size, torch.distributed.get_world_size())
assert not mpu.model_parallel_is_initialized()
mpu.initialize_model_parallel(model_parallel_size_)
assert mpu.model_parallel_is_initialized()
# Checks.
def check(group, world_size, rank):
assert world_size == torch.distributed.get_world_size(group=group)
assert rank == torch.distributed.get_rank(group=group)
# Model parallel.
world_size = model_parallel_size_
rank = torch.distributed.get_rank() % model_parallel_size_
assert world_size == mpu.get_model_parallel_world_size()
assert rank == mpu.get_model_parallel_rank()
check(mpu.get_model_parallel_group(), world_size, rank)
# Data parallel.
world_size = torch.distributed.get_world_size() // model_parallel_size_
    rank = torch.distributed.get_rank() // model_parallel_size_
assert world_size == mpu.get_data_parallel_world_size()
assert rank == mpu.get_data_parallel_rank()
check(mpu.get_data_parallel_group(), world_size, rank)
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def run_test_get_model_parallel_src_rank(rank, model_parallel_size_, filename, filename_rpc):
dist_init(rank, model_parallel_size_, filename, filename_rpc)
if torch.distributed.get_rank() == 0:
print("> testing get_model_parallel_src_rank with size {} ...".format(model_parallel_size_))
model_parallel_size = min(model_parallel_size_, torch.distributed.get_world_size())
assert not mpu.model_parallel_is_initialized()
mpu.initialize_model_parallel(model_parallel_size)
assert mpu.model_parallel_is_initialized()
# Checks
src_rank = torch.distributed.get_rank() - mpu.get_model_parallel_rank()
assert mpu.get_model_parallel_src_rank() == src_rank
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def test_initialize_model_parallel():
spawn_for_all_world_sizes(run_test_initialize_model_parallel)
def test_get_model_parallel_src_rank():
spawn_for_all_world_sizes(run_test_get_model_parallel_src_rank)
def test_adjacency(monkeypatch):
new_groups = []
data_parallel_size = 32
pipeline_length = 8
model_parallel_size = 4
class MockDistribued:
def get_rank(self):
return 0
def is_initialized(self):
return True
def get_world_size(self):
return data_parallel_size * pipeline_length * model_parallel_size
def new_group(self, args, backend=None):
new_groups.append(args.copy())
return ()
monkeypatch.setattr(torch, "distributed", MockDistribued())
mpu.initialize_model_parallel(model_parallel_size, pipeline_length)
from collections import defaultdict
buckets = defaultdict(list)
for group in new_groups:
buckets[len(group)].append(group)
assert sorted(list(buckets.keys())) == [model_parallel_size, pipeline_length, data_parallel_size]
assert len(buckets[model_parallel_size]) == pipeline_length * data_parallel_size
assert len(buckets[data_parallel_size]) == model_parallel_size * pipeline_length
assert len(buckets[pipeline_length]) == model_parallel_size * data_parallel_size
# Check that model_parallel groups are contiguous
for group in buckets[model_parallel_size]:
assert sorted(group) == group
assert list(range(group[0], group[-1] + 1)) == group
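# A minimal, self-contained sketch (hypothetical helper, not part of fairscale) of the
# contiguity property asserted above: each model-parallel group is a consecutive range of
# ranks. One plausible layout consistent with that check:
def _contiguous_model_parallel_groups(world_size, model_parallel_size):
    return [
        list(range(start, start + model_parallel_size))
        for start in range(0, world_size, model_parallel_size)
    ]
# e.g. _contiguous_model_parallel_groups(8, 4) == [[0, 1, 2, 3], [4, 5, 6, 7]]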
|
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from fairscale.fair_dev.testing.testing import dist_init, spawn_for_all_world_sizes
from fairscale.nn.model_parallel import initialize as mpu
from fairscale.nn.model_parallel import random
from fairscale.nn.model_parallel.random import get_cuda_rng_tracker, model_parallel_cuda_manual_seed
def run_test_set_cuda_rng_state(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
if torch.distributed.get_rank() == 0:
print("> testing set_rng_state with size {} ...".format(model_parallel_size))
mpu.initialize_model_parallel(model_parallel_size)
model_parallel_size = mpu.get_model_parallel_world_size()
size = 123
seed = 1234
torch.cuda.manual_seed(1234)
tensor = torch.cuda.FloatTensor(size)
# Get the state
rng_state = torch.cuda.get_rng_state()
rng_state_copy = rng_state.clone()
# Do some stuff.
for _ in range(5):
torch.randn(size, out=tensor)
result_1 = tensor.clone()
assert rng_state.sub(rng_state_copy).max() == 0
assert torch.cuda.get_rng_state().sub(rng_state_copy).max() > 0
# State should be different.
new_rng_state = torch.cuda.get_rng_state()
max_diff = new_rng_state.sub(rng_state).max()
print(
" max diff in rng state (should be non-zero) on global rank {}: {}".format(
torch.distributed.get_rank(), max_diff
)
)
assert max_diff > 0
# Reset the rng state and do the same stuff.
random._set_cuda_rng_state(rng_state)
for _ in range(5):
torch.randn(size, out=tensor)
random._set_cuda_rng_state(rng_state)
for _ in range(5):
torch.randn(size, out=tensor)
result_2 = tensor.clone()
# Results should be the same
error = result_2.sub(result_1).abs().max()
print(
" max error in generated tensors (should be zero) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
# Input state should have remained intact.
error = rng_state.sub(rng_state_copy).max()
print(
" max error in rng state (should be zero) on global rank {}: {}".format(torch.distributed.get_rank(), error)
)
assert error == 0
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def run_test_cuda_rng_tracker(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
if torch.distributed.get_rank() == 0:
print("> testing cuda rng tracker with size {} ...".format(model_parallel_size))
mpu.initialize_model_parallel(model_parallel_size)
model_parallel_size = mpu.get_model_parallel_world_size()
seed_1 = 1234
seed_2 = 4321
size = [12, 21]
tensor = torch.cuda.FloatTensor(size)
# Set to seed_1 and generate two tensors.
torch.cuda.manual_seed(seed_1)
torch.randn(size, out=tensor)
target_11 = tensor.clone()
torch.randn(size, out=tensor)
target_12 = tensor.clone()
# Set to seed_2 and generate two tensors.
torch.cuda.manual_seed(seed_2)
torch.randn(size, out=tensor)
target_21 = tensor.clone()
torch.randn(size, out=tensor)
target_22 = tensor.clone()
# Now if we interleave seed_1 and seed_2,
# we should still get the same tensors
torch.cuda.manual_seed(seed_1)
get_cuda_rng_tracker().add("test", seed_2)
torch.randn(size, out=tensor)
result_11 = tensor.clone()
with get_cuda_rng_tracker().fork("test"):
torch.randn(size, out=tensor)
result_21 = tensor.clone()
torch.randn(size, out=tensor)
result_12 = tensor.clone()
with get_cuda_rng_tracker().fork("test"):
torch.randn(size, out=tensor)
result_22 = tensor.clone()
diff = result_11.sub(result_21).abs().max()
diff = min(diff, result_12.sub(result_22).abs().max())
print(
" max diff in generated tensors (should be non-zero) on global rank {}: {}".format(
torch.distributed.get_rank(), diff
)
)
assert diff > 1.0e-6
error = max(result_11.sub(target_11).abs().max(), result_12.sub(target_12).abs().max())
error = max(error, result_21.sub(target_21).abs().max())
error = max(error, result_22.sub(target_22).abs().max())
print(
" max error in generated tensors (should be zero) on global rank {}: {}".format(
torch.distributed.get_rank(), error
)
)
assert error < 1.0e-6
# Reset the tracker
get_cuda_rng_tracker().reset()
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def run_test_model_parallel_cuda_manual_seed(rank, model_parallel_size, filename, filename_rpc):
dist_init(rank, model_parallel_size, filename, filename_rpc)
if torch.distributed.get_rank() == 0:
print("> testing model parallel cuda manual seed with size {} ...".format(model_parallel_size))
mpu.initialize_model_parallel(model_parallel_size)
model_parallel_size = mpu.get_model_parallel_world_size()
model_parallel_cuda_manual_seed(12345)
assert torch.cuda.initial_seed() == 12345
with get_cuda_rng_tracker().fork():
assert torch.cuda.initial_seed() == (12345 + 2718 + mpu.get_model_parallel_rank())
# Reset the tracker
get_cuda_rng_tracker().reset()
# Reset groups
mpu.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(">> passed the test :-)")
def test_set_cuda_rng_state():
spawn_for_all_world_sizes(run_test_set_cuda_rng_state)
def test_cuda_rng_tracker():
spawn_for_all_world_sizes(run_test_cuda_rng_tracker)
def test_model_parallel_cuda_manual_seed():
spawn_for_all_world_sizes(run_test_model_parallel_cuda_manual_seed)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import os
import random
import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairscale.fair_dev.testing.testing import DummyProcessGroup
from fairscale.nn import FullyShardedDataParallel as FSDP
from fairscale.nn import auto_wrap, default_auto_wrap_policy, enable_wrap, wrap
try:
from torch.cuda.amp import autocast
except ImportError:
autocast = None # type: ignore
class TestAutoWrap(unittest.TestCase):
def setUp(self) -> None:
        # For all the tests here, we use a fake process group and flatten_parameters=False, since
        # those should not affect how wrapping works.
self.process_group = DummyProcessGroup(rank=0, size=1)
def test_wrap(self):
with enable_wrap(wrapper_cls=FSDP, flatten_parameters=False, process_group=self.process_group):
layer = wrap(nn.Linear(5, 5))
assert isinstance(layer, FSDP)
assert layer.flatten_parameters is False
def test_wrap_disabled_outside_context(self):
layer = wrap(nn.Linear(5, 5))
assert isinstance(layer, nn.Linear)
def test_wrap_override_defaults(self):
with enable_wrap(wrapper_cls=FSDP, flatten_parameters=False, process_group=self.process_group):
layer = wrap(nn.Linear(5, 5), flatten_parameters=True)
assert isinstance(layer, FSDP)
assert layer.flatten_parameters
def test_auto_wrap(self):
"""
Test to ensure with auto wrap, we wrap child modules correctly based on the min_num_params.
``nn.Linear(5, 5)`` does not exceed the bucket size, but combined they do.
Root is not wrapped given there are not enough unwrapped params left and skip_params_check_for_root
is not set.
"""
with enable_wrap(wrapper_cls=FSDP, process_group=self.process_group, flatten_parameters=False):
sequential = nn.Sequential(nn.Linear(5, 5), nn.Sequential(nn.Linear(5, 5), nn.Linear(5, 5)))
my_auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=60)
model = auto_wrap(sequential, auto_wrap_policy=my_auto_wrap_policy)
assert isinstance(model, nn.Sequential)
assert isinstance(model[0], nn.Linear)
assert isinstance(model[1], FSDP)
assert isinstance(model[1].module[0], nn.Linear)
assert isinstance(model[1].module[1], nn.Linear)
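        # Worked numbers for the policy above: each nn.Linear(5, 5) holds 25 + 5 = 30 parameters,
        # so the inner Sequential holds 60 >= min_num_params and is wrapped, while the single
        # Linear left at the root (30 params) falls below the threshold, leaving the root unwrapped.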
def test_auto_wrap_skip_root_checks(self):
"""
Similar test as before but this time we set skip_params_check_for_root=True in the wrap policy.
So in this case the root is wrapped even without enough remaining unwrapped params.
"""
with enable_wrap(wrapper_cls=FSDP, process_group=self.process_group, flatten_parameters=False):
sequential = nn.Sequential(nn.Linear(5, 5), nn.Sequential(nn.Linear(5, 5), nn.Linear(5, 5)))
my_auto_wrap_policy = functools.partial(
default_auto_wrap_policy, min_num_params=60, skip_params_check_for_root=True
)
model = auto_wrap(sequential, auto_wrap_policy=my_auto_wrap_policy)
assert isinstance(model, FSDP)
assert isinstance(model.module[0], nn.Linear)
assert isinstance(model.module[1], FSDP)
assert isinstance(model.module[1].module[0], nn.Linear)
assert isinstance(model.module[1].module[1], nn.Linear)
def test_auto_wrap_preset_exclude_wrap(self):
"""
Test to ensure excluded modules are not wrapped, regardless if the total param size is greater than the
min_num_params.
"""
with enable_wrap(wrapper_cls=FSDP, process_group=self.process_group, flatten_parameters=False):
sequential = nn.ModuleList([nn.Linear(5, 5), nn.Linear(5, 5)])
my_auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=40)
model = auto_wrap(sequential, auto_wrap_policy=my_auto_wrap_policy)
assert isinstance(model, nn.ModuleList)
assert isinstance(model[0], nn.Linear)
assert isinstance(model[1], nn.Linear)
def test_auto_wrap_preset_exclude_wrap_include_children(self):
"""
Test to ensure excluded modules are not wrapped, but children are if param size is greater than
min_num_params
"""
with enable_wrap(wrapper_cls=FSDP, process_group=self.process_group, flatten_parameters=False):
sequential = nn.ModuleList([nn.Linear(10, 10)])
my_auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=40)
model = auto_wrap(sequential, auto_wrap_policy=my_auto_wrap_policy)
assert isinstance(model, nn.ModuleList)
assert isinstance(model[0], FSDP)
def test_auto_wrap_preset_force_leaf(self):
"""
Test to ensure force-leaf modules are not wrapped, and children are not wrapped.
"""
with enable_wrap(wrapper_cls=FSDP, process_group=self.process_group, flatten_parameters=False):
sequential = nn.Sequential(nn.Linear(10, 10), nn.MultiheadAttention(100, 1))
my_auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=40)
model = auto_wrap(sequential, auto_wrap_policy=my_auto_wrap_policy)
assert isinstance(model.module[0], FSDP)
# Assert children of multihead attention are not wrapped
assert isinstance(model.module[1], nn.MultiheadAttention)
assert isinstance(model.module[1].out_proj, nn.Linear)
def test_auto_wrap_preset_force_leaf_custom(self):
"""
Test to ensure force-leaf modules are not wrapped.
"""
my_auto_wrap_policy = functools.partial(
default_auto_wrap_policy,
min_num_params=40,
force_leaf_modules=default_auto_wrap_policy.FORCE_LEAF_MODULES.union({nn.Linear}),
)
with enable_wrap(
auto_wrap_policy=my_auto_wrap_policy,
wrapper_cls=FSDP,
process_group=self.process_group,
flatten_parameters=False,
):
sequential = nn.Sequential(nn.Linear(10, 10), nn.ModuleList([nn.Linear(10, 10)]))
model = auto_wrap(sequential)
# Model was wrapped in FSDP as no inner modules were wrapped.
assert isinstance(model, FSDP)
assert isinstance(model.module[0], nn.Linear)
assert isinstance(model.module[1], nn.ModuleList)
    # TODO: currently complains that the address is in use; not sure why, since the process group is cleared.
# def test_auto_wrap_smoke(self):
# self._auto_wrap_smoke_test(enable_mixed_precision=False)
def test_auto_wrap_smoke_autocast(self):
"""
Ensure we can do a forward/backward through an auto-wrapped model.
"""
self._auto_wrap_smoke_test(enable_mixed_precision=True)
@unittest.skipIf(not torch.cuda.is_available(), "Test Requires CUDA")
@unittest.skipIf(autocast is None, "Test Requires autocast")
def _auto_wrap_smoke_test(self, enable_mixed_precision):
device = torch.device("cuda")
torch.cuda.set_device(0)
        # Use a random port in case the next test runs quickly; reusing the same port would cause a conflict.
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(random.randint(2000, 3000))
torch.distributed.init_process_group(backend="nccl", rank=0, world_size=1)
try:
with enable_wrap(wrapper_cls=FSDP, mixed_precision=enable_mixed_precision):
sequential = nn.Sequential(
nn.Linear(5, 5), nn.Linear(5, 5), nn.Sequential(nn.Linear(5, 5), nn.Linear(5, 5))
)
my_auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=40)
model = auto_wrap(sequential, auto_wrap_policy=my_auto_wrap_policy)
model.to(device)
input = torch.rand((1, 5), dtype=torch.float).to(device)
with autocast(enabled=enable_mixed_precision):
output = model(input)
loss = F.mse_loss(input, output)
loss.backward()
finally:
torch.distributed.destroy_process_group()
del os.environ["MASTER_ADDR"]
del os.environ["MASTER_PORT"]
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from fairscale.nn import Top2Gate
from fairscale.nn.moe.top2gate import top2gating
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_create():
gate = Top2Gate(4, 8)
@skip_if_no_cuda
def test_create_cuda():
gate = Top2Gate(4, 8).cuda()
def do_test_forward(device):
torch.manual_seed(3)
input = torch.randn(12, 4).to(device)
gate = Top2Gate(4, 6).to(device)
capacity = 2 * 12 // 6
l_aux, combine_weights, dispatch_mask = gate(input)
assert pytest.approx(l_aux.item(), rel=0.01) == 0.0267, l_aux
assert combine_weights.shape == (12, 6, 4)
assert dispatch_mask.shape == (12, 6, 4)
assert torch.equal(combine_weights.bool(), dispatch_mask)
assert torch.all(torch.sum(dispatch_mask, axis=(0, 2)) <= capacity)
assert torch.all(combine_weights >= 0.0)
assert torch.all(combine_weights <= 1.0)
weights_sum = torch.sum(combine_weights).item()
assert round(weights_sum) == pytest.approx(weights_sum), weights_sum
# For this random seed, we get 12 slots filled.
assert weights_sum == pytest.approx(12.0), weights_sum
def test_forward_cpu():
do_test_forward("cpu")
@skip_if_no_cuda
def test_forward_cuda():
do_test_forward("cuda")
# Verify that top gate is allocated capacity as per Algorithm 1 in GShard paper.
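# With top-2 gating each token may be dispatched to up to two experts, hence the conventional
# per-expert capacity of 2 * num_tokens // num_experts used below; once expert 0 is full, the
# remaining tokens forced onto it are simply not dispatched to it.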
def test_expert1_overflow():
num_tokens = 8
num_experts = 4
logits = torch.randn(num_tokens, num_experts)
logits[:, 0] = torch.max(logits, dim=1).values + 1 # Force overflow
top1s = torch.argmax(logits, dim=1)
assert top1s.eq(0).all(), top1s
_, __, dispatch_mask = top2gating(logits)
capacity = 2 * num_tokens // num_experts
for i in range(num_tokens):
if i < capacity:
assert dispatch_mask[i][0][i]
else:
assert not dispatch_mask[i][0].any()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import functools
import tempfile
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from fairscale.fair_dev.testing.testing import make_cudnn_deterministic
from fairscale.internal import torch_version
from fairscale.nn import MOELayer, Top2Gate
pytestmark = pytest.mark.skipif(
not (torch.cuda.is_available() and torch_version() >= (1, 8, 0)), reason="cuda and torch>=1.8.0 required"
)
devices = ["cuda"]
def pg_worker(rank, world_size, init_file, func, *args):
init_url = "file://" + init_file
dist.init_process_group(backend=dist.Backend.NCCL, rank=rank, world_size=world_size, init_method=init_url)
torch.cuda.set_device(rank)
dist.all_reduce(torch.zeros(1).cuda())
func(*args)
dist.destroy_process_group()
def pg_test(world_size=torch.cuda.device_count()):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
tempfile_name = tempfile.mkstemp()[1]
mp.spawn(pg_worker, args=(world_size, tempfile_name, func, *kwargs.values()), nprocs=world_size)
globals()["test_" + func.__name__] = wrapper
return func
return decorator
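# Note: pg_test registers the spawning wrapper as "test_<func>" in this module's globals(),
# which is what pytest collects; the undecorated function itself is returned unchanged and is
# the callable that pg_worker executes inside each spawned process.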
@pg_test(world_size=1)
@pytest.mark.parametrize("device", devices)
def create(device):
model_dim = 8
num_experts = 4
gate = Top2Gate(model_dim, num_experts)
expert = torch.nn.Linear(model_dim, model_dim)
moe = MOELayer(gate, expert).to(device)
@pg_test(world_size=1)
@pytest.mark.parametrize("device", devices)
def expert_params(device):
model_dim = 8
num_experts = 4
gate = Top2Gate(model_dim, num_experts)
expert = torch.nn.Linear(model_dim, model_dim)
moe = MOELayer(gate, expert).to(device)
for p in expert.parameters():
assert p.expert is True, str(p.expert)
@pg_test()
@pytest.mark.parametrize("device", devices)
def forward(device):
make_cudnn_deterministic()
model_dim = 8
num_experts = dist.get_world_size(dist.group.WORLD)
input = torch.randn(4, 16, model_dim).to(device)
gate = Top2Gate(model_dim, num_experts)
expert = torch.nn.Linear(model_dim, model_dim, bias=False)
# Use identity matrix
expert.weight = torch.nn.Parameter(torch.eye(model_dim))
moe = MOELayer(gate, expert).to(device)
output = moe(input)
assert output.shape == input.shape, f"{output.shape} != {input.shape}"
# Re-assembled output should match input due to identity expert.
torch.testing.assert_allclose(input, output)
@pg_test()
@pytest.mark.parametrize("device", devices)
def forward_multi(device):
make_cudnn_deterministic()
torch.set_printoptions(threshold=5000)
num_local_experts = 4
model_dim = 4
num_experts = dist.get_world_size(dist.group.WORLD) * num_local_experts
input = torch.randn(4 * num_local_experts, 16, model_dim).to(device)
gate = Top2Gate(model_dim, num_experts)
experts = []
for i in range(num_local_experts):
expert = torch.nn.Linear(model_dim, model_dim, bias=False)
# Use identity matrix
expert.weight = torch.nn.Parameter(torch.eye(model_dim))
experts += [expert]
moe = MOELayer(gate, torch.nn.ModuleList(experts)).to(device)
output = moe(input)
assert output.shape == input.shape, f"{output.shape} != {input.shape}"
# 90% of the input should have gone to an expert
assert (
len(output.nonzero(as_tuple=False)) / output.numel() > 0.90
), f"{len(output.nonzero(as_tuple=False))} / {output.numel()}"
# Except for zeros, re-assembled output should match input due to identity expert.
torch.testing.assert_allclose(input, torch.where(output > 0, output, input))
# Test Gate which round-robin routes tokens to experts
class RoundRobinGate(torch.nn.Module):
def __init__(self, model_dim, num_experts):
super().__init__()
self.model_dim = model_dim
self.num_experts = num_experts
def forward(self, input):
s = input.shape[0]
assert s % self.num_experts == 0, f"{s} % {self.num_experts} != 0"
capacity = 2 * s // self.num_experts
output = torch.zeros(s, self.num_experts, capacity, dtype=input.dtype, device=input.device)
for i in range(s):
output[i, i % self.num_experts, i // self.num_experts] = 1.0
return 0.0, output, output.bool()
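# Gate protocol assumed by MOELayer in these tests: forward() returns
# (aux_loss, combine_weights, dispatch_mask) with the last two shaped
# (num_tokens, num_experts, capacity). RoundRobinGate deterministically places token i with
# expert i % num_experts at capacity slot i // num_experts, which the scale checks in the
# routing tests below rely on.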
@pg_test()
@pytest.mark.parametrize("device", devices)
def forward_routing(device):
make_cudnn_deterministic()
model_dim = 8
num_experts = dist.get_world_size()
input = torch.randn(4, 16, model_dim).to(device)
gate = RoundRobinGate(model_dim, num_experts)
expert = torch.nn.Linear(model_dim, model_dim, bias=False)
# Use scaling matrix (each rank has a different scale)
scale = dist.get_rank() + 1
expert.weight = torch.nn.Parameter(torch.eye(model_dim) * scale)
moe = MOELayer(gate, expert).to(device)
output = moe(input)
assert output.shape == input.shape, f"{output.shape} != {input.shape}"
# Verify that each token was sent to the correct expert by checking its scale.
t = input.shape[1]
for i in range(t):
expert = i % num_experts
torch.testing.assert_allclose(input[:, i] * (expert + 1), output[:, i])
@pg_test()
@pytest.mark.parametrize("device", devices)
def forward_routing_multi(device):
make_cudnn_deterministic()
model_dim = 8
num_local_experts = 4
num_experts = dist.get_world_size(dist.group.WORLD) * num_local_experts
input = torch.randn(4 * num_local_experts, 16, model_dim).to(device)
gate = RoundRobinGate(model_dim, num_experts)
experts = []
for i in range(num_local_experts):
expert = torch.nn.Linear(model_dim, model_dim, bias=False)
# Use scaling matrix (each rank has a different scale)
scale = dist.get_rank() * num_local_experts + i + 1
expert.weight = torch.nn.Parameter(torch.eye(model_dim) * scale)
experts += [expert]
moe = MOELayer(gate, torch.nn.ModuleList(experts)).to(device)
output = moe(input)
assert output.shape == input.shape, f"{output.shape} != {input.shape}"
# Verify that each token was sent to the correct expert by checking its scale.
t = input.shape[1]
for i in range(t):
expert = i % num_experts
torch.testing.assert_allclose(input[:, i] * (expert + 1), output[:, i])
@pg_test()
@pytest.mark.parametrize("device", devices)
def backward(device):
make_cudnn_deterministic()
loss = torch.nn.MSELoss()
model_dim = 8
num_experts = dist.get_world_size(dist.group.WORLD)
input = torch.randn(4, 16, model_dim).to(device)
gate = Top2Gate(model_dim, num_experts)
expert = torch.nn.Linear(model_dim, model_dim, bias=False)
# Use identity matrix
expert.weight = torch.nn.Parameter(torch.eye(model_dim))
moe = MOELayer(gate, expert).to(device)
output = moe(input)
assert output.shape == input.shape, f"{output.shape} != {input.shape}"
output = loss(output, input)
output.backward()
torch.testing.assert_allclose(expert.weight.grad, torch.zeros_like(expert.weight))
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test AdaScale with OSS. """
from statistics import mean
import tempfile
import numpy as np
import pytest
import torch
from torch import Tensor
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn import Linear, Sequential
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from fairscale.fair_dev.testing.golden_testing_data import adascale_test_data
from fairscale.fair_dev.testing.testing import skip_if_single_gpu
from fairscale.optim import OSS, AdaScale, AdaScaleWrapper
def _dist_init(rank, world_size, tempfile_name, backend):
url = "file://" + tempfile_name
dist.init_process_group(init_method=url, backend=backend, rank=rank, world_size=world_size)
torch.cuda.set_device(rank)
def _test_basic_func(rank, world_size, tempfile_name, test_case, oss, model=None):
_dist_init(rank, world_size, tempfile_name, backend="nccl")
if model is None:
model = Linear(2, 2)
model.bias.data.fill_(0.0)
model.to("cuda")
model = DDP(model, device_ids=[rank])
assert oss in ["none", "ada-oss", "wrapper-oss", "oss-wrapper"]
if oss == "ada-oss":
optim = AdaScale(OSS(model.parameters(), SGD, lr=0.1))
elif oss == "wrapper-oss":
optim = AdaScaleWrapper(model.parameters(), optim_cls=OSS, optim=SGD, lr=0.1)
elif oss == "oss-wrapper":
optim = OSS(model.parameters(), AdaScaleWrapper, optim_cls=SGD, lr=0.1)
else:
assert oss == "none"
optim = AdaScale(SGD(model.parameters(), lr=0.1))
if "input" in test_case:
inputs = [test_case["input"]]
else:
inputs = test_case["inputs"]
for in_data in inputs:
in_data = Tensor(in_data[rank]).cuda()
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
if "expected_gain" in test_case:
assert np.allclose(optim.gain(), test_case["expected_gain"]), "{} vs {}".format(
optim.gain(), test_case["expected_gain"]
)
if "expected_mean_weight" in test_case:
mean_weight = mean([model.module[i].weight.data.mean().item() for i in range(4)])
assert np.allclose(mean_weight, test_case["expected_mean_weight"]), mean_weight
dist.destroy_process_group()
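# The three wrapped configurations exercised above differ only in nesting order:
#   "ada-oss":     AdaScale(OSS(params, SGD, ...))              - AdaScale wraps the sharded optimizer.
#   "wrapper-oss": AdaScaleWrapper(params, optim_cls=OSS, optim=SGD, ...)
#   "oss-wrapper": OSS(params, AdaScaleWrapper, optim_cls=SGD, ...) - OSS shards the params and each
#                  shard gets its own AdaScaleWrapper, which is why gains are not comparable across
#                  ranks (see the comment in test_sequential below).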
@skip_if_single_gpu
@pytest.mark.parametrize("test_case", adascale_test_data)
def test_basic(test_case):
"""Test adascale with DDP + OSS with trivial model"""
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(_test_basic_func, args=(world_size, temp_file_name, test_case, "ada-oss"), nprocs=world_size, join=True)
@skip_if_single_gpu
@pytest.mark.parametrize("oss", ["none", "ada-oss", "wrapper-oss", "oss-wrapper"])
def test_sequential(oss):
"""Test adascale with DDP + OSS with a sequential model"""
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
# Run multiple iterations, check the gain for both oss and non-oss cases.
#
# The inputs are picked arbitrarily. I used vectors that are orthogonal.
#
# The gain and mean_weight values are recorded from my testing and used here
# to ensure their value is unchanged from commit to commit unless we can
# explain why.
test_case = {
"inputs": [[[1.0, 0], [0, 1.0]], [[0, 1.0], [1.0, 0]]],
"expected_gain": 1.0335265132125744,
"expected_mean_weight": 52.92657661437988,
}
if oss == "oss-wrapper":
# When OSS wraps AdaScale, the training is numerically different
# and it exists only to enable future research. So we don't check
        # the gain (OSS doesn't have a gain() function, and different ranks'
        # gains differ). We just check that the mean_weight matches the expected value.
del test_case["expected_gain"]
test_case["expected_mean_weight"] = 94.93386840820312
# The model.
model = Sequential(
Linear(2, 3, bias=False), Linear(3, 4, bias=False), Linear(4, 5, bias=False), Linear(5, 6, bias=False)
)
# Weights need to be fixed for deterministic gain.
model[0].weight.data.copy_(Tensor(range(6)).reshape(3, 2) / mean(range(6)))
model[1].weight.data.copy_(Tensor(range(12)).reshape(4, 3) / mean(range(12)))
model[2].weight.data.copy_(Tensor(range(20)).reshape(5, 4) / mean(range(20)))
model[3].weight.data.copy_(Tensor(range(30)).reshape(6, 5) / mean(range(30)))
mp.spawn(_test_basic_func, args=(world_size, temp_file_name, test_case, oss, model), nprocs=world_size, join=True)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from typing import Any, List, Tuple, Union
import numpy as np
import pytest
from sklearn.datasets import make_blobs
import torch
from torch.cuda.amp.autocast_mode import autocast
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
from fairscale.fair_dev.common_paths import DATASET_CACHE_ROOT
from fairscale.fair_dev.testing.testing import skip_a_test_if_in_CI
from fairscale.optim.layerwise_gradient_scaler import LayerwiseGradientScaler
# Test: feed forward network
class FeedForward(torch.nn.Module):
def __init__(self, input_size: int, hidden_size: int):
torch.manual_seed(7)
super(FeedForward, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.fc1 = nn.Linear(self.input_size, self.hidden_size)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden_size, self.hidden_size)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden_size, 1)
self.sigmoid = nn.Sigmoid()
self.identity = nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor: # type: ignore
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.fc3(out)
out = self.sigmoid(out)
out = self.identity(out)
return out
# assign labels
def blob_label(y: np.ndarray, label: int, loc: List) -> np.ndarray:
target = np.copy(y) # type: ignore
for l in loc:
target[y == l] = label
return target
def load_data(model_type: str) -> Union[DataLoader, Tuple[Any, Any]]:
data = None
if model_type == "linear_model":
torch.manual_seed(11)
x_train, y_train = make_blobs(n_samples=40, n_features=2, cluster_std=1.5, shuffle=True, random_state=10)
x_train = torch.FloatTensor(x_train)
y_train = torch.FloatTensor(blob_label(y_train, 0, [0]))
y_train = torch.FloatTensor(blob_label(y_train, 1, [1, 2, 3]))
data = (x_train, y_train)
if model_type == "vision_model":
torch.manual_seed(10)
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# TODO: we should NOT do this download over and over again during test.
train_ds = torchvision.datasets.CIFAR10(
root=DATASET_CACHE_ROOT,
train=True,
download=True,
transform=transform,
)
train_ds_loader = torch.utils.data.DataLoader(train_ds, batch_size=128, shuffle=False, num_workers=2)
image, _ = train_ds[0]
assert image.shape == torch.Size([3, 32, 32])
data = train_ds_loader # type: ignore
return data
def get_params_with_grad(trained_model):
result = []
for module_name, layer in trained_model.named_modules():
if module_name != "":
for param_name, param in layer.named_parameters():
if hasattr(param, "grad"):
logging.debug("testing equality for %s.%s" % (module_name, param_name))
result.append(param.grad)
return result
def train_linear_model(model: FeedForward, per_layer_scaling=False) -> FeedForward:
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
x_train, y_train = load_data("linear_model")
num_epochs = 2
model.train()
layers_to_scale = {"fc1": 1024, "fc2": 512, "fc3": 1024} if per_layer_scaling else {}
layer_scaler = LayerwiseGradientScaler(model, layers_to_scale)
for _ in range(num_epochs):
optimizer.zero_grad()
# scale the gradients
layer_scaler.scale()
with autocast():
# forward pass
y_pred = model(x_train)
# compute loss
loss = criterion(y_pred.squeeze(), y_train)
loss.backward()
# unscale the gradients
layer_scaler.unscale()
# update weights and scaling factor
layer_scaler.step(optimizer)
return model
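# Per-iteration pattern exercised above and in train_vision_model below:
# zero_grad -> layer_scaler.scale() -> forward/backward -> layer_scaler.unscale()
# -> layer_scaler.step(optimizer). Scaling followed by unscaling should leave the final
# gradients numerically unchanged, which test_linear_model checks by comparing against a run
# with an empty layer-scale dict.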
def test_linear_model() -> None:
model1 = FeedForward(2, 10)
model2 = FeedForward(2, 10)
vanilla_model = train_linear_model(model1, False)
scaled_model = train_linear_model(model2, True)
for elt in zip(get_params_with_grad(vanilla_model), get_params_with_grad(scaled_model)):
assert torch.allclose(elt[0], elt[1])
# Test: convolutional network
class SimpleConvNet(nn.Module):
def __init__(self):
torch.manual_seed(24)
super(SimpleConvNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(120, 84)
self.relu4 = nn.ReLU()
self.fc3 = nn.Linear(84, 10)
self.identity = nn.Identity()
def forward(self, x):
out = self.conv1(x)
out = self.relu1(out)
out = self.pool1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.pool2(out)
out = torch.flatten(out, 1) # flatten all dimensions except batch
out = self.fc1(out)
out = self.relu3(out)
out = self.fc2(out)
out = self.relu4(out)
out = self.fc3(out)
out = self.identity(out)
return out
def train_vision_model(model: SimpleConvNet, per_layer_scaling=False):
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
if torch.cuda.is_available():
model.cuda()
train_ds_loader = load_data("vision_model")
model.train()
layer_scale_dict = {"conv1": 128, "conv2": 256, "fc1": 512, "fc2": 1024, "fc3": 8192} if per_layer_scaling else {}
layer_scaler = LayerwiseGradientScaler(model, layer_scale_dict)
for _ in range(2):
for img, lbl in train_ds_loader:
if torch.cuda.is_available():
img = img.cuda()
lbl = lbl.cuda()
optimizer.zero_grad()
layer_scaler.scale()
predict = model(img)
loss = loss_fn(predict, lbl)
loss.backward()
layer_scaler.unscale()
layer_scaler.step(optimizer)
return model
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_vision_model() -> None:
# The os.environ below doesn't seem to be enough if the test is run on CI with many other tests
# together.
# see: https://app.circleci.com/pipelines/github/facebookresearch/fairscale/4086/workflows/72b1470a-55f8-4a45-afe5-04641b093bef/jobs/45179/tests#failed-test-0
# Skipping for now.
# Also, TODO (Min): improve the downloading code above before re-enabling this.
skip_a_test_if_in_CI()
# Remove randomness from various sources while testing.
torch.use_deterministic_algorithms(True) # type: ignore
# set environment variable in CircleCI for test to pass: CUBLAS_WORKSPACE_CONFIG = :4096:8
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
m1 = SimpleConvNet()
m2 = SimpleConvNet()
vision_model = train_vision_model(m1, False)
scaled_vision_model = train_vision_model(m2, True)
for elt in zip(get_params_with_grad(vision_model), get_params_with_grad(scaled_vision_model)):
assert torch.allclose(elt[0], elt[1])
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test AdaScale with DDP/SDP/FSDP.
Even though it is tested here, AdaScale does NOT work with SDP/FSDP the
same way it does with DDP and gradient accumulation, because the full
gradients are not sent to every worker.
Each worker only has a slice of the reduced gradient in FSDP's case, and
only a subset of the gradients are reduced in SDP's case. OTOH, each
AdaScale worker receives its full local gradient, so the gain value
computation is off. If the workers instead used a slice (or subset) of
their local gradient, the gain values they each compute would differ,
which might or might not be helpful for training.
"""
import tempfile
import numpy as np
import pytest
import torch
from torch import Tensor
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn import Linear
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from fairscale.fair_dev.testing.golden_testing_data import adascale_test_data
from fairscale.fair_dev.testing.testing import skip_if_single_gpu
from fairscale.internal import torch_version
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
from fairscale.nn.data_parallel import ShardedDataParallel as SDP
from fairscale.optim import OSS, AdaScale
def _dist_init(rank, world_size, tempfile_name, backend):
url = "file://" + tempfile_name
dist.init_process_group(init_method=url, backend=backend, rank=rank, world_size=world_size)
torch.cuda.set_device(rank)
def _test_basic_func(rank, ddp_cls, world_size, tempfile_name, test_case):
_dist_init(rank, world_size, tempfile_name, backend="nccl") # Covers nccl
model = Linear(2, 2)
model.to("cuda")
if ddp_cls is DDP:
model = ddp_cls(model, device_ids=[rank])
optim = AdaScale(SGD(model.parameters(), lr=0.1))
elif ddp_cls is SDP:
optim = AdaScale(OSS(model.parameters(), SGD, lr=0.1))
model = ddp_cls(model, sharded_optimizer=optim)
else:
assert ddp_cls is FSDP, ddp_cls
# Two cases:
#   flatten=True : the AdaScale wrapper must come after FSDP, since it
#                  receives a single flattened grad tensor. It won't
#                  receive any grads if wrapped before FSDP.
#   flatten=False: AdaScale can be wrapped either before or after FSDP.
# So, it is better to apply AdaScale after FSDP.
model = ddp_cls(model, flatten_parameters=False)
optim = AdaScale(SGD(model.parameters(), lr=0.1))
if "input" in test_case:
# single iter
in_data = Tensor(test_case["input"][rank])
in_data = in_data.cuda()
out = model(in_data)
out.sum().backward()
if ddp_cls is DDP:
assert np.allclose(optim.gain(), test_case["expected_gain"]), optim.gain()
w, b = model.parameters()
assert np.allclose(w.grad.cpu(), test_case["expected_grad"]), w.grad
assert np.allclose(b.grad.cpu(), test_case["expected_bias_grad"]), b.grad
optim.step()
optim.zero_grad()
else:
# multiple iters
n = len(test_case["inputs"])
for i, in_data in enumerate(test_case["inputs"]):
in_data = Tensor(in_data[rank]).cuda()
out = model(in_data)
out.sum().backward()
if i == n - 1 and ddp_cls is DDP:
assert np.allclose(optim.gain(), test_case["expected_gain"]), optim.gain()
w, b = model.parameters()
assert np.allclose(w.grad.cpu(), test_case["expected_grad"]), w.grad
assert np.allclose(b.grad.cpu(), test_case["expected_bias_grad"]), b.grad
optim.step()
optim.zero_grad()
dist.destroy_process_group()
@skip_if_single_gpu
@pytest.mark.parametrize("ddp_cls", [DDP])
@pytest.mark.parametrize("test_case", adascale_test_data)
def test_basic(ddp_cls, test_case):
"""Test adascale with DDP without gradient accumulation"""
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(_test_basic_func, args=(ddp_cls, world_size, temp_file_name, test_case), nprocs=world_size, join=True)
@skip_if_single_gpu
@pytest.mark.parametrize("ddp_cls", [DDP, SDP, FSDP])
@pytest.mark.parametrize("test_case", adascale_test_data[:1])
def test_basic_all_dp(ddp_cls, test_case):
"""Test adascale with DDP/SDP/FSDP with just one test case."""
test_basic(ddp_cls, test_case)
def _test_grad_accum_func(rank, world_size, tempfile_name):
_dist_init(rank, world_size, tempfile_name, backend="gloo") # Covers gloo
model = Linear(4, 2, bias=False)
model.to("cuda")
model = DDP(model, device_ids=[rank])
optim = AdaScale(SGD(model.parameters(), lr=0.1), num_gradients_to_accumulate=2)
with model.no_sync():
# iter 1, input vectors are pointing dim0 and dim1
in_data = Tensor([0.0] * 4)
in_data[rank] = 1.0
in_data = in_data.cuda()
out = model(in_data)
out.sum().backward()
# iter 2, input vectors are pointing dim2 and dim3
in_data = Tensor([0.0] * 4)
in_data[rank + 2] = 1.0
in_data = in_data.cuda()
out = model(in_data)
out.sum().backward()
# since all inputs are orthogonal, the gain should be exactly 4.0.
assert np.allclose(optim.gain(), 4.0), optim.gain()
optim.step()
optim.zero_grad()
dist.destroy_process_group()
@skip_if_single_gpu
def test_grad_accum():
"""Test adascale with DDP + gradient accumulation using ddp.no_sync()"""
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(_test_grad_accum_func, args=(world_size, temp_file_name), nprocs=world_size, join=True)
def _test_corr_mean_func(rank, world_size, tempfile_name, test_case):
_dist_init(rank, world_size, tempfile_name, backend="gloo") # Covers gloo
model = Linear(3, 1, bias=False)
model.to("cuda")
model = DDP(model, device_ids=[rank])
optim = AdaScale(SGD(model.parameters(), lr=0.1))
results = []
last_grad = None
for i, in_data in enumerate(test_case["inputs"]):
# use no_sync so we can access nonreduced gradients
with model.no_sync():
in_data = Tensor(in_data[rank]).cuda()
out = model(in_data)
out.sum().backward()
results.append(optim._compute_intra_grad_corr_mean().item())
# sync gradients manually
for p in model.parameters():
if p.grad is not None:
dist.all_reduce(p.grad, op=dist.ReduceOp.SUM)
# divide by world size
p.grad.data.div_(world_size)
grad = optim._gather_flat_grad()
assert np.allclose(grad.cpu(), test_case["expected_grad"][i])
optim.step()
if last_grad is not None:
# compute cosine similarity
cos_similarity = torch.dot(grad, last_grad) / (grad.norm() * last_grad.norm())
np.allclose(cos_similarity.cpu(), test_case["expected_cos_similarity"][i])
last_grad = grad
optim.zero_grad()
assert np.allclose(results, test_case["expected_corr"]), results
dist.destroy_process_group()
@skip_if_single_gpu
@pytest.mark.skipif(
torch_version() < (1, 10, 0),
reason="torch.corrcoef available only for torch 1.10 or higher",
)
def test_corr_mean():
"""
Test _compute_intra_grad_corr_mean and _gather_flat_grad using ddp.no_sync()
We also demonstrate how cosine similarity between consecutive gradients can be computed using _gather_flat_grad
"""
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
from fairscale.fair_dev.testing.golden_testing_data import corr_mean_test_data
test_case = corr_mean_test_data[0]
mp.spawn(_test_corr_mean_func, args=(world_size, temp_file_name, test_case), nprocs=world_size, join=True)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test AdaScale with a single node (1 CPU or 1 GPU). """
import tempfile
import numpy as np
import pytest
import torch
from torch import Tensor
from torch.nn import Linear, Sequential
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR
from fairscale.fair_dev.testing.golden_testing_data import adascale_test_data
from fairscale.fair_dev.testing.testing import make_cudnn_deterministic, skip_if_no_cuda
from fairscale.fair_dev.testing.testing_memory import find_tensor_by_shape
from fairscale.optim import AdaScale
def test_basic_cpu():
"""Test single batch behavior on CPU"""
model = Linear(2, 2, bias=False)
try:
optim = AdaScale(SGD(model.parameters(), lr=0.1))
except RuntimeError:
return
assert False, "Single batch AdaScale should not be supported"
def test_loss_accum_cpu():
"""Test the loss accumulation behavior on CPU
Loss accumulation is NOT SUPPORTED. This test shows that it does not work.
"""
model = Linear(2, 2, bias=False)
# num_gradients_to_accumulate value doesn't matter in this negative test.
optim = AdaScale(SGD(model.parameters(), lr=0.1), num_gradients_to_accumulate=3)
# data 1
in_data = Tensor([0.0, 1.0])
loss = model(in_data).sum()
# data 2
in_data = Tensor([1.0, 0.0])
loss += model(in_data).sum()
# data 3
in_data = Tensor([1.0, 2.0])
loss += model(in_data).sum()
# backward, but gradient is only produced once by the autograd engine.
loss.backward()
# The gain will always be 1, which renders AdaScale a no-op.
assert np.allclose(optim.gain(), 1.0), optim.gain()
# We don't call optim.step(), since it will detect that backward is not yet done.
@pytest.mark.parametrize("cpu", [True, False])
@pytest.mark.parametrize("test_case", adascale_test_data)
@pytest.mark.parametrize("is_scaled_loss", [True, False])
def test_grad_accum(test_case, cpu, is_scaled_loss):
"""Test the basic functionality on CPU/GPU with gradient accumulation without DDP"""
make_cudnn_deterministic()
model = Linear(2, 2, bias=True)
if not cpu:
if torch.cuda.device_count() < 1:
pytest.skip("1 GPU is required")
model = model.cuda()
optim = AdaScale(SGD(model.parameters(), lr=0.1), num_gradients_to_accumulate=2, is_scaled_loss=is_scaled_loss)
expected_gain = test_case["expected_gain"]
if "input" in test_case:
data = [test_case["input"]] * 2
gains = [expected_gain] * 2
else:
data = test_case["inputs"]
gains = [None, expected_gain]
for in_data, exp_gain in zip(data, gains): # testing 2 iterations catches more corner cases.
# grad pass 1
in_data_0 = Tensor(in_data[0])
if not cpu:
in_data_0 = in_data_0.cuda()
loss = model(in_data_0).sum()
if is_scaled_loss:
loss = loss / 2
loss.backward()
# grad pass 2
in_data_1 = Tensor(in_data[1])
if not cpu:
in_data_1 = in_data_1.cuda()
loss = model(in_data_1).sum()
if is_scaled_loss:
loss = loss / 2
loss.backward()
if not is_scaled_loss:
optim.scale_grad_by_num_grads_to_accum()
if exp_gain is not None:
assert np.allclose(optim.gain(), exp_gain), optim.gain()
w, b = model.parameters()
assert np.allclose(w.grad.cpu(), test_case["expected_grad"]), w.grad
assert np.allclose(b.grad.cpu(), test_case["expected_bias_grad"]), b.grad
# stepping it. Note that if we did more passes than the 2 promised by the
# num_gradients_to_accumulate argument above, AdaScale is not able to
# detect that mistake for now. The result would just be wrong in that case.
optim.step()
optim.zero_grad()
@skip_if_no_cuda
def test_state_checkpointing():
"""Test state checkpointing on GPU since that's the common case.
Note: we don't support checkpointing in the middle of a gradient accumulation
step, so that case is not tested here.
AdaScale doesn't have distributed state; otherwise, it would need
a unit test for checkpointing with DDP.
"""
# Constants.
num_grads_to_accum = 3
in_dim = 5
# Setup.
def make_model_and_optim():
model = Linear(in_dim, 2, bias=False)
model = model.cuda()
optim = AdaScale(SGD(model.parameters(), lr=0.1, momentum=0.9), num_gradients_to_accumulate=num_grads_to_accum)
return model, optim
model, optim = make_model_and_optim()
# Run a bit.
def run_a_bit(replay_data=None):
data = []
replay_data_idx = 0
for _ in range(6): # run some steps
for i in range(num_grads_to_accum):
if replay_data is None:
in_data = torch.rand(in_dim).cuda()
data.append(in_data)
else:
in_data = replay_data[replay_data_idx]
replay_data_idx += 1
out = model(in_data)
out.sum().backward()
if i == num_grads_to_accum - 1:
optim.step()
optim.zero_grad()
return out, data
run_a_bit()
with tempfile.NamedTemporaryFile() as f:
temp_file_name = f.name
# Save a checkpoint.
torch.save({"model": model.state_dict(), "optim": optim.state_dict()}, temp_file_name)
# Train more.
out, replay_data = run_a_bit()
# Save the gain and out.
expected_out = out.sum().item()
expected_gain = optim.gain()
# Load back the checkpoint.
model, optim = make_model_and_optim() # They both need to start afresh.
ckpt = torch.load(temp_file_name)
model.load_state_dict(ckpt["model"])
optim.load_state_dict(ckpt["optim"])
# Train the same steps.
out, _ = run_a_bit(replay_data)
# Assert the results.
assert np.allclose(out.sum().item(), expected_out), out.sum().item()
assert np.allclose(optim.gain(), expected_gain), optim.gain()
def test_lr_scheduler():
"""Test AdaScale working with torch.optim.lr_scheduler."""
num_grads_to_accum = 3
model = Linear(2, 2, bias=False)
optim = AdaScale(SGD(model.parameters(), lr=0.1), num_gradients_to_accumulate=num_grads_to_accum)
# We use 1, not 0.1 here since scheduler.step() is called here first.
scheduler = LambdaLR(optim, lr_lambda=lambda epoch: 1 / 10**epoch)
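# With this lambda the effective lr is 0.1 / 10**epoch, dropping by 10x at every
# scheduler.step() call, which is what the asserts below verify.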
for epoch in range(3):
for data_idx in range(10):
for accumulation in range(num_grads_to_accum):
in_data = torch.rand(2)
loss = model(in_data).sum()
loss.backward()
assert optim.gain() <= 3, optim.gain()
optim.step()
optim.zero_grad()
# asserting LR is right
assert np.allclose(optim.param_groups[0]["lr"], 0.1 / 10**epoch), optim.param_groups[0]["lr"]
scheduler.step()
# asserting LR is right
assert np.allclose(optim.param_groups[0]["lr"], 0.1 / 10 ** (epoch + 1)), optim.param_groups[0]["lr"]
@skip_if_no_cuda
@pytest.mark.parametrize("debias_ewma", [True, False])
@pytest.mark.parametrize("is_scaled_loss", [True, False])
def test_add_param_group(debias_ewma, is_scaled_loss):
"""Test AdaScale supports add_param_group() API for both scaled and unscaled loss."""
num_grads_to_accum = 2
model1 = Linear(2, 2, bias=True)
with torch.no_grad():
# make weights and bias deterministic, which is needed for
# multi-layer models. For them, adascale gain is affected by
# parameters from other layers.
model1.weight.copy_(Tensor([1.0, 2.0, 3.0, 4.0]).reshape(2, 2))
model1.bias.fill_(0.1)
optim = AdaScale(
SGD(model1.parameters(), lr=0.1),
num_gradients_to_accumulate=2,
is_scaled_loss=is_scaled_loss,
debias_ewma=debias_ewma,
)
assert len(optim._hook_handles) == 2, len(optim._hook_handles)
model2 = Linear(2, 3, bias=True)
with torch.no_grad():
# make weights and bias deterministic
model2.weight.copy_(Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).reshape(3, 2))
model2.bias.fill_(0.2)
optim.add_param_group({"params": model2.parameters()})
assert len(optim._hook_handles) == 4, len(optim._hook_handles)
# make sure we can run the model.
model = Sequential(model1, model2).cuda()
in_data_0 = Tensor([1.0, 2.0]).cuda()
loss = model(in_data_0).sum()
if is_scaled_loss:
loss = loss / num_grads_to_accum
loss.backward()
in_data_1 = Tensor([3.0, 4.0]).cuda()
loss = model(in_data_1).sum()
if is_scaled_loss:
loss = loss / num_grads_to_accum
loss.backward()
if not is_scaled_loss:
optim.scale_grad_by_num_grads_to_accum()
# make sure the gains are right and we can step.
# since this is the first step, debias_ewma doesn't affect the value.
assert np.allclose(optim.gain(), 1.1440223454935758), optim.gain()
assert np.allclose(optim.gain(0), 1.1428571428571428), optim.gain(0)
assert np.allclose(optim.gain(1), 1.1471258476157762), optim.gain(1)
optim.step()
optim.zero_grad()
# make sure we can add a PG again after stepping.
model3 = Linear(3, 4, bias=True)
with torch.no_grad():
# make weights and bias deterministic
model3.weight.copy_(Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0] * 2).reshape(4, 3))
model3.bias.fill_(0.2)
optim.add_param_group({"params": model3.parameters()})
assert len(optim._hook_handles) == 6, len(optim._hook_handles)
# make sure we can run the model.
model = Sequential(model1, model2, model3).cuda()
in_data_0 = Tensor([1.0, 2.0]).cuda()
loss = model(in_data_0).sum()
if is_scaled_loss:
loss = loss / num_grads_to_accum
loss.backward()
in_data_1 = Tensor([3.0, 4.0]).cuda()
loss = model(in_data_1).sum()
if is_scaled_loss:
loss = loss / num_grads_to_accum
loss.backward()
if not is_scaled_loss:
optim.scale_grad_by_num_grads_to_accum()
# make sure gains are right and we can step.
# the last PG's gain is not affected by debias_ewma since it is the first step for that PG.
assert np.allclose(optim.gain(), 1.1382937715383077 if debias_ewma else 1.1391959826562015), optim.gain()
assert np.allclose(optim.gain(0), 1.142857206008338 if debias_ewma else 1.142857206006931), optim.gain(0)
assert np.allclose(optim.gain(1), 1.1116875516387468 if debias_ewma else 1.1116906378271827), optim.gain(1)
assert np.allclose(optim.gain(2), 1.0749164095196344), optim.gain(2)
optim.step()
optim.zero_grad()
@pytest.mark.parametrize(
"test_case",
[
{"num_grads_to_accum": 3, "exp_gain": 2.141385737279438},
{"num_grads_to_accum": 6, "exp_gain": 2.9927880097754036},
{"num_grads_to_accum": 9, "exp_gain": 3.4461759591877312},
],
)
@pytest.mark.parametrize("is_scaled_loss", [True, False])
def test_set_num_gradients_to_accumulate(test_case, is_scaled_loss):
"""Test set_num_gradients_to_accumulate experimental feature."""
num_grads_to_accum = test_case["num_grads_to_accum"]
exp_gain = test_case["exp_gain"]
model = Linear(2, 2, bias=False)
optim = AdaScale(SGD(model.parameters(), lr=0.1), num_gradients_to_accumulate=2, is_scaled_loss=is_scaled_loss)
loss = model(Tensor([0.0, 1.0])).sum()
if is_scaled_loss:
loss = loss / 2
loss.backward()
loss = model(Tensor([1.0, 0.0])).sum()
if is_scaled_loss:
loss = loss / 2
loss.backward()
if not is_scaled_loss:
optim.scale_grad_by_num_grads_to_accum()
assert np.allclose(optim.gain(), 2.0)
optim.step()
optim.zero_grad()
optim.set_scale(float(num_grads_to_accum))
optim.set_num_gradients_to_accumulate(num_grads_to_accum)
for _ in range(num_grads_to_accum):
loss = model(Tensor([0.0, 1.0])).sum() / num_grads_to_accum
if is_scaled_loss:
loss = loss / num_grads_to_accum
loss.backward()
if not is_scaled_loss:
optim.scale_grad_by_num_grads_to_accum()
assert np.allclose(optim.gain(), exp_gain), optim.gain()
optim.step()
optim.zero_grad()
def test_debias_ewma():
"""Test debias_ewma experimental feature"""
model = Linear(2, 2, bias=False)
optim = AdaScale(SGD(model.parameters(), lr=0.1), num_gradients_to_accumulate=2, debias_ewma=True)
for _ in range(4):
out = model(Tensor([0.0, 1.0]))
out.sum().backward()
out = model(Tensor([1.0, 0.0]))
out.sum().backward()
assert np.allclose(optim.gain(), 2.0), optim.gain()
optim.step()
optim.zero_grad()
def test_gradient_value():
"""Test that we don't mutate the gradients during backward"""
model = Linear(2, 2, bias=False)
optim = AdaScale(SGD(model.parameters(), lr=0.1), num_gradients_to_accumulate=2)
# fwd 1
out = model(Tensor([0.0, 1.0]))
out.sum().backward()
assert np.allclose(model.weight.grad.numpy(), [[0.0, 1.0], [0.0, 1.0]]), model.weight.grad
# fwd 2, grad is accumulated
out = model(Tensor([0.0, 1.0]))
out.sum().backward()
assert np.allclose(model.weight.grad.numpy(), [[0.0, 2.0], [0.0, 2.0]]), model.weight.grad
# assert gain and grad value before/after step/zero_grad
assert np.allclose(optim.gain(), 1.0000002499999376), optim.gain()
optim.step()
assert np.allclose(model.weight.grad.numpy(), [[0.0, 2.0], [0.0, 2.0]]), model.weight.grad
optim.zero_grad()
assert np.allclose(model.weight.grad.numpy(), [[0.0, 0.0], [0.0, 0.0]]), model.weight.grad
@pytest.mark.parametrize(
"test_case",
[
{"scale": None, "exp_gain": 4.0}, # default, baseline is single batch
{"scale": 4.0 / 3, "exp_gain": 4.0 / 3}, # baseline is grad_accum = 3
{"scale": 4.0 / 2, "exp_gain": 2.0}, # baseline is grad_accum = 2
{"scale": 4.0 / 1, "exp_gain": 4.0}, # baseline is single batch
],
)
def test_scale_not_equal_default(test_case):
"""Test gain value when scale doesn't equal world size * grad_accum"""
scale = test_case["scale"]
exp_gain = test_case["exp_gain"]
model = Linear(4, 2, bias=False)
optim = AdaScale(SGD(model.parameters(), lr=0.1), num_gradients_to_accumulate=4, scale=scale)
data = [
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
for i in range(4):
out = model(Tensor(data[i]))
out.sum().backward()
# Since the inputs are perfectly orthogonal, the gain should equal the scale.
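# (i.e. with num_gradients_to_accumulate=4 and orthogonal inputs, the measured gain
# saturates at whatever `scale` was configured, defaulting to 4.0.)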
assert np.allclose(optim.gain(), exp_gain), optim.gain()
@skip_if_no_cuda
def test_unhook():
"""Test unhook that frees the tensor from CUDA memory."""
model = Linear(123, 456, bias=False).cuda() # unique shape so that it can be found
optim = AdaScale(SGD(model.parameters(), lr=0.1), num_gradients_to_accumulate=2)
torch.cuda.empty_cache()
target_shape = (456, 123)
assert find_tensor_by_shape(target_shape), "something wrong with gc-based method to find the tensor"
optim.unhook()
del model
del optim
torch.cuda.empty_cache()
assert not find_tensor_by_shape(target_shape), "tensor should have been released"
def test_custom_smoothing_factor():
"""Test custom smoothing since we had a bug around it."""
model = Linear(1, 1)
optim = AdaScale(SGD(model.parameters(), lr=0.1), smoothing=0.12345, num_gradients_to_accumulate=3)
assert optim._smoothing == 0.12345
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
import functools
import pytest
import torch
try:
from fairscale.optim import Adam, GradScaler, Precision
imported_adam = True
except ImportError:
imported_adam = False
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
skip_if_no_adam = pytest.mark.skipif(not imported_adam, reason="Fairscale Adam not available")
@pytest.fixture(autouse=True)
def set_torch_seed():
torch.manual_seed(1)
yield
def make_full_precision_params():
weight = torch.randn(2, 1).cuda().requires_grad_()
bias = torch.randn(2).cuda().requires_grad_()
input = torch.randn(1).cuda()
return weight, bias, input
def make_half_precision_params():
weight = torch.randn(2, 1).cuda().half().requires_grad_()
bias = torch.randn(2).cuda().half().requires_grad_()
input = torch.randn(1).half().cuda()
return weight, bias, input
def step_test(optimizer, weight, bias, input):
# to check if the optimizer can be printed as a string
optimizer.__repr__()
def fn():
optimizer.zero_grad()
y = weight.mv(input)
if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device():
y = y.cuda(bias.get_device())
loss = (y + bias).pow(2).sum()
loss.backward()
return loss
initial_value = fn().item()
for _i in range(5):
optimizer.step(fn)
assert fn().item() < initial_value
def state_dict_test(optimizer, weight, bias, input):
def fn_base(optimizer, weight, bias, input):
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
return loss
fn = functools.partial(fn_base, optimizer, weight, bias, input)
# Prime the optimizer
for _i in range(5):
optimizer.step(fn)
# Clone the weights and construct new optimizer for them
weight_c = weight.data.clone().requires_grad_()
bias_c = bias.data.clone().requires_grad_()
optimizer_c = Adam([weight_c, bias_c], lr=1e-3, precision=optimizer.precision)
fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c, input)
# Load state dict
state_dict = deepcopy(optimizer.state_dict())
optimizer_c.load_state_dict(state_dict)
for group, group_c in zip(optimizer.param_groups, optimizer_c.param_groups):
for p, p_c in zip(group["params"], group_c["params"]):
assert torch.equal(optimizer.state[p]["exp_avg"], optimizer_c.state[p_c]["exp_avg"])
assert torch.equal(optimizer.state[p]["exp_avg_sq"], optimizer_c.state[p_c]["exp_avg_sq"])
if optimizer.fp32_param_groups:
# When using mixed precision, fp32_param_groups are made from FP16 params rather than
# copied via state_dict, introducing differences between the original optimizer and
# the copy. Because this test requires that they be the exact same, we copy the
# fp32 params from the original optimizer to the copy
optimizer_c.fp32_param_groups = deepcopy(optimizer.fp32_param_groups)
# Run both optimizations in parallel
for _i in range(5):
optimizer.step(fn)
optimizer_c.step(fn_c)
assert torch.equal(weight, weight_c)
assert torch.equal(bias, bias_c)
def assert_almost_zero(x):
assert abs(x) < 1e-3
return 1.0
@skip_if_no_cuda
@skip_if_no_adam
def test_step_full_precision_inferred():
weight, bias, input = make_full_precision_params()
optimizer = Adam([weight, bias], lr=1e-3)
step_test(optimizer, weight, bias, input)
for group in optimizer.param_groups:
for p in group["params"]:
if p.requires_grad:
assert p.dtype == torch.float32
assert not optimizer.fp32_param_groups
assert optimizer.state[weight]["exp_avg"].dtype == torch.float32
assert optimizer.state[weight]["exp_avg_sq"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg_sq"].dtype == torch.float32
@skip_if_no_cuda
@skip_if_no_adam
def test_step_mixed_precision_inferred():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3)
step_test(optimizer, weight, bias, input)
assert len(optimizer.fp32_param_groups) == len(optimizer.param_groups)
for fp32_group, fp16_group in zip(optimizer.fp32_param_groups, optimizer.param_groups):
for fp32_p, fp16_p in zip(fp32_group["params"], fp16_group["params"]):
assert fp32_p.dtype == torch.float32
if fp16_p.requires_grad:
assert fp16_p.dtype == torch.float16
(fp32_p - fp16_p).to("cpu").detach().apply_(assert_almost_zero)
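# apply_() only works on CPU tensors, hence the .to("cpu") above; it checks element-wise
# that the fp32 master copy stays within 1e-3 of the fp16 params.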
assert optimizer.state[weight]["exp_avg"].dtype == torch.float32
assert optimizer.state[weight]["exp_avg_sq"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg_sq"].dtype == torch.float32
@skip_if_no_cuda
@skip_if_no_adam
def test_step_memory_efficient():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.MEMORY_EFFICIENT_MIXED_PRECISION)
step_test(optimizer, weight, bias, input)
for group in optimizer.param_groups:
for p in group["params"]:
if p.requires_grad:
assert p.dtype == torch.float16
assert not optimizer.fp32_param_groups
assert optimizer.state[weight]["exp_avg"].dtype == torch.float32
assert optimizer.state[weight]["exp_avg_sq"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg"].dtype == torch.float32
assert optimizer.state[bias]["exp_avg_sq"].dtype == torch.float32
@skip_if_no_cuda
@skip_if_no_adam
def test_step_pure_fp16():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)
step_test(optimizer, weight, bias, input)
for group in optimizer.param_groups:
for p in group["params"]:
if p.requires_grad:
assert p.dtype == torch.float16
assert optimizer.state[weight]["exp_avg"].dtype == torch.float16
assert optimizer.state[weight]["exp_avg_sq"].dtype == torch.float16
assert optimizer.state[bias]["exp_avg"].dtype == torch.float16
assert optimizer.state[bias]["exp_avg_sq"].dtype == torch.float16
assert not optimizer.fp32_param_groups
@skip_if_no_cuda
@skip_if_no_adam
def test_step_multigpu():
if not torch.cuda.device_count() > 1:
return
weight = torch.randn(10, 5).cuda(0).requires_grad_()
bias = torch.randn(10).cuda(1).requires_grad_()
input = torch.randn(5).cuda(0)
optimizer = Adam([weight, bias], lr=1e-3)
step_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
def test_step_multigpu_mixed_precision():
if not torch.cuda.device_count() > 1:
return
weight = torch.randn(10, 5).cuda(0).half().requires_grad_()
bias = torch.randn(10).cuda(1).half().requires_grad_()
input = torch.randn(5).cuda(0).half()
optimizer = Adam([weight, bias], lr=1e-3)
step_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
def test_step_pure_fp16_multigpu():
if not torch.cuda.device_count() > 1:
return
weight = torch.randn(10, 5).half().cuda(0).requires_grad_()
bias = torch.randn(10).half().cuda(1).requires_grad_()
input = torch.randn(5).half().cuda(0)
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)
step_test(optimizer, weight, bias, input)
assert optimizer.state[weight]["exp_avg"].dtype == torch.float16
assert optimizer.state[weight]["exp_avg_sq"].dtype == torch.float16
assert optimizer.state[bias]["exp_avg"].dtype == torch.float16
assert optimizer.state[bias]["exp_avg_sq"].dtype == torch.float16
@skip_if_no_cuda
@skip_if_no_adam
def test_step_with_grad_scaler():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)
scaler = GradScaler()
initial_value = None
for _i in range(5):
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
if _i == 0:
initial_value = loss.item()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
assert loss.item() < initial_value
@skip_if_no_cuda
@skip_if_no_adam
def test_state_dict_full_precision():
weight, bias, input = make_full_precision_params()
optimizer = Adam([weight, bias], lr=1e-3)
state_dict_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
@pytest.mark.xfail
def test_state_dict_mixed_precision():
# TODO: Optimizer state gets cast to FP16 and back to FP32 for
# mixed-precision and memory-efficient mixed-precision, resulting
# in a potential loss of precision. Thus, as training proceeds, we don't
# necessarily expect the parameters to remain the exact same.
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.MIXED_PRECISION)
state_dict_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
@pytest.mark.xfail
def test_state_dict_memory_efficient():
# TODO: Optimizer state gets cast to FP16 and back to FP32 for
# mixed-precision and memory-efficient mixed-precision, resulting
# in a potential loss of precision. Thus, as training proceeds, we don't
# necessarily expect the parameters to remain the exact same.
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.MEMORY_EFFICIENT_MIXED_PRECISION)
state_dict_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
def test_state_dict_pure_fp16():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)
state_dict_test(optimizer, weight, bias, input)
@skip_if_no_cuda
@skip_if_no_adam
def test_update_optim_scale():
weight, bias, input = make_half_precision_params()
optimizer = Adam([weight, bias], lr=1e-3, precision=Precision.PURE_FP16)
optimizer._optim_scale_update_freq = 1
optimizer._optim_scale = 2**15
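# With PURE_FP16 the optimizer keeps its own internal scale (_optim_scale); with the update
# frequency set to 1, a single successful step should double it, which is asserted below
# (2**15 -> 2**16).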
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
optimizer.step()
assert optimizer._optim_scale == 2**16
@skip_if_no_cuda
@skip_if_no_adam
def test_exploding_optimizer_state():
weight = torch.tensor([[float("inf")]]).half().cuda().requires_grad_()
input = torch.tensor([1.0]).half().cuda().requires_grad_()
optimizer = Adam([weight], lr=1e-3, precision=Precision.PURE_FP16)
optimizer._optim_scale = 1.0
optimizer.zero_grad()
loss = (weight.mv(input)).pow(2).sum()
loss.backward()
with pytest.raises(RuntimeError):
optimizer.step()
@skip_if_no_cuda
@skip_if_no_adam
def test_build_fp32_params():
weight = torch.randn(10, 5).cuda().half().requires_grad_()
bias = torch.randn(10).cuda().half().requires_grad_()
optimizer = Adam([weight, bias], lr=1e-3)
optimizer._build_fp32_params([weight, bias])
for fp32_group, fp16_group in zip(optimizer.fp32_param_groups, optimizer.param_groups):
for fp32_p, fp16_p in zip(fp32_group["params"], fp16_group["params"]):
assert fp32_p.dtype == torch.float32
if fp16_p.requires_grad:
assert fp16_p.dtype == torch.float16
(fp32_p - fp16_p).to("cpu").detach().apply_(assert_almost_zero)
@skip_if_no_cuda
@skip_if_no_adam
def test_invalid_beta():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(ValueError):
Adam([weight, bias], lr=1e-2, betas=(1.0, 0.0))
@skip_if_no_cuda
@skip_if_no_adam
def test_invalid_weight_decay():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(ValueError):
Adam([weight, bias], lr=1e-2, weight_decay=-1)
@skip_if_no_cuda
@skip_if_no_adam
def test_amsgrad():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(RuntimeError):
Adam([weight, bias], lr=1e-2, amsgrad=True)
@skip_if_no_cuda
@skip_if_no_adam
def test_mixed_precision_with_full_precision_parameters():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(AssertionError):
Adam([weight, bias], lr=1e-2, precision=Precision.MIXED_PRECISION)
@skip_if_no_cuda
@skip_if_no_adam
def test_memory_efficient_with_full_precision_parameters():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(AssertionError):
Adam([weight, bias], lr=1e-2, precision=Precision.MEMORY_EFFICIENT_MIXED_PRECISION)
@skip_if_no_cuda
@skip_if_no_adam
def test_pure_fp16_with_full_precision_parameters():
weight = torch.randn(10, 5, requires_grad=True).float().cuda()
bias = torch.randn(10, requires_grad=True).float().cuda()
with pytest.raises(AssertionError):
Adam([weight, bias], lr=1e-2, precision=Precision.PURE_FP16)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
import copy
from math import inf
import tempfile
from typing import Any, Dict, Type, cast
import unittest
import numpy as np
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from fairscale.fair_dev.testing.testing import (
check_same_model_params,
check_same_models_across_ranks,
skip_if_no_cuda,
skip_if_py39_no_cuda,
skip_if_single_gpu,
)
from fairscale.internal import torch_version
import fairscale.optim as optim
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO # type: ignore
DEVICE = "cuda" if torch.cuda.is_available() else torch.device("cpu")
RECIPIENT_RANK = 1
def dist_init(rank, world_size, tempfile_name, backend=BACKEND):
url = "file://" + tempfile_name
dist.init_process_group(init_method=url, backend=backend, rank=rank, world_size=world_size)
def sync_object_ranks(something_to_sync: Any, reference_rank: int, device: torch.device) -> Any:
package = [something_to_sync]
dist.broadcast_object_list(package, src=reference_rank, group=dist.group.WORLD)
package_sync = package[0]
return package_sync
class TestSingleRank(unittest.TestCase):
"""
All the following tests do not check for inter-process communication
"""
def setUp(self):
dist_init(0, 1, tempfile.mkstemp()[1])
def tearDown(self):
torch.distributed.destroy_process_group()
def test_create(self):
params = [torch.rand(1)]
o = optim.OSS(params, lr=0.01)
def test_state_dict(self):
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.1, momentum=0.9)
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
o.zero_grad()
o.consolidate_state_dict() # Sync state dict in between replicas - even if there are none
state_dict = o.state_dict()
# Check that the state dict is pytorch-compliant key wise
assert "param_groups" in state_dict.keys()
assert "state" in state_dict.keys()
# Check that the pulled state is what we expect, and that we have all the expected keys
assert state_dict["param_groups"][0]["lr"] == 0.1
assert state_dict["param_groups"][0]["momentum"] == 0.9
assert not state_dict["param_groups"][0]["nesterov"]
assert state_dict["param_groups"][0]["weight_decay"] == 0.0
assert state_dict["param_groups"][0]["dampening"] == 0.0
# Check that the pulled state and the .param_groups attribute are in sync
for k in state_dict["param_groups"][0].keys():
if k != "params":
assert state_dict["param_groups"][0][k] == o.param_groups[0][k]
# Check that it's correctly loaded
o = optim.OSS([x], lr=0.01)
o.load_state_dict(state_dict)
# Check that state is correct and on proper device
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
# We should now be using a lr of 0.1, both within the optimizer
# and as exposed by the .param_groups attribute
assert o.param_groups[0]["lr"] == 0.1
x.backward()
o.step()
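# With lr=0.1 and momentum=0.9, the buffer becomes 0.9 * 1.0 + 1.0 = 1.9, and
# x becomes 0.9 - 0.1 * 1.9 = 0.71, matching the asserts below.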
assert x == torch.tensor([0.71], device=DEVICE)
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.9], device=DEVICE)
# Check that the exposed param_groups are on the proper device
assert o.param_groups[0]["params"][0].device == x.device
def test_lr_scheduler(self):
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
x2 = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.01)
o2 = torch.optim.SGD([x2], lr=0.01)
s = torch.optim.lr_scheduler.StepLR(o, 1)
s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
for _ in range(5):
x.backward()
o.zero_grad()
o.step()
s.step()
x2.backward()
o2.zero_grad()
o2.step()
s2.step()
assert x == x2
def test_step_with_kwargs(self):
class SGDWithStepKWArg(torch.optim.SGD):
def step(self, closure=None, kwarg=[]):
super().step()
kwarg.append(5)
kwarg = []
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], SGDWithStepKWArg, lr=0.1)
x.backward()
o.step(0, kwarg=kwarg)
assert kwarg == [5]
assert x == torch.tensor([0.9], device=DEVICE)
@skip_if_no_cuda
def test_device_change(self):
x = torch.nn.Linear(1, 1).to("cpu")
o = optim.OSS(x.parameters(), torch.optim.SGD, lr=0.1)
# Move the model to device after OSS was constructed
x.to(DEVICE)
x(torch.zeros(1, device=DEVICE)).backward()
# Check that OSS detects that the device changed
o.step()
# Check that the default device has been updated
assert o._default_device.type == DEVICE
def test_step_with_extra_inner_key(self):
class SGDWithNewKey(torch.optim.SGD):
# Dummy optimizer which adds a new key to the param groups
def step(self, closure=None):
super().step()
self.param_groups[0]["new_key"] = 0.1
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], SGDWithNewKey, lr=0.1)
x.backward()
o.step()
assert o.param_groups[0]["new_key"] == 0.1
assert x == torch.tensor([0.9], device=DEVICE)
def test_step_without_closure(self):
class SGDWithoutClosure(torch.optim.SGD):
def step(self):
return super().step()
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], SGDWithoutClosure, lr=0.1)
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
def test_implicit_local_state_dict(self):
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.1)
with pytest.raises(RuntimeError):
_ = o.state_dict()
def run_test_add_param_group(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name)
# Test with all parameters trainable to begin with
def all_trainable():
params = []
sizes = [9, 7, 5, 3]
sizes_world = sizes * world_size
for size in sizes_world[:-1]:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable, enforces size-based partitioning
for p in params:
p.requires_grad = True
o = optim.OSS(params, lr=0.1)
assert len(o.param_groups) == 1
o.add_param_group({"params": [torch.rand(3, 1)]})
assert len(o.param_groups) == 2
# Verify that the added group lands in the correct partition, so that all partitions hold the same number of elements
assert sum([x.numel() for g in o.optim.param_groups for x in g["params"]]) == sum(sizes)
assert len(o.optim.param_groups) == 2
# Test a pathological config with a first big non-trainable param
def some_trainable():
params = []
for size in [100, 3, 5, 2, 6, 4]:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable, enforces size-based partitioning
for p in params[1:]:
p.requires_grad = True
o = optim.OSS(params, lr=0.1)
assert len(o.param_groups) == 1
o.add_param_group({"params": [torch.rand(3, 1)]})
assert len(o.param_groups) == 2
assert len(o.optim.param_groups) == 2
all_trainable()
some_trainable()
dist.destroy_process_group()
def test_add_param_group():
world_size = 4
if torch.cuda.is_available() and torch.cuda.device_count() < world_size:
world_size = min(world_size, torch.cuda.device_count())
mp.spawn(run_test_add_param_group, args=(world_size, tempfile.mkstemp()[1]), nprocs=world_size, join=True)
def run_test_zero_grad(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name)
x = torch.rand(1)
m = torch.nn.Linear(1, 1)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
assert m.weight.grad
assert m.bias.grad
o.zero_grad()
assert not m.weight.grad
assert not m.bias.grad
dist.destroy_process_group()
def test_zero_grad():
world_size = 2
if torch.cuda.is_available() and torch.cuda.device_count() < world_size:
world_size = min(world_size, torch.cuda.device_count())
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(run_test_zero_grad, args=(world_size, temp_file_name), nprocs=world_size, join=True)
def run_test_empty_shard(rank, world_size, tempfile_name, backend):
dist_init(rank, world_size, tempfile_name, backend=backend)
m = torch.nn.Linear(1, 1)
x = torch.rand(20, 1)
if torch.cuda.is_available():
m = m.to(rank)
x = x.to(rank)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x).sum()
y.backward()
o.step()
dist.destroy_process_group()
@pytest.mark.parametrize("backend", ["gloo", "nccl"])
def test_empty_shard(backend):
world_size = 4
if torch.cuda.is_available() and torch.cuda.device_count() < world_size:
world_size = min(world_size, torch.cuda.device_count())
if world_size == 1 or (backend == "nccl" and not torch.cuda.is_available()):
pytest.skip("Not enough GPUs to test with NCCL, or CUDA not present")
mp.spawn(run_test_empty_shard, args=(world_size, tempfile.mkstemp()[1], backend), nprocs=world_size, join=True)
def run_test_step(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name, backend="gloo")
x = torch.tensor([float(rank + 1)], device=rank)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[1.0]])
m.bias.data = torch.tensor([2.0])
m.to(rank)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= world_size
o.step()
assert m.weight == torch.tensor([[0.75]], device=rank), f"{rank}: {m.weight.item()}, 0.75 expected"
assert m.bias == torch.tensor([1.85], device=rank), f"{rank}: {m.bias.item()}, 1.85 expected"
dist.destroy_process_group()
@skip_if_single_gpu
def test_step():
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(run_test_step, args=(world_size, temp_file_name), nprocs=world_size, join=True)
def run_test_step_with_closure(rank, world_size, tempfile_name, optimizer=None):
dist_init(rank, world_size, tempfile_name)
x_val = rank + 1
weight = 1.0
bias = 2.0
error = 1.0
target = torch.tensor([x_val * weight + bias + error], device=rank)
loss_fn = torch.nn.L1Loss()
x = torch.tensor([float(x_val)], device=rank)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[weight]])
m.bias.data = torch.tensor([bias])
m.to(rank)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= world_size
def closure():
o.zero_grad()
output = m(x)
loss = loss_fn(output, target)
loss.backward()
return loss
loss = o.step(closure=closure)
assert loss == torch.tensor(error, device=rank)
assert m.weight == torch.tensor([[1.1]], device=rank)
assert m.bias == torch.tensor([2.1], device=rank)
dist.destroy_process_group()
@skip_if_no_cuda
def test_step_with_closure():
world_size = min(2, torch.cuda.device_count())
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(run_test_step_with_closure, args=(world_size, temp_file_name), nprocs=world_size, join=True)
def run_test_sharding(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name)
params = []
sizes = [9, 7, 5, 3]
sizes_world = sizes * world_size
for size in sizes_world:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable, enforces size-based partitioning
for p in params:
p.requires_grad = True
o = optim.OSS(params, lr=0.1)
assert sum([x.numel() for x in o.optim.param_groups[0]["params"]]) == sum(sizes)
dist.destroy_process_group()
def test_sharding():
world_size = 4
if torch.cuda.is_available():
world_size = min(world_size, torch.cuda.device_count())
_, temp_file_name = tempfile.mkstemp()
mp.spawn(run_test_sharding, args=(world_size, temp_file_name), nprocs=world_size, join=True)
def run_test_collect_shards(rank, world_size, reference_rank, tempfile_name):
dist_init(rank, world_size, tempfile_name)
device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE
torch.cuda.set_device(rank)
# Run a dummy step so that the optimizer state dict exists
batch, input_width, hidden, target_width = 3, 3, 3, 5
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
model.to(device)
loss_fn = torch.nn.L1Loss()
loss_fn.to(device)
# With SGD, Momentum is required to get a state to shard
optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99)
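# (Plain SGD without momentum keeps no per-parameter state, so there would be nothing for
# the sharded optimizer to consolidate.)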
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss.backward()
return loss
_ = optimizer.step(closure=closure)
# Update the optimizer state on the reference rank
optimizer.consolidate_state_dict(recipient_rank=reference_rank)
# Fetch the state on the reference rank
# - check that it has the correct size
# - load it again
if rank == reference_rank:
optimizer_state_dict = optimizer.state_dict()
assert len(optimizer_state_dict["state"]) == len(list(model.parameters()))
else:
optimizer_state_dict = {}
# distribute to the other ranks
optimizer_state_dict = sync_object_ranks(optimizer_state_dict, reference_rank, device)
# Load the optimizer state dict
optimizer.load_state_dict(optimizer_state_dict)
# Check that the states are not None, but {}
for state in optimizer.state.values():
for _, _ in state.items():
pass
# Test the state dict materialization on all ranks
_ = optimizer.step(closure=closure)
optimizer_state_dict = optimizer.state_dict(all_ranks=True) # one per rank
optimizer.load_state_dict(optimizer_state_dict)
_ = optimizer.step(closure=closure)
check_same_models_across_ranks(model, dist.group.WORLD, params_should_be_equal=True, check_broadcast_buffers=False)
# Check that if the model is moved to cpu, the optimizer consolidation still works
model.cpu()
optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99)
optimizer.consolidate_state_dict(recipient_rank=reference_rank)
dist.destroy_process_group()
@skip_if_single_gpu
def test_collect_shards():
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
reference_rank = 0
mp.spawn(
run_test_collect_shards,
args=(world_size, reference_rank, temp_file_name),
nprocs=world_size,
join=True,
)
def run_test_reproducibility(rank, world_size, tempfile_name, broadcast_fp16):
dist_init(rank, world_size, tempfile_name)
device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE
torch.cuda.set_device(rank)
# Run a dummy step so that the optimizer state dict exists
batch, input_width, hidden, target_width = 3, 3, 3, 5
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
model.to(device)
model = DDP(model, device_ids=[device])
loss_fn = torch.nn.L1Loss()
loss_fn.to(device)
optimizer = optim.OSS(model.parameters(), optim=torch.optim.RMSprop, lr=0.1, broadcast_fp16=broadcast_fp16)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss.backward()
return loss
_ = optimizer.step(closure=closure)
# Get a snapshot of the state at this point
optimizer_state_dict = copy.deepcopy(optimizer.state_dict(all_ranks=True))
model_state_dict = copy.deepcopy(model.state_dict())
# Run two steps, log the loss
_ = optimizer.step(closure=closure)
reference_loss = optimizer.step(closure=closure)
# Load the optimizer state dict, rewind the state two steps back
optimizer.load_state_dict(optimizer_state_dict)
model.load_state_dict(model_state_dict)
# Run two new steps, log the loss again and check that we get the same
_ = optimizer.step(closure=closure)
test_loss = optimizer.step(closure=closure)
assert torch.allclose(reference_loss, test_loss), f"{reference_loss} vs {test_loss}. Reproducibility is broken"
# Check that, no matter what, the buffers are back to fp32
for device in optimizer.buckets.keys():
for bucket in optimizer.buckets[device].values():
assert bucket.buffer.dtype == torch.float32
dist.destroy_process_group()
@skip_if_single_gpu
@pytest.mark.parametrize("broadcast_fp16", [False, True])
def test_reproducibility(broadcast_fp16: bool):
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(
run_test_reproducibility,
args=(world_size, temp_file_name, broadcast_fp16),
nprocs=world_size,
join=True,
)
def run_test_multiple_groups(rank, world_size, tempfile_name):
# Only the even ranks do the work, to check that the global_rank indexing is properly used
dist_init(rank=rank, world_size=world_size, tempfile_name=tempfile_name, backend="gloo")
sub_group_ranks = [0, 2, 4]
process_group = torch.distributed.new_group(ranks=sub_group_ranks, backend="gloo")
# Make sure that all the ranks get different training data
# So that the sync check in between their models is meaningful
torch.manual_seed(rank)
np.random.seed(rank)
# Standard deep learning setup
device = "cpu"
epochs, batch, input_width, hidden, target_width = 5, 3, 20, 10, 5
loss_fn = torch.nn.L1Loss().to(device)
def check(optimizer):
# Just run a couple of epochs, check that the model is properly updated
for _ in range(epochs):
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss /= world_size
loss.backward()
dist.all_reduce(loss, group=process_group) # Not strictly needed for the test below
return loss
_ = optimizer.step(closure=closure)
# Check that all the params are the same on all ranks
for pg in optimizer.param_groups:
for p in pg["params"]:
receptacle = [p.clone() for _ in sub_group_ranks] if rank == 0 else []
dist.gather(p, receptacle, dst=0, group=process_group)
if rank == 0:
for sync_p in receptacle[1:]:
assert torch.all(
torch.eq(receptacle[0], sync_p)
), "Models differ in between ranks {} - {}".format(
torch.norm(receptacle[0]), torch.norm(sync_p)
)
if rank in sub_group_ranks:
# Model fitting in the broadcast bucket
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width)).to(
device
)
# With SGD, Momentum is required to get a state to shard
optimizer = optim.OSS(
model.parameters(), lr=0.1, momentum=0.99, group=process_group, broadcast_buffer_size=2**20
)
check(optimizer)
# Model not fitting in the broadcast bucket
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width)).to(
device
)
# With SGD, Momentum is required to get a state to shard
optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99, group=process_group, broadcast_buffer_size=0)
check(optimizer)
dist.destroy_process_group(process_group)
@skip_if_py39_no_cuda
def test_multiple_groups():
world_size = 6
temp_file_name = tempfile.mkstemp()[1]
mp.spawn(
run_test_multiple_groups,
args=(world_size, temp_file_name),
nprocs=world_size,
join=True,
)
def run_gradient_clipping(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name, backend="gloo")
device = torch.device(rank)
torch.manual_seed(rank) # make sure that different ranks get different data
# Run a dummy step so that the optimizer state dict exists
batch, input_width, hidden, target_width = 3, 20, 10, 5
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
NORMS = [1.0, 2.0, 1, 2, inf]
CLIP_NORM = 0.3
def check(norm):
model_oss = torch.nn.Sequential(
torch.nn.Linear(input_width, hidden),
torch.nn.Linear(hidden, hidden),
torch.nn.Linear(hidden, target_width),
).to(device)
model = copy.deepcopy(model_oss)
# For this test the gradients are (all) reduced in the same way between the torch reference and fairscale.
# Normally OSS would use ShardedDDP and only reduce to the proper rank, but that would not change the
# gradient norm computation from OSS and would add a dependency.
# To keep the comparison apples-to-apples, DDP is used in both cases.
model_oss = DDP(
module=model_oss,
device_ids=[rank],
)
sharded_optimizer = optim.OSS(model_oss.parameters(), lr=0.1, momentum=0.99)
model = DDP(
model,
device_ids=[rank],
)
loss_fn = torch.nn.L1Loss()
loss_fn.to(device)
model.zero_grad()
model_oss.zero_grad()
outputs = model(inputs)
outputs_oss = model_oss(inputs)
loss = loss_fn(outputs, target)
loss.backward()
loss_oss = loss_fn(outputs_oss, target)
loss_oss.backward()
torch.testing.assert_allclose(loss_oss, loss)
# Check the equivalence with the non-sharded optim
oss_total_norm = sharded_optimizer.clip_grad_norm(CLIP_NORM, norm_type=norm)
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_NORM, norm_type=norm)
assert torch.allclose(oss_total_norm, total_norm), "torch and fairscale should return the same grad norm"
# Check that the params have indeed been clipped
for params in sharded_optimizer._per_device_params.values():
for param in filter(lambda x: x.grad is not None, params[rank]):
assert torch.norm(param.grad, p=norm) < CLIP_NORM, f"param grad norm above clip : {param.grad}"
for norm in NORMS:
print(f"Checking norm {norm}")
check(norm)
# Check twice, to catch a hypothetical mistake in iterator handling
check(norm)
dist.destroy_process_group()
@skip_if_no_cuda
def test_gradient_clipping():
world_size = 3
temp_file_name = tempfile.mkstemp()[1]
if torch.cuda.is_available():
world_size = min(world_size, torch.cuda.device_count())
reference_rank = 0
mp.spawn(
run_gradient_clipping,
args=(world_size, temp_file_name),
nprocs=world_size,
join=True,
)
def run_state_dict_distributed(rank, world_size, tempfile_name):
dist_init(rank, world_size, tempfile_name, backend="gloo")
device = torch.device(rank)
torch.manual_seed(rank) # make sure that the different rank get different data
# Set up two problems in parallel; we'll make sure that the second track (with save/load) follows the first one (untouched).
# We split the model in two to test the multiple param groups support
batch, input_width, hidden, target_width = 3, 20, 10, 5
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
model_oss1 = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, hidden)).to(device)
head_oss1 = torch.nn.Linear(hidden, target_width).to(device)
model_oss2 = copy.deepcopy(model_oss1)
head_oss2 = copy.deepcopy(head_oss1)
# For this test the gradients are (all-)reduced in the same way for both the torch reference and fairscale.
# Normally OSS would be paired with ShardedDDP and reduce only to the owning rank, but that would not change
# the gradient norm computation in OSS and would add a dependency.
# To keep the comparison apples-to-apples, DDP is used in both cases.
model_oss1 = DDP(
module=model_oss1,
device_ids=[rank],
)
sharded_optimizer1 = optim.OSS(model_oss1.parameters(), lr=0.1, momentum=0.99)
sharded_optimizer1.add_param_group({"params": head_oss1.parameters()})
model_oss2 = DDP(
module=model_oss2,
device_ids=[rank],
)
sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=0.1, momentum=0.99)
sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})
loss_fn = torch.nn.L1Loss().to(device)
def run_grad_step(model, head, optimizer):
model.zero_grad()
outputs = head(model(inputs))
loss = loss_fn(outputs, target)
loss.backward()
optimizer.step()
# pull the current state, broadcast it to all ranks
sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK) # all ranks
state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)
# re-create a new optimizer from scratch with absurd values, load the previous state
sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=1e6, momentum=0.0001)
sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})
sharded_optimizer2.load_state_dict(state_dict2)
check_same_model_params(
model_oss1, model_oss2, "parameters of the two identical models have diverged (before any steps)"
)
# now take a step and check that parameters are equal
run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
check_same_model_params(
model_oss1, model_oss2, "parameters of the two identical models have diverged (after stepping)"
)
# save the state dict for one model only, then distribute to the other ranks
sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK) # all ranks
state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)
# Check that the pulled state and the .param_groups attribute are in sync
for replica in range(len(state_dict2["param_groups"])):
for k in state_dict2["param_groups"][replica].keys():
if k != "params":
assert state_dict2["param_groups"][replica][k] == sharded_optimizer2.param_groups[0][k]
# take a step
run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
check_same_model_params(
model_oss1, model_oss2, "parameters of the two identical models have diverged (after consolidating)"
)
# save again for one rank, then distribute to the others
sharded_optimizer2.consolidate_state_dict(recipient_rank=RECIPIENT_RANK) # all ranks
state_dict2 = sharded_optimizer2.state_dict() if rank == RECIPIENT_RANK else {}
state_dict2 = sync_object_ranks(state_dict2, RECIPIENT_RANK, device)
# reload the state_dict
sharded_optimizer2 = optim.OSS(model_oss2.parameters(), lr=0.1, momentum=0.99)
sharded_optimizer2.add_param_group({"params": head_oss2.parameters()})
sharded_optimizer2.load_state_dict(state_dict2)
# take a step
run_grad_step(model_oss1, head_oss1, sharded_optimizer1)
run_grad_step(model_oss2, head_oss2, sharded_optimizer2)
check_same_model_params(
model_oss1, model_oss2, "parameters of the two identical models have diverged (after reloading)"
)
dist.destroy_process_group()
@skip_if_single_gpu
def test_state_dict_distributed():
world_size = 2
temp_file_name = tempfile.mkstemp()[1]
if torch.cuda.is_available():
world_size = max(world_size, torch.cuda.device_count())
mp.spawn(
run_state_dict_distributed,
args=(world_size, temp_file_name),
nprocs=world_size,
join=True,
)
def run_ddp_parity(rank, world_size, backend, temp_file_name, change_train_graph, broadcast_fp16):
url = "file://" + temp_file_name
dist.init_process_group(init_method=url, backend=backend, rank=rank, world_size=world_size)
device = torch.device("cuda")
torch.cuda.set_device(rank)
torch.manual_seed(rank)
np.random.seed(rank)
hidden = 5
in_channels = 3
out_channels = 3
batch = 64
def check_optimizer_equivalence(optimizer: Type[torch.optim.Optimizer], change_train_graph: bool = False):
# Any model works. Add one different buffer per rank
trunk = torch.nn.Sequential(
torch.nn.Linear(in_channels, hidden), torch.nn.Linear(hidden, hidden), torch.nn.Linear(hidden, hidden)
)
trunk.register_buffer("test_buffer", torch.ones(1) * rank)
trunk.to(device)
head = torch.nn.Linear(hidden, out_channels).to(device)
# Define a model to be trained by OSS
oss_module = torch.nn.Sequential(trunk, head)
# Make sure that the param groups are interleaved, to catch an ordering bug in the state dict
oss_trainable_params = [
{"params": list(trunk.parameters())[:-1] + list(head.parameters()), "lr": 1e-5},
{"params": list(trunk.parameters())[-1], "lr": 1e-4},
]
optimizer_settings: Dict[Any, Any] = {}
if isinstance(optimizer, torch.optim.SGD):
optimizer_settings["momentum"] = 0.9
sharded_optimizer = optim.OSS(
params=oss_trainable_params,
optim=optimizer,
group=None,
broadcast_buffer_size=2**10,
**optimizer_settings,
)
oss_ddp_model = DDP(module=oss_module, device_ids=[rank], broadcast_buffers=True, find_unused_parameters=True)
# Define a model to be trained by normal pytorch + DDP
ddp_trunk = copy.deepcopy(trunk)
ddp_head = copy.deepcopy(head)
ddp_module = torch.nn.Sequential(ddp_trunk, ddp_head)
ddp_trainable_params = [
{"params": list(ddp_trunk.parameters())[:-1] + list(ddp_head.parameters()), "lr": 1e-5},
{"params": list(ddp_trunk.parameters())[-1], "lr": 1e-4},
]
ddp_optimizer = optimizer(ddp_trainable_params, **optimizer_settings) # type: ignore
ddp_model = DDP(module=ddp_module, device_ids=[rank], broadcast_buffers=True, find_unused_parameters=True)
def check_step():
input_tensor = torch.rand((batch, in_channels)).to(device)
def closure_ddp(input_tensor=input_tensor):
ddp_optimizer.zero_grad()
ddp_loss = ddp_model(input_tensor).abs().sum()
ddp_loss.backward()
return ddp_loss
def closure_sharded(input_tensor=input_tensor):
sharded_optimizer.zero_grad()
sharded_loss = oss_ddp_model(input_tensor).abs().sum()
sharded_loss.backward()
return sharded_loss
loss_ddp = cast(torch.Tensor, ddp_optimizer.step(closure=closure_ddp))
loss_sharded_optim = cast(torch.Tensor, sharded_optimizer.step(closure=closure_sharded))
assert torch.allclose(
loss_ddp, loss_sharded_optim, rtol=1e-3
), f"Losses differ in between Pytorch optim and OSS\n {loss_ddp.item()} - {loss_sharded_optim.item()} - world size {world_size}"
check_same_model_params(oss_ddp_model, ddp_model)
# The model should be synchronized in between the ranks at construction time, check that
check_same_model_params(oss_ddp_model, ddp_model)
# The models should stay the same in between ddp and sharded optimizer
for i in range(5):
check_step()
# Check that altering the trainable parameters does not cause DDP and OSS to diverge
if change_train_graph:
# Flip the first parameter from trainable to non-trainable and vice-versa
next(ddp_module.parameters()).requires_grad = not next(ddp_module.parameters()).requires_grad
next(oss_module.parameters()).requires_grad = not next(oss_module.parameters()).requires_grad
# sharded_optimizer.refresh_trainable()
# Check that the checkpoints are compatible (post pytorch 1.5)
if torch_version()[1] > 5:
# - get states
ddp_state_dict = ddp_optimizer.state_dict()
sharded_optimizer.consolidate_state_dict(recipient_rank=RECIPIENT_RANK)
sharded_optim_state_dict = sharded_optimizer.state_dict() if rank == RECIPIENT_RANK else {}
sharded_optim_state_dict = sync_object_ranks(sharded_optim_state_dict, RECIPIENT_RANK, device)
# - cross load the states
# run one step and check that the models are still the same
ddp_state_dict_ref = copy.deepcopy(ddp_state_dict) # OSS will remove some states
ddp_optimizer.load_state_dict(sharded_optim_state_dict) # mixup on purpose !
sharded_optimizer.load_state_dict(ddp_state_dict)
check_step()
# - self load, rewind, check no problem
# run one step and check that the models are still the same
ddp_optimizer.load_state_dict(ddp_state_dict_ref)
sharded_optimizer.load_state_dict(sharded_optim_state_dict)
check_step()
for opt in [torch.optim.Adam, torch.optim.SGD]:
check_optimizer_equivalence(opt, change_train_graph=change_train_graph)
dist.destroy_process_group()
@pytest.mark.skip("broken at head")
@skip_if_no_cuda
@skip_if_single_gpu
@pytest.mark.parametrize("change_train_graph", [True, False])
@pytest.mark.parametrize("backend", [dist.Backend.NCCL, dist.Backend.GLOO])
@pytest.mark.parametrize("broadcast_fp16", [False, True])
def test_ddp_parity(change_train_graph: bool, backend: dist.Backend, broadcast_fp16: bool):
temp_file_name = tempfile.mkstemp()[1]
world_size = torch.cuda.device_count()
mp.spawn(
run_ddp_parity,
args=(world_size, backend, temp_file_name, change_train_graph, broadcast_fp16),
nprocs=world_size,
join=True,
)
|
from fairscale.internal import torch_version
def test_torch_version():
assert torch_version("") == tuple()
assert torch_version("bad format") == tuple()
assert torch_version("1.9.0") == (1, 9, 0)
assert torch_version("1.10.0a0+gitbc6fc3e") == (1, 10, 0)
assert torch_version("1.7.0+cu102") == (1, 7, 0)
assert torch_version("1.10.0a0+fb") == (1, 10, 0)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import itertools
import sys
import unittest
from unittest import mock
from parameterized import parameterized
import torch
from fairscale.fair_dev.testing.testing import dist_init, spawn_for_all_world_sizes
from fairscale.internal import torch_version
from fairscale.internal.reduce_scatter_bucketer import ReduceScatterBucketer
def rename_test(testcase_func, param_num, param):
return "%s_%s" % (
testcase_func.__name__,
parameterized.to_safe_name(str(param.args)),
)
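# Cartesian product of bucketing configurations: bucket_cap_mb=0 disables bucketing entirely, while the two shard
# sizes exercise inputs well below (1 element) and well above (262144 fp32 elements, i.e. 1MB) a 0.25MB bucket.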
CONFIG_OPTIONS = [
[dict(zip(["bucket_cap_mb", "shard_size"], config))] for config in itertools.product([0, 0.25], [1, 262144])
]
class TestReduceScatterBucketer(unittest.TestCase):
# TODO(sshleifer): check if possible to reuse `DistributedTest, spawn_and_init`.
def setUp(self):
major, minor, _ = torch_version()
if major < 1 or (major == 1 and minor < 6):
raise unittest.SkipTest("Need pytorch version >= 1.6 due to reduce_scatter")
if not torch.cuda.is_available():
raise unittest.SkipTest("CUDA not available, skipping test")
if sys.platform == "win32":
raise unittest.SkipTest("NCCL doesn't support Windows, skipping test")
if torch.cuda.device_count() < 2:
raise unittest.SkipTest("distributed tests require 2+ GPUs, skipping")
@parameterized.expand(CONFIG_OPTIONS, name_func=rename_test)
def test_reduce_scatter(self, config):
spawn_and_init(functools.partial(self._test_reduce_scatter, **config))
@staticmethod
def _test_reduce_scatter(rank, group, bucket_cap_mb=None, shard_size=None):
bucketer = ReduceScatterBucketer(bucket_cap_mb=bucket_cap_mb)
world_size = group.size()
tensors = [torch.ones(shard_size).cuda() for _ in range(world_size)]
tensors[rank].fill_(0)
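# Each rank zeroes its own shard before the reduce-scatter, so the shard it receives back should be the sum of
# the ones contributed by the other (world_size - 1) ranks.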
input_bytes = shard_size * world_size * 4
bucket_bytes = bucket_cap_mb * 1024 * 1024
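# Tensors are fp32, hence 4 bytes per element when estimating whether the input fits inside the bucket.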
callback = mock.MagicMock()
bucketer.reduce_scatter_async(tensors, group, callback_fn=callback)
if bucket_cap_mb > 0 and input_bytes < bucket_bytes:
assert callback.call_count == 0
bucketer.flush()
assert callback.call_count == 1
result = callback.call_args[0][0] # get first positional arg
assert torch.is_tensor(result), result
assert torch.all(result == (world_size - 1))
def test_out_of_order_reduction(self):
spawn_and_init(self._test_out_of_order_reduction)
@staticmethod
def _test_out_of_order_reduction(rank, group):
bucketer = ReduceScatterBucketer(bucket_cap_mb=0.25)
world_size = group.size()
small_tensors = [torch.ones(1).cuda() for _ in range(world_size)]
big_tensors = [torch.ones(262144).cuda() for _ in range(world_size)]
more_small_tensors = [torch.ones(2).cuda() for _ in range(world_size)]
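# Expected behavior: the big tensors (262144 fp32 elements per rank, ~1MB) exceed the 0.25MB bucket and are
# reduced immediately, while the small tensors stay buffered until flush() is called.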
callback1 = mock.MagicMock()
callback2 = mock.MagicMock()
callback3 = mock.MagicMock()
bucketer.reduce_scatter_async(small_tensors, group, callback_fn=callback1)
assert callback1.call_count == 0
bucketer.reduce_scatter_async(big_tensors, group, callback_fn=callback2)
assert callback1.call_count == 0
assert callback2.call_count == 1
bucketer.reduce_scatter_async(more_small_tensors, group, callback_fn=callback3)
assert callback1.call_count == 0
assert callback2.call_count == 1
assert callback3.call_count == 0
bucketer.flush()
assert callback1.call_count == 1
assert callback2.call_count == 1
assert callback3.call_count == 1
def spawn_and_init(fn, args=None, **spawn_kwargs):
if args is None:
args = ()
run_fn = functools.partial(init_and_run, fn, args)
spawn_for_all_world_sizes(run_fn, **spawn_kwargs)
def init_and_run(fn, args, rank, world_size, filename, filename_rpc):
dist_init(rank, world_size, filename, filename_rpc)
group = torch.distributed.new_group()
fn(rank, group, *args)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test utility classes from containers.py. """
from collections import OrderedDict, namedtuple
import random
import pytest
import torch
import torch.nn as nn
from fairscale.internal.containers import (
apply_to_tensors,
pack_kwargs,
split_non_tensors,
unpack_kwargs,
unpack_non_tensors,
)
@pytest.mark.parametrize("devices", [["cpu"], ["cuda"], ["cpu", "cuda"]])
def test_apply_to_tensors(devices):
"""Test apply_to_tensors for both cpu & gpu"""
if "cuda" in devices and not torch.cuda.is_available() or torch.cuda.device_count() < 1:
pytest.skip("Skipped due to lack of GPU")
expected = 0
def get_a_tensor():
"""Return a random tensor on random device."""
dev = random.choice(devices)
shape = random.choice((1, (2, 3), (4, 5, 6), (7, 8, 9, 10)))
t = torch.rand(shape).to(dev)
nonlocal expected
expected += t.numel()
return t
# create a mixed bag of data.
data = [1, "str"] # list
# dict
data.append({"key1": get_a_tensor(), "key2": {1: get_a_tensor()}, "key3": 3})
# set
data.insert(0, set(["x", get_a_tensor(), get_a_tensor()]))
# tuple
data.append(([1], get_a_tensor(), 1, [get_a_tensor()], set((1, 2))))
# OrderedDict
od = OrderedDict()
od["k"] = "value"
data.append(od)
# namedtuple
NT = namedtuple("NT", ["key1", "key2"])
nt = NT(key1=1, key2=get_a_tensor())
data.append(nt)
total = 0
def fn(t, x=[[total]]):
nonlocal total
total += t.numel()
return t
new_data = apply_to_tensors(fn, data)
assert total == expected, f"{total} vs. {expected}"
for i, v in enumerate(data):
assert type(new_data[i]) == type(v), f"expected type {type(v)} got {type(new_data[i])}"
def test_pack_unpack():
"""Test pack_kwargs and unpack_kwargs."""
kwarg_keys, flat_args = pack_kwargs(1, 2, 3, 4)
assert kwarg_keys == tuple()
assert flat_args == (1, 2, 3, 4)
kwarg_keys, flat_args = pack_kwargs(a=1, b={2: "2"}, c={3}, d=[4], e=(5,))
assert kwarg_keys == ("a", "b", "c", "d", "e")
assert flat_args == (1, {2: "2"}, {3}, [4], (5,))
kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4)
assert kwarg_keys == ("a", "b")
assert flat_args == (1, 2, 3, 4)
args, kwargs = unpack_kwargs(kwarg_keys, flat_args)
assert args == (1, 2)
assert kwargs == {"a": 3, "b": 4}
args, kwargs = unpack_kwargs([], flat_args)
assert kwargs == {}
assert args == (1, 2, 3, 4)
args, kwargs = unpack_kwargs(["a", "b", "c", "d"], flat_args)
assert kwargs == {"a": 1, "b": 2, "c": 3, "d": 4}
assert args == tuple()
with pytest.raises(AssertionError):
# too many keys should assert.
args, kwargs = unpack_kwargs(["a", "b", "c", "d", "e"], flat_args)
def test_split_unpack():
"""Test split_non_tensors and unpack_non_tensors."""
x = torch.Tensor([1])
y = torch.Tensor([2])
# degenerate case, args is a single tensor.
tensors, packed_non_tensors = split_non_tensors(x)
assert tensors == (x,)
assert packed_non_tensors is None
tensors, packed_non_tensors = split_non_tensors((x, y, None, 3))
assert tensors == (x, y)
assert packed_non_tensors == {
"is_tensor": [True, True, False, False],
"objects": [None, 3],
}
recon = unpack_non_tensors(tensors, packed_non_tensors)
assert recon == (x, y, None, 3)
tensors, packed_non_tensors = split_non_tensors((None, 3, x, y))
recon = unpack_non_tensors(tensors, packed_non_tensors)
assert recon == (None, 3, x, y)
tensors, packed_non_tensors = split_non_tensors((None, 3))
recon = unpack_non_tensors(tensors, packed_non_tensors)
assert recon == (None, 3)
tensors, packed_non_tensors = split_non_tensors((x, y))
recon = unpack_non_tensors(tensors, packed_non_tensors)
assert recon == (x, y)
recon = unpack_non_tensors(tensors, None)
assert recon == (x, y)
with pytest.raises(AssertionError):
# assert the second arg should be a dict.
recon = unpack_non_tensors(tensors, set())
with pytest.raises(AssertionError):
# assert the content of the second arg should be sane.
recon = unpack_non_tensors(tensors, {"is_tensor": [], "objects": []})
def test_packed_sequence():
"""Test to ensure RNN packed sequences are modified correctly."""
rnn = nn.RNN(5, 5)
x = torch.rand((5, 1, 5), dtype=torch.float)
seq_length = torch.tensor([4], dtype=torch.int)
def fill_fn(x):
x.fill_(0)
x = nn.utils.rnn.pack_padded_sequence(x, seq_length)
x, h = rnn(x)
x = apply_to_tensors(fill_fn, x)
x, _ = nn.utils.rnn.pad_packed_sequence(x)
assert torch.sum(x) == 0
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test utility classes from fairscale.utils.parallel """
from parameterized import parameterized
import torch
from fairscale.internal.parallel import chunk_and_pad
@parameterized.expand([[num_chunks] for num_chunks in range(1, 33)])
def test_chunk_and_pad(num_chunks):
max_tensor_size = 256
tensor = torch.zeros(max_tensor_size)
for tensor_size in range(1, max_tensor_size + 1):
tensor_i = tensor[:tensor_size]
chunks = chunk_and_pad(tensor_i, num_chunks)
assert len(chunks) == num_chunks
assert all(len(chunks[0]) == len(chunk) for chunk in chunks)
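# Illustrative expectation (not an additional assertion): chunk_and_pad(torch.arange(5.0), 2) should return
# [tensor([0., 1., 2.]), tensor([3., 4., 0.])] -- equal-length chunks, zero-padded at the end.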
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test utility classes from state_dict.py. """
import torch
from torch import nn
from fairscale.internal.state_dict import find_module_instances, replace_by_prefix_
def test_find_module_instances():
net = nn.Sequential(
nn.Linear(1, 1), nn.ModuleDict({"ln": nn.LayerNorm(1), "linear": nn.Linear(1, 1)}), nn.LayerNorm(1)
)
assert find_module_instances(net, nn.LayerNorm) == [("1.ln.", net[1]["ln"]), ("2.", net[2])]
assert find_module_instances(net, nn.Linear) == [("0.", net[0]), ("1.linear.", net[1]["linear"])]
assert find_module_instances(net, nn.Dropout) == []
assert find_module_instances(net, nn.Sequential) == [("", net)]
def test_replace_by_prefix():
state_dict = {"layer.a": torch.tensor(1), "abc.layer.def": torch.tensor(2), "layer.b": torch.tensor(3)}
replace_by_prefix_(state_dict, "layer.", "module.layer.")
assert state_dict == {
"module.layer.a": torch.tensor(1),
"abc.layer.def": torch.tensor(2),
"module.layer.b": torch.tensor(3),
}
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# type: ignore
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from typing import Any, List
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
project = "FairScale"
copyright = "2020-2022, Facebook/Meta AI Research"
author = "Facebook/Meta AI Research"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosectionlabel",
"sphinx.ext.napoleon", # support NumPy and Google style docstrings
"recommonmark",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.doctest",
"sphinx.ext.ifconfig",
]
# autosectionlabel throws warnings if section names are duplicated.
# The following tells autosectionlabel to not throw a warning for
# duplicated section names that are in different documents.
autosectionlabel_prefix_document = True
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
autodoc_inherit_docstrings = False
autodoc_member_order = "bysource"
intersphinx_mapping = {
"python": ("https://docs.python.org/3.8", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"torch": ("https://pytorch.org/docs/stable/", None),
}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: List[Any] = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# List of custom sections allowed. It is especially useful when the argument
# list is very long for a constructor or function. This helps split the
# arguments into different sections, helping us to understand the arguments
# better.
napoleon_custom_sections = [
("SlowMo Parameters", "params_style"),
("LocalSGD Parameters", "params_style"),
("SGP Parameters", "params_style"),
("Debugging Parameters", "params_style"),
("Parameters for Advanced Users", "params_style"),
]
# -- Options for HTML output -------------------------------------------------
html_theme = "pytorch_sphinx_theme"
templates_path = ["_templates"]
# Add any paths that contain custom static files (such as style sheets) here,
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"includehidden": True,
"canonical_url": "https://fairscale.readthedocs.io",
"pytorch_project": "docs",
"logo_only": True, # default = False
}
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# setting custom stylesheets https://stackoverflow.com/a/34420612
html_context = {"css_files": ["_static/css/customize.css"]}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "fairscaledocs"
# Override the PyTorch Sphinx CSS
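# NOTE: recommonmark's url_resolver below references `github_doc_root`, which is otherwise undefined.
# The value below is an assumed docs root; adjust it if the repository layout differs.
github_doc_root = "https://github.com/facebookresearch/fairscale/tree/main/docs/"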
def setup(app):
app.add_config_value(
"recommonmark_config",
{
"url_resolver": lambda url: github_doc_root + url,
"auto_toc_tree_section": "Contents",
"enable_math": True,
"enable_inline_math": True,
"enable_eval_rst": True,
"enable_auto_toc_tree": True,
},
True,
)
app.add_transform(AutoStructify)
app.add_css_file("css/customize.css")
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from collections import defaultdict
from functools import reduce
import gc
import logging
import math
import operator
import time
from datasets.wikitext2_data import get_real_dataloaders as get_real_wikitext2_dataloaders
from datasets.wikitext2_data import get_synthetic_dataloaders as get_synthetic_wikitext2_dataloaders
from models import transformer_lm
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import Adam
from benchmarks.golden_configs.lm_wikitext2 import FSDP as lm_wikitext2
from fairscale.nn import auto_wrap, default_auto_wrap_policy, enable_wrap
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
RPC_PORT = 29501
def verify_peak_memory(rank, golden_config, std_dev):
logging.debug(
"Peak allocated bytes on cuda:{:d}: {:1d}".format(rank, torch.cuda.memory_stats(rank)["allocated_bytes.all.peak"])
)
current_device_usage = torch.cuda.memory_stats(rank)["allocated_bytes.all.peak"]
golden_ref = golden_config["peak_mem_usage"][rank]
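# std_dev is a multiplicative headroom factor applied to the golden reference (e.g. 1.1 allows 10% above it).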
if not current_device_usage < golden_ref * std_dev:
raise RuntimeError(
"Peak memory usage for cuda device {:d} is {:d} which"
"is less than golden reference value of {:d}".format(rank, current_device_usage, golden_ref)
)
def verify_lm_run(wps, golden_config, args):
"""Verify that words per second for a given benchmark run matches the golden data."""
if torch.distributed.get_rank() == 0:
# Assert that words per second is within 3 standard deviations of the average
# of five golden runs
logging.info("Throughput(wps) is {:.2f}.".format(wps))
if not wps > (golden_config["avg_wps"] - (3 * golden_config["std_dev_wps"])):
raise RuntimeError(
"Throughput(wps):{:.2f} is below the golden threshold of an "
"average value of {:.2f} and standard dev of {:.2f}.".format(
wps, golden_config["avg_wps"], golden_config["std_dev_wps"]
)
)
for i in range(torch.cuda.device_count()):
verify_peak_memory(i, golden_config, 1.1)
def init_random_seed(seed: int):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
def get_model_and_optimizer(args, device, benchmark_config, model_config):
"""Return instantiated model and optimizer function."""
if args.model_name == "lm":
model = get_lm_model(args, device, model_config)
lr = benchmark_config["lr"]
def make_adam(params):
return Adam(params, lr=lr)
optimizer = make_adam
return model, optimizer
def get_lm_model(args, device, config):
"""Get language model(based on GPT-2) used for sequence prediction."""
ninp = config["ninp"]
nhead = config["nhead"]
initrange = config["initrange"]
dropout = config["dropout"]
vocab_size = config["vocab_size"]
nhid = config["nhid"]
ndecoder = config["num_decoder_layers"]
return transformer_lm.TransformerLM(vocab_size, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)
def get_tensors_by_size_bucket():
size_buckets = defaultdict(int)
for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
if obj.device.type == "cuda":
size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
return size_buckets
def log_number_of_parameters(model):
num_params = reduce(operator.add, (reduce(operator.mul, x.size()) for x in model.parameters()))
if hasattr(model, "group"):
total = torch.Tensor([num_params])
if torch.cuda.is_available():
total = total.cuda()
torch.distributed.all_reduce(total, group=model.group)
print(
f"training model, #params = {num_params/10**6}M, group: {model.group.rank()}, grank:"
f" {torch.distributed.get_rank()}, sizes {model.group.size()}"
)
torch.distributed.barrier()
if model.group.rank() == 0:
print(f"total #params = {total.item()}")
else:
print(f"training model, #params = {num_params/10**6}M")
def get_device(model, index):
if isinstance(model, DDP):
model = model.module
if not torch.cuda.is_available():
return torch.device("cpu")
if hasattr(model, "devices"):
return model.devices[index]
else:
return torch.cuda.current_device()
def get_fake_dataloader(lm_dataloader_len, args):
fake_input = {"input": torch.zeros(args.batch_size)}
class FakeDataset:
def __getitem__(self, index):
return fake_input
def __len__(self):
return lm_dataloader_len
return FakeDataset()
def train(model_config, model, benchmark_config, model_specs, args):
lm_dataloader, _, _ = model_config["data"]
criterion = benchmark_config["criterion"]
vocab_size = model_specs["vocab_size"]
optimizer = model_config["optimizer"]
if not args.benchmark_eval:
model.train()
log_number_of_parameters(model)
total_loss = 0.0
word_counter = 0
optimizer = optimizer(model.parameters())
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
total_tokens = 0
total_tokens_per_log_interval = 0
bptt = 2
start_time = time.time()
epoch_start_time = 0.0
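# get_batch builds a next-token-prediction pair: the target is the source sequence shifted by one position.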
def get_batch(source):
seq_len = len(source) - 1
data = source[0:seq_len]
target = source[1 : 1 + seq_len]
return data, target
for i, batch in enumerate(lm_dataloader):
if i == 1:
epoch_start_time = time.time()
source, target = get_batch(batch)
if args.full_fp16:
# source = source.half()
target = target.half()
if args.max_batch and i > args.max_batch:
break
if i > 0:
total_tokens += source.numel()
if args.benchmark_eval:
input = source.cuda()
target = target.cuda()
output = model(input)
print(f"output.dtype {output.dtype}, target.dtype {target.dtype}")
loss = torch.nn.CrossEntropyLoss()(output.view(-1, vocab_size), target.view(-1))
else:
optimizer.zero_grad()
input = source.cuda()
target = target.cuda()
output = model(input)
loss = criterion(output.view(-1, vocab_size), target.view(-1))
loss.backward()
torch.nn.utils.clip_grad_value_(model.parameters(), model_specs["clip_value"])
optimizer.step()
total_loss += loss.item()
log_interval = 1
total_tokens_per_log_interval += source.numel()
if i % log_interval == 0 and i > 0:
cur_loss = total_loss / log_interval
elapsed = time.time() - start_time
if dist.get_rank() == 0:
print(
"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
i, total_tokens_per_log_interval / elapsed, cur_loss, math.exp(cur_loss)
)
)
total_tokens_per_log_interval = 0
total_loss = 0
start_time = time.time()
if epoch_start_time != 0:
torch.cuda.synchronize()
wps = total_tokens / (time.time() - epoch_start_time)
else:
raise RuntimeError(
"Unable to benchmark on a single batch. Increase the size " " of the dataset and rerun the benchmark."
)
return wps, loss.item()
def get_number_of_words(data):
return data.size()[0] * data.size()[1]
def benchmark_language_model(model_config, model, benchmark_config, model_specs, args):
golden_config = get_golden_config(args.model_name, args)
epoch = benchmark_config["epochs"]
start_time = time.time()
if dist.get_rank() == 0:
print("-" * 110)
print("| start of epoch {:1d}".format(epoch))
print("-" * 110)
wps, loss = train(model_config, model, benchmark_config, model_specs, args)
elapsed_time = time.time() - start_time
if dist.get_rank() == 0:
print("-" * 110)
print("| end of epoch {:1d} | time: {:5.2f}s | train loss {:5.2f} ".format(epoch, elapsed_time, loss))
print("-" * 110)
print("Throughput(wps) is {:.2f}.".format(wps))
print(
"Peak allocated bytes on cuda:{}: {:4f}GB".format(
dist.get_rank(), torch.cuda.memory_stats(dist.get_rank())["allocated_bytes.all.peak"] / 2**30
)
)
verify_lm_run(wps, golden_config, args)
def get_synthetic_dataloaders(args, device, benchmark_config, model_specs):
"""Returns dataloader for synthetic data."""
if args.model_name == "lm":
return get_synthetic_wikitext2_dataloaders(args, benchmark_config, model_specs)
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def get_real_dataloaders(args, device, benchmark_config, model_specs):
"""Returns dataloaders for real data."""
if args.model_name == "lm":
data = get_real_wikitext2_dataloaders(args, benchmark_config, model_specs)
ntokens, train_dataloader, valid_dataloader, test_dataloader = data
model_specs["vocab_size"] = ntokens
return train_dataloader, valid_dataloader, test_dataloader
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def create_model_config(args, benchmark_config=None, model_specs=None):
"""Return a dict with the given model, dataset and optimizer."""
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if args.use_synthetic_data:
dataloader_fn = get_synthetic_dataloaders
else:
dataloader_fn = get_real_dataloaders
data = dataloader_fn(args, device, benchmark_config, model_specs)
model, optimizer = get_model_and_optimizer(args, device, benchmark_config, model_specs)
return {
"model": model,
"optimizer": optimizer,
"data": data,
}
def create_benchmark_config(model_name):
"""Return a dict with configurations required for benchmarking `model_name` model."""
if model_name == "lm":
return lm_wikitext2.get_benchmark_config()
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def get_model_specs(model_name):
"""Return a dict with configurations required for configuring `model_name` model."""
if model_name == "lm":
return lm_wikitext2.get_model_config()
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def get_golden_config(model_name, args):
"""Return a dict with the golden data for throughput and memory usage."""
if model_name == "lm":
return lm_wikitext2.get_golden_synthetic_stats()
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def benchmark_fsdp(rank, args, world_size):
"""Benchmark a given model using a single process and multiple devices."""
init_method_pgroup = "tcp://localhost:{}".format(RPC_PORT)
torch.distributed.init_process_group(
backend="nccl", rank=rank, world_size=world_size, init_method=init_method_pgroup
)
torch.cuda.set_device(rank)
init_random_seed(0)
benchmark_config = create_benchmark_config(args.model_name)
model_specs = get_model_specs(args.model_name)
model_config = create_model_config(args, benchmark_config=benchmark_config, model_specs=model_specs)
model = model_config["model"]
config = {}
if args.full_fp16:
config["compute_dtype"] = torch.float16
config["mixed_precision"] = False
if args.enable_auto_wrap:
with enable_wrap(wrapper_cls=FSDP, **config):
fsdp_model = auto_wrap(model, auto_wrap_policy=default_auto_wrap_policy)
fsdp_model = FSDP(fsdp_model, **config)
else:
fsdp_model = FSDP(model, **config)
if args.full_fp16:
fsdp_model = fsdp_model.half()
print(f"param dtype {[p.dtype for p in fsdp_model.parameters()]}")
if args.dry_run:
train(model_config, fsdp_model, benchmark_config, model_specs, args)
else:
benchmark_language_model(model_config, fsdp_model, benchmark_config, model_specs, args)
parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument("--max_batch", type=int, default=4, help="Max number of batches")
parser.add_argument("--use_synthetic_data", action="store_true", help="Uses synthetic data for running benchmarks.")
parser.add_argument("--dry_run", action="store_true", help="Run a sample training run without regression testing.")
parser.add_argument(
"--model_name",
default="lm",
help="Language Model(LM) used to benchmark FSDP.",
)
parser.add_argument("--debug", action="store_true", default=False, help="Display additional debug information")
parser.add_argument("--enable_auto_wrap", action="store_true", default=False, help="Use auto_wrap with FSDP")
parser.add_argument("--benchmark_eval", action="store_true", default=False, help="Benchmark evaluation workflow.")
parser.add_argument("--full_fp16", action="store_true", default=False, help="Benchmark in full fp16 mode.")
if __name__ == "__main__":
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
print(f"Running FSDP benchmark with args: {args}")
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
assert num_devices > 0
mp.spawn(
benchmark_fsdp,
args=(args, num_devices),
nprocs=num_devices,
join=True,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from enum import Enum
import importlib
import logging
import tempfile
import time
from typing import Any, List, Optional, cast
from golden_configs import oss_mnist
import numpy as np
import torch
import torch.autograd.profiler as profiler
from torch.cuda.amp import GradScaler as TorchGradScaler
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import BatchSampler, DataLoader, Sampler
from torch.utils.data.distributed import DistributedSampler
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Resize, ToTensor
from benchmarks.datasets.mnist import setup_cached_mnist
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
TEMPDIR = tempfile.gettempdir()
def dist_init(rank, world_size, backend):
logging.info(f"Using backend: {backend}")
dist.init_process_group(backend=backend, init_method="tcp://localhost:29501", rank=rank, world_size=world_size)
def get_problem(rank, world_size, batch_size, device, model_name: str):
# Select the desired model on the fly
logging.info(f"Using {model_name} for benchmarking")
try:
model = getattr(importlib.import_module("torchvision.models"), model_name)(pretrained=False).to(device)
except AttributeError:
model = getattr(importlib.import_module("timm.models"), model_name)(pretrained=False).to(device)
# Data setup, duplicate the grey channels to get pseudo color
def collate(inputs: List[Any]):
return {
"inputs": torch.stack([i[0] for i in inputs]).repeat(1, 3, 1, 1).to(device),
"label": torch.tensor([i[1] for i in inputs]).to(device),
}
# Transforms
transforms = []
if model_name.startswith("vit"):
# ViT models expect a fixed input size. Add an ad-hoc transform to resize the pictures accordingly.
pic_size = int(model_name.split("_")[-1])
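# timm ViT model names encode the input resolution as the last underscore-separated token, e.g. "vit_base_patch16_224" -> 224.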
transforms.append(Resize(pic_size))
transforms.append(ToTensor())
dataset = MNIST(transform=Compose(transforms), download=False, root=TEMPDIR)
sampler: Sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank)
batch_sampler = BatchSampler(sampler, batch_size, drop_last=True)
dataloader = DataLoader(dataset=dataset, batch_sampler=batch_sampler, collate_fn=collate)
loss_fn = nn.CrossEntropyLoss()
return model, dataloader, loss_fn
class OptimType(str, Enum):
vanilla = "pytorch"
oss_ddp = "oss_ddp"
oss_sharded_ddp = "oss_sharded_ddp"
everyone = "everyone"
def validate_benchmark(measurements, final_loss, args, check_regression):
"""Validate the measurments against the golden benchmark config."""
golden_data = oss_mnist.get_golden_real_stats()
max_memory = -1.0
rank = dist.get_rank()
if not args.cpu:
# TODO(anj-s): Check if we need to synchronize before we calculate total training time.
torch.cuda.synchronize(rank)
max_memory = torch.cuda.max_memory_allocated(rank) / 2**20
logging.info(f"[{rank}] : Peak memory {max_memory:.1f}MiB")
measurements.sort()
median = measurements[len(measurements) // 2]
# Compute the median and median of absolute differences img per second.
abs_diff = list(map(lambda x: abs(x - median), measurements))
abs_diff.sort()
mad = abs_diff[len(measurements) // 2] if args.epochs > 2 else -1
# TODO(anj-s): Add a debug flag to perform the above calculation only when required.
logging.info(f"[{rank}] : Median speed: {median:.2f} +/- {mad:.2f}")
if check_regression and rank == 0:
assert median + 8.0 * mad > golden_data["reference_speed"], (
f"Speed regression detected: " f"{median + 8.0 * mad} vs. {golden_data['reference_speed']}"
)
assert max_memory < 1.05 * golden_data["reference_memory"], (
f"Memory use regression detected: " f"{max_memory} vs. {1.05* golden_data['reference_memory']}"
)
# any min_loss below golden + epsilon is OK.
assert cast(float, final_loss) - golden_data["reference_loss"] < 1e-2, (
f"Loss regression detected: " f"{final_loss} vs. {golden_data['reference_loss']}"
)
logging.info("[Regression Test] VALID")
def train(
rank: int,
args: argparse.Namespace,
backend: str = "gloo",
optim_type: OptimType = OptimType.vanilla,
check_regression: bool = True,
):
logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)
use_multi_tensor = args.multi_tensor_optim and hasattr(torch.optim, "_multi_tensor")
OPTIM = torch.optim._multi_tensor.RMSprop if use_multi_tensor else torch.optim.RMSprop # type: ignore # attr is checked but mypy misses that
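# The _multi_tensor variants batch per-parameter updates into fused kernels, which is usually faster on GPU.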
logging.info("Multi tensor optimizer: {}".format(use_multi_tensor))
# DDP
dist_init(rank=rank, world_size=args.world_size, backend=backend)
# Setup
if not args.cpu:
torch.cuda.set_device(rank)
torch.cuda.manual_seed(0)
torch.manual_seed(0) # also sets the cuda seed
np.random.seed(0)
if backend == "nccl":
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = torch.device("cpu") if args.cpu else torch.device(rank)
model, dataloader, loss_fn = get_problem(rank, args.world_size, args.batch_size, device, args.model)
# Shard the optimizer
optimizer: Optional[torch.optim.Optimizer] = None
model = cast(nn.Module, model)
scaler = (TorchGradScaler() if args.optim_type == OptimType.vanilla else ShardedGradScaler()) if args.amp else None
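# OSS/ShardedDDP need the shard-aware ShardedGradScaler so that inf/NaN checks are synchronized across ranks,
# since each rank only owns a shard of the optimizer state.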
if optim_type == OptimType.oss_sharded_ddp:
optimizer = OSS(params=model.parameters(), optim=OPTIM, lr=1e-4, momentum=0.9)
# Single node run typically, no need for reduce buckets
model = ShardedDDP(model, optimizer, reduce_buffer_size=0)
else:
device_ids = None if args.cpu else [rank]
model = DDP(model, device_ids=device_ids, find_unused_parameters=False) # type: ignore
optimizer = (
OSS(params=model.parameters(), optim=OPTIM, lr=1e-4, momentum=0.9)
if optim_type == OptimType.oss_ddp
else OPTIM(model.parameters(), lr=1e-4, momentum=0.9)
)
optimizer = cast(torch.optim.Optimizer, optimizer)
# Reset the memory use counter
if not args.cpu:
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats(rank)
torch.cuda.synchronize(rank)
# Standard training loop
training_start = time.monotonic()
model.train()
measurements = []
final_loss: Optional[float] = -1.0
min_loss = 100.0
need_profiling = args.profile
for epoch in range(args.epochs):
n_items = 0
epoch_runtime = 0.0
for batch in dataloader:
if not args.cpu:
torch.cuda.synchronize(rank)
batch_start = time.monotonic()
def closure(data=batch, grad_scaler=None):
model.zero_grad()
if args.debug and rank == 0 and next(model.parameters()).grad is not None:
logging.debug(
"\nbefore: param {} -- grad {}".format(
next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()
)
)
if grad_scaler is not None:
# Automatically computes the FW pass in half precision
with torch.cuda.amp.autocast():
outputs = model(data["inputs"])
loss = loss_fn(outputs, data["label"])
# Accumulates scaled gradients.
grad_scaler.scale(loss).backward()
else:
outputs = model(data["inputs"])
loss = loss_fn(outputs, data["label"])
loss.backward()
if args.debug and rank == 0 and next(model.parameters()).grad is not None:
logging.debug(
"after BW: param {} -- grad {}".format(
next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()
)
)
return loss
def run_closure(closure, scaler, optimizer):
if scaler is not None:
final_loss = closure(grad_scaler=scaler) # AMP scaler.step does not support closures
scaler.step(optimizer)
scaler.update()
return final_loss
else:
return optimizer.step(closure)
if need_profiling and not args.cpu:
logging.info("Profiling the run")
with profiler.profile(use_cuda=True, record_shapes=True, profile_memory=True) as prof: # type: ignore
with profiler.record_function("batch"):
final_loss = run_closure(closure, scaler, optimizer)
prof.export_chrome_trace(f"{optim_type}_trace_rank_{rank}.json")
need_profiling = False # only profile once
else:
final_loss = run_closure(closure, scaler, optimizer)
if args.debug and rank == 0:
logging.debug("buffer: {}".format(next(model.buffers()).norm().item()))
logging.debug(
"after update: param {} -- grad {}".format(
next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()
)
)
n_items += args.batch_size
if not args.cpu:
# make sure that the cuda kernels are finished before taking a timestamp
torch.cuda.synchronize(rank)
batch_end = time.monotonic()
epoch_runtime += batch_end - batch_start
if optim_type == OptimType.oss_ddp or optim_type == OptimType.oss_sharded_ddp:
# Check the checkpointing in the case of the OSS optimizer
# Memory usage could spill over from there
optimizer = cast(OSS, optimizer)
optimizer.consolidate_state_dict()
if dist.get_rank() == 0:
_ = optimizer.state_dict()
logging.info("... State dict collected")
measurements.append(n_items / epoch_runtime)
min_loss = min(min_loss, final_loss)
if dist.get_rank() == 0:
logging.info(
f"Epoch {epoch} - processed {measurements[-1]:.2f} img per sec. "
f"Loss {final_loss:.3f} min loss {min_loss:.3f}"
)
training_stop = time.monotonic()
img_per_sec = n_items / (training_stop - training_start) * args.epochs
logging.info(f"[{dist.get_rank()}] : Training done. {img_per_sec:.2f} img per sec inc. checkpoint")
# Use min_loss to check instead of final_loss since the final_loss is a bit random.
# If the training min_loss reaches certain number, we can be reasonably certain the
# training process was correct.
validate_benchmark(measurements, min_loss, args, check_regression)
dist.destroy_process_group() # type: ignore
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Benchmark the optimizer state sharding, on a typical computer vision workload"
)
parser.add_argument("--world_size", action="store", default=2, type=int)
parser.add_argument("--epochs", action="store", default=10, type=int)
parser.add_argument("--batch_size", action="store", default=256, type=int)
parser.add_argument("--check_regression", action="store_true", default=False)
parser.add_argument(
"--optim_type", type=OptimType, choices=[o.value for o in OptimType], default=OptimType.everyone
)
parser.add_argument("--gloo", action="store_true", default=False)
parser.add_argument("--profile", action="store_true", default=False)
parser.add_argument("--cpu", action="store_true", default=False)
parser.add_argument("--model", type=str, help="Any torchvision or timm model name (str)", default="resnet101")
parser.add_argument("--debug", action="store_true", default=False, help="Display additional debug information")
parser.add_argument("--amp", action="store_true", default=False, help="Activate torch AMP")
parser.add_argument(
"--multi_tensor_optim", action="store_true", default=False, help="Use the faster multi-tensor optimizers"
)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)
logging.info("Benchmark arguments: %s" % args)
BACKEND = "nccl" if not args.gloo and torch.cuda.is_available() and not args.cpu else "gloo"
# Download dataset once for all processes
setup_cached_mnist()
# Benchmark the different configurations, via multiple processes
if args.optim_type == OptimType.vanilla or args.optim_type == OptimType.everyone:
logging.info("\n*** Benchmark vanilla optimizer")
mp.spawn(
train, # type: ignore
args=(args, BACKEND, OptimType.vanilla, False), # no regression check
nprocs=args.world_size,
join=True,
)
if args.optim_type == OptimType.oss_ddp or args.optim_type == OptimType.everyone:
logging.info("\n*** Benchmark OSS with DDP")
mp.spawn(
train,
args=(args, BACKEND, OptimType.oss_ddp, args.check_regression),
nprocs=args.world_size,
join=True, # type: ignore
)
if args.optim_type == OptimType.oss_sharded_ddp or args.optim_type == OptimType.everyone:
logging.info("\n*** Benchmark OSS with ShardedDDP")
mp.spawn(
train, # type: ignore
args=(
args,
BACKEND,
OptimType.oss_sharded_ddp,
args.check_regression,
),
nprocs=args.world_size,
join=True,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from functools import reduce
import logging
import operator
import datasets.wikitext2_data as wikitext2_data
from models import transformer_lm
import numpy as np
import torch
from torch.optim import Adam
def init_random_seed(seed: int):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
def init_args():
parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument("--host", "-o", type=str, default="localhost", help="hostname")
parser.add_argument("--chunks", type=int, default=1, help="number of microbatches per batch")
parser.add_argument("--batch-size", type=int, default=8, help="size of a batch")
parser.add_argument(
"--checkpoint",
default="never",
choices=["always", "except_last", "never"],
help="Checkpointing strategy for pipe",
)
parser.add_argument(
"--lazy-construction", action="store_true", default=False, help="Number of decoder layers in the model"
)
parser.add_argument("--max-batch", type=int, default=4, help="Max number of batches")
parser.add_argument("--use_synthetic_data", action="store_true", help="Uses synthetic data for running benchmarks.")
parser.add_argument("--dry_run", action="store_true", help="Run a sample training run without regression testing.")
parser.add_argument(
# TODO(anj-s): In the process of adding more models and hence the requirement for a flag.
"--model_name",
default="lm",
help="Language Model(LM) used to benchmark nn.pipe.",
)
parser.add_argument("--debug", action="store_true", default=False, help="Display additional debug information")
args = parser.parse_args()
return args
def create_benchmark_config(model_name, config_class):
"""Return a dict with configurations required for benchmarking `model_name` model."""
if model_name == "lm":
return config_class.get_benchmark_config()
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def get_model_specs(model_name, config_class):
"""Return a dict with configurations required for configuring `model_name` model."""
if model_name == "lm":
return config_class.get_model_config()
else:
raise RuntimeError("Unrecognized args.model_mame " % model_name)
def create_model_config(args, benchmark_config=None, model_specs=None, device=None):
"""Return a dict with the given model, dataset and optimizer."""
if not device:
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
dataset_info = get_dataset_info(args)
assert model_specs is not None
model_specs["vocab_size"] = dataset_info.ntokens
model, optimizer = get_model_and_optimizer(args, device, benchmark_config, model_specs)
return {
"model": model,
"optimizer": optimizer,
"dataset_info": dataset_info,
}
def get_model_and_optimizer(args, device, benchmark_config, model_config):
"""Return instantiated model and optimizer function."""
if args.model_name == "lm":
model = get_lm_model(args, device, model_config)
lr = benchmark_config["lr"]
def make_adam(params):
return Adam(params, lr=lr)
optimizer = make_adam
return model, optimizer
def get_lm_model(args, device, config):
"""Get language model(based on GPT-2) used for sequence prediction."""
ninp = config["ninp"]
nhead = config["nhead"]
initrange = config["initrange"]
dropout = config["dropout"]
vocab_size = config["vocab_size"]
nhid = config["nhid"]
ndecoder = config["num_decoder_layers"]
is_moe = config.get("is_moe", False)
num_local_experts = config.get("num_local_experts", 1)
if args.lazy_construction:
layers = [
LazyModule(lambda: transformer_lm.EmbeddingLayer(vocab_size, ninp, initrange)),
LazyModule(lambda: transformer_lm.PositionalEncodingLayer(ninp, dropout)),
]
for _ in range(ndecoder):
layers.append(
LazyModule(
lambda: transformer_lm.TransformerDecoderLayer(
ninp, nhead, nhid, dropout, is_moe, num_local_experts
)
)
)
layers.append(LazyModule(lambda: transformer_lm.LinearLayer(ninp, vocab_size, initrange)))
model = layers
else:
model = transformer_lm.TransformerLM(
vocab_size, ninp, nhead, nhid, dropout, initrange, ndecoder, is_moe, num_local_experts
).to(device)
return model
def log_number_of_parameters(model, logger=None):
if not logger:
logger = logging
num_params = reduce(operator.add, (reduce(operator.mul, x.size()) for x in model.parameters()))
if hasattr(model, "group"):
total = torch.Tensor([num_params])
if torch.cuda.is_available():
total = total.cuda()
torch.distributed.all_reduce(total, group=model.group)
logger.debug(
f"training model, #params = {num_params}, group: {model.group.rank()}, grank:"
f" {torch.distributed.get_rank()}, sizes {model.group.size()}"
)
torch.distributed.barrier()
if model.group.rank() == 0:
logger.debug(f"total #prams = {total.item()}")
else:
logger.debug(f"training model, #params = {num_params}")
def get_dataset_info(args):
assert args.model_name == "lm"
if args.use_synthetic_data:
return wikitext2_data.get_synthetic_datasets()
else:
return wikitext2_data.get_real_datasets()
def get_data_loader(dataset_info, args, benchmark_config, model_specs, num_replicas=1, rank=0):
return wikitext2_data.get_dataloaders(dataset_info, benchmark_config, model_specs, num_replicas, rank)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import time
from golden_configs.lm_wikitext2 import MOE as MOEConfig
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
import utils
MPI_PORT = 29500
def benchmark_single_process(config_class, args):
"""Benchmark a given model using a single process and multiple devices."""
world_size = torch.cuda.device_count() if torch.cuda.is_available() else 1
assert world_size > 0
benchmark_config = utils.create_benchmark_config(args.model_name, config_class)
model_specs = utils.get_model_specs(args.model_name, config_class)
mp.spawn(train, args=(world_size, benchmark_config, model_specs, args), nprocs=world_size, join=True)
def train(rank, world_size, benchmark_config, model_specs, args):
logger = mp.log_to_stderr()
logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
utils.init_random_seed(rank)
init_method_pgroup = "tcp://localhost:{}".format(MPI_PORT)
torch.distributed.init_process_group(
backend="nccl", rank=rank, world_size=world_size, init_method=init_method_pgroup
)
logger.info("train, rank={}".format(rank))
device = torch.device("cuda", rank) if torch.cuda.is_available() else torch.device("cpu")
criterion = benchmark_config["criterion"]
model_config = utils.create_model_config(
args, benchmark_config=benchmark_config, model_specs=model_specs, device=device
)
# vocab_size may change in create_model_config() due to input data
vocab_size = model_specs["vocab_size"]
model = model_config["model"]
model.train()
optimizer = model_config["optimizer"]
optimizer = optimizer(model.parameters())
group = model.group if hasattr(model, "group") else None
utils.log_number_of_parameters(model, logger)
total_loss = 0.0
word_counter = 0
total_tokens = 0
total_tokens_per_log_interval = 0
bptt = 2
total_elapsed = 0.0
model = DDP(model, device_ids=[rank], output_device=rank, broadcast_buffers=False)
lm_dataloader, _, _ = utils.get_data_loader(
model_config["dataset_info"], args, benchmark_config, model_specs, num_replicas=world_size, rank=rank
)
def get_batch(source):
seq_len = len(source) - 1
data = source[0:seq_len]
target = source[1 : 1 + seq_len]
return data, target
for i, batch in enumerate(lm_dataloader):
if i == 1:
epoch_start_time = time.time()
if args.max_batch and i > args.max_batch:
break
if i > 0:
total_tokens += batch.numel()
start_time = time.time()
optimizer.zero_grad()
source, target = get_batch(batch)
source = source.to(device)
target = target.to(device)
try:
            output = model(source)
loss = criterion(output.view(-1, vocab_size), target.view(-1))
total_loss += loss.item()
loss.backward()
torch.nn.utils.clip_grad_value_(model.parameters(), model_specs["clip_value"])
optimizer.step()
except Exception as e:
raise RuntimeError(f"training failed on {torch.distributed.get_rank()}") from e
elapsed = time.time() - start_time
total_elapsed += elapsed
log_interval = 1
total_tokens_per_log_interval += batch.numel()
if i % log_interval == 0 and i > 0:
cur_loss = total_loss / log_interval
logger.debug(
"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
i, total_tokens_per_log_interval / elapsed, cur_loss, math.exp(cur_loss)
)
)
total_tokens_per_log_interval = 0
total_loss = 0
wps = total_tokens / total_elapsed
logger.debug("rank {}, wps: {}".format(rank, wps))
logger.debug(
"Peak allocated bytes on cuda:{}: {:1d}".format(
dist.get_rank(), torch.cuda.memory_stats(dist.get_rank())["allocated_bytes.all.peak"]
)
)
if __name__ == "__main__":
args = utils.init_args()
logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)
logging.info(f"Running single process benchmark with args: {args}")
benchmark_single_process(MOEConfig, args)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
import gc
import logging
import math
import time
import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.nn.parallel import DistributedDataParallel as DDP
import utils
from benchmarks.golden_configs.lm_wikitext2 import Pipe as lm_wikitext2
from fairscale.fair_dev.testing.testing import dist_init
from fairscale.nn import Pipe
from fairscale.nn.model_parallel import initialize_model_parallel
MPI_PORT = 29500
RPC_PORT = 29501
def get_tensors_by_size_bucket():
size_buckets = defaultdict(int)
for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
if obj.device.type == "cuda":
size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
return size_buckets
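# Illustrative sketch (not called by the benchmark): dump the live CUDA tensor
# buckets returned by get_tensors_by_size_bucket(), e.g. before and after a
# training step, to spot leaked activations.
def _example_dump_tensor_buckets():
    for shape_and_elem_size, count in sorted(get_tensors_by_size_bucket().items()):
        print(shape_and_elem_size, count)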
def get_device(model, index):
if isinstance(model, DDP):
model = model.module
if not torch.cuda.is_available():
return torch.device("cpu")
if hasattr(model, "devices"):
return model.devices[index]
else:
return torch.cuda.current_device()
def get_fake_dataloader(lm_dataloader_len, args):
fake_input = {"input": torch.zeros(args.batch_size)}
class FakeDataset:
def __getitem__(self, index):
return fake_input
def __len__(self):
return lm_dataloader_len
return FakeDataset()
def train(model_config, model, benchmark_config, model_specs, args):
lm_dataloader, _, _ = utils.get_data_loader(model_config["dataset_info"], args, benchmark_config, model_specs)
criterion = benchmark_config["criterion"]
vocab_size = model_specs["vocab_size"]
optimizer = model_config["optimizer"]
model.train()
utils.log_number_of_parameters(model)
total_loss = 0.0
word_counter = 0
optimizer = optimizer(model.parameters())
pipe_group = model.group if hasattr(model, "group") else None
# TODO(anj-s): Avoid sending fake data to all replicas except the first and last one.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if pipe_group and pipe_group.rank() != 0 and pipe_group.rank() != (pipe_group.size() - 1):
lm_dataloader, _, _ = get_synthetic_dataloaders(args, benchmark_config, model_specs)
total_tokens = 0
total_tokens_per_log_interval = 0
bptt = 2
start_time = time.time()
epoch_start_time = 0.0
def get_batch(source):
seq_len = len(source) - 1
data = source[0:seq_len]
target = source[1 : 1 + seq_len]
return data, target
for i, batch in enumerate(lm_dataloader):
if i == 1:
epoch_start_time = time.time()
source, target = get_batch(batch)
if args.max_batch and i > args.max_batch:
break
if i > 0:
total_tokens += source.numel()
optimizer.zero_grad()
try:
if pipe_group is None or pipe_group.rank() == 0:
tmp = source.to(get_device(model, 0))
output = model(tmp)
else:
output = model(source)
except Exception as e:
raise RuntimeError(f"training failed on {torch.distributed.get_rank()}") from e
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
target = target.to(get_device(model, -1))
output = output.to(target.device)
loss = criterion(output.view(-1, vocab_size), target.view(-1))
loss.backward()
del target
else:
model.back_helper(output)
del output
torch.nn.utils.clip_grad_value_(model.parameters(), model_specs["clip_value"])
optimizer.step()
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
total_loss += loss.item()
log_interval = 1
total_tokens_per_log_interval += source.numel()
if i % log_interval == 0 and i > 0:
cur_loss = total_loss / log_interval
elapsed = time.time() - start_time
if dist.get_rank() == dist.get_world_size() - 1:
logging.debug(
"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
i, total_tokens_per_log_interval / elapsed, cur_loss, math.exp(cur_loss)
)
)
total_tokens_per_log_interval = 0
total_loss = 0
start_time = time.time()
if epoch_start_time != 0:
wps = total_tokens / (time.time() - epoch_start_time)
else:
raise RuntimeError(
"Unable to benchmark on a single batch. Increase the size " " of the dataset and rerun the benchmark."
)
if dist.get_rank() == dist.get_world_size() - 1:
return wps, loss.item()
else:
return 0.0, 0.0
# TODO(anj-s): Add an option for users to be able to benchmark evaluate.
def evaluate(eval_model, data_source, criterion, ntokens):
eval_model.eval()
total_loss = 0.0
# TODO(anj-s): Move this to the benchmark config if we want to benchmark evaluation.
bptt = 35
def get_batch(source, i, bptt):
seq_len = min(bptt, len(source) - 1 - i)
data = source[i : i + seq_len]
target = source[i + 1 : i + 1 + seq_len].view(-1)
return data, target
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, bptt):
data, targets = get_batch(data_source, i, bptt)
output = eval_model(data)
output = output.to(targets.device)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1)
def get_number_of_words(data):
return data.size()[0] * data.size()[1]
def verify_peak_memory(rank, golden_config, std_dev):
    logging.debug(
        "Peak allocated bytes on cuda:{:d}: {:1d}".format(rank, torch.cuda.memory_stats(rank)["allocated_bytes.all.peak"])
    )
current_device_usage = torch.cuda.memory_stats(rank)["allocated_bytes.all.peak"]
golden_ref = golden_config["peak_mem_usage"][rank]
if not current_device_usage < golden_ref * std_dev:
raise RuntimeError(
"Peak memory usage for cuda device {:d} is {:d} which"
"is less than golden reference value of {:d}".format(rank, current_device_usage, golden_ref)
)
def verify_lm_run(wps, golden_config, args):
"""Verify that words per second for a given benchmark run matches the golden data."""
if dist.get_rank() == dist.get_world_size() - 1:
# Assert that words per second is within 3 standard deviations of the average
# of five golden runs
logging.info("Throughput(wps) is {:.2f}.".format(wps))
if not wps > (golden_config["avg_wps"] - (3 * golden_config["std_dev_wps"])):
raise RuntimeError(
"Throughput(wps):{:.2f} is below the golden threshold of an "
"average value of {:.2f} and standard dev of {:.2f}.".format(
wps, golden_config["avg_wps"], golden_config["std_dev_wps"]
)
)
for i in range(4):
verify_peak_memory(i, golden_config, 1.1)
def benchmark_language_model(model_config, model, benchmark_config, model_specs, config_class, args):
golden_config = get_golden_config(args.model_name, config_class, args)
epoch = benchmark_config["epochs"]
start_time = time.time()
if dist.get_rank() == dist.get_world_size() - 1:
logging.debug("-" * 110)
logging.debug("| start of epoch {:1d}".format(epoch))
logging.debug("-" * 110)
wps, loss = train(model_config, model, benchmark_config, model_specs, args)
elapsed_time = time.time() - start_time
if dist.get_rank() == dist.get_world_size() - 1:
logging.debug("-" * 110)
logging.debug("| end of epoch {:1d} | time: {:5.2f}s | train loss {:5.2f} ".format(epoch, elapsed_time, loss))
logging.debug("-" * 110)
logging.debug("Throughput(wps) is {:.2f}.".format(wps))
logging.debug(
"Peak allocated bytes on cuda:{}: {:1d}".format(
dist.get_rank(), torch.cuda.memory_stats(dist.get_rank())["allocated_bytes.all.peak"]
)
)
if len(model.balance) == 4:
if args.model_name == "lm":
verify_lm_run(wps, golden_config, args)
else:
raise RuntimeError("Unrecognized args.model_name " % args.model_name)
def generate_balance(num_devices, num_layers):
balance = []
layers_assigned = 0
for i in range(num_devices):
x = (num_layers - layers_assigned) / (num_devices - i)
if x.is_integer():
balance.append(int(x))
layers_assigned += x
else:
balance.append(math.ceil(x))
layers_assigned += math.ceil(x)
return balance
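# Illustrative sketch (not used by the benchmark): generate_balance spreads layers
# as evenly as possible across devices, front-loading any remainder. For example,
# 10 layers over 4 devices gives [3, 3, 2, 2].
def _example_generate_balance():
    assert generate_balance(4, 10) == [3, 3, 2, 2]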
def get_golden_config(model_name, config_class, args):
"""Return a dict with the golden data for throughput and memory usage."""
if model_name == "lm":
return config_class.get_golden_real_stats()
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def benchmark_single_process(config_class, args):
"""Benchmark a given model using a single process and multiple devices."""
init_method_pgroup = "tcp://localhost:{}".format(MPI_PORT)
torch.distributed.init_process_group(backend="gloo", rank=0, world_size=1, init_method=init_method_pgroup)
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
assert num_devices > 0
utils.init_random_seed(0)
benchmark_config = utils.create_benchmark_config(args.model_name, config_class)
model_specs = utils.get_model_specs(args.model_name, config_class)
model_config = utils.create_model_config(args, benchmark_config=benchmark_config, model_specs=model_specs)
model = model_config["model"]
balance = generate_balance(min(num_devices, 4), len(model))
pipe_model = Pipe(model, balance, chunks=args.chunks, checkpoint=args.checkpoint)
del model
del model_config["model"]
if args.dry_run:
train(model_config, pipe_model, benchmark_config, model_specs, args)
else:
benchmark_language_model(model_config, pipe_model, benchmark_config, model_specs, config_class, args)
def run_worker(rank, world_size, args):
if args.world_size != 0:
world_size = args.world_size
dist_init(rank + args.rank_base, world_size, hostname=args.host)
initialize_model_parallel(1, world_size)
utils.init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
if __name__ == "__main__":
args = utils.init_args()
logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)
logging.info(f"Running single process benchmark with args: {args}")
benchmark_single_process(lm_wikitext2, args)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import math
import os
import sys
import time
import warnings
from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
import torch
from torch.distributed import rpc
import torch.multiprocessing as mp
import torch.nn as nn
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
import torchtext
from torchtext.data.utils import get_tokenizer
from fairscale.experimental.nn.ampnet_pipe import pipe
from fairscale.fair_dev.testing.testing import dist_init, get_worker_map
from fairscale.nn.model_parallel import initialize_model_parallel
from fairscale.nn.model_parallel.initialize import get_pipeline_parallel_group
from fairscale.nn.pipe import LazyModule
from fairscale.optim import GradScaler
try:
from fairscale.optim import Adam # type: ignore
can_benchmark = True
except ImportError:
from torch.optim import Adam # type: ignore
can_benchmark = False
def init_random_seed(seed: int):
import numpy
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
numpy.random.seed(seed)
PIPE_CHUNKS = 2
iteration_count = 0
class EmbeddingLayer(nn.Embedding):
def __init__(self, ntoken, ninp, initrange):
super().__init__(ntoken, ninp)
self.ninp = ninp
self.weight.data.uniform_(-initrange, initrange)
def forward(self, src):
return super().forward(src) * math.sqrt(self.ninp)
class PositionalEncodingLayer(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncodingLayer, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
class TransformerDecoderLayer(nn.TransformerEncoderLayer):
"""Though this class inherits from torch.nn.TransformerEncoderLayer,
it functions as a decoder in this model"""
    def __init__(self, ninp, nhead, nhid, dropout):
        super().__init__(ninp, nhead, nhid, dropout)
self.src_mask = None
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, float(0.0))
return mask
def forward(self, src):
global iteration_count
iteration_count += 1
if self.src_mask is None or self.src_mask.size(0) != len(src):
device = src.device
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
return super().forward(src, self.src_mask)
class LinearLayer(nn.Linear):
def __init__(self, ninp, ntoken, initrange):
super().__init__(ninp, ntoken)
self.bias.data.zero_()
self.weight.data.uniform_(-initrange, initrange)
class TransformerLMSequential(nn.Sequential):
    """A small language model based on the design of GPT-2, using nn.Sequential
    for compatibility with Pipe."""
def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):
layers = [
EmbeddingLayer(ntokens, ninp, initrange),
PositionalEncodingLayer(ninp, dropout),
]
for _ in range(ndecoder):
layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout))
layers.append(LinearLayer(ninp, ntokens, initrange))
        super(TransformerLMSequential, self).__init__(*layers)
class MySGD(Optimizer):
r"""
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate (required)
"""
def __init__(self, params, lr):
defaults = dict(lr=lr)
super(MySGD, self).__init__(params, defaults)
def __setstate__(self, state):
super(MySGD, self).__setstate__(state)
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
d_p = p.grad.data
p.data.add_(d_p, alpha=-group["lr"])
return loss
class SpectrainSGDMomentum(Optimizer):
r"""
    Implements an SGD-with-momentum optimizer with Spectrain-based weight
    prediction. Please refer to the Spectrain paper, https://arxiv.org/pdf/1809.02839.pdf,
    for more details.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate (required)
momentum (float): momentum (default=0.9)
"""
def __init__(self, params, lr, momentum=0.9):
defaults = dict(lr=lr, momentum=momentum)
params = list(params)
super(SpectrainSGDMomentum, self).__init__(params, defaults)
self.old_weights = None
self.cur_params, self.reference_params = self.prep_param_copies(params)
for group in self.param_groups:
for p in group["params"]:
if momentum != 0:
param_state = self.state[p]
param_state["momentum_buffer"] = torch.zeros_like(p.data)
def __setstate__(self, state):
super(SpectrainSGDMomentum, self).__setstate__(state)
def prep_param_copies(self, params):
model_params = [param for param in params if param.requires_grad]
reference_params = [param.clone().detach() for param in model_params]
for param in reference_params:
param.requires_grad = True
return model_params, reference_params
def copy_params(self, master_params, model_params):
for model, master in zip(model_params, master_params):
model.data.copy_(master.data)
def modify_reference_params_using_current_params(self):
self.copy_params(self.cur_params, self.reference_params)
def modify_current_params_using_reference_params(self):
self.copy_params(self.reference_params, self.cur_params)
    # The chunk_index and chunks parameters are unused in the spectrain use case.
def update_weight_using_future_predictions(self, model_index, num_gpus, chunk_index, chunks, forward):
if forward:
# In forward pass:
# 1. clone weights to self.old_weights
# 2. predict new weights and modify
self.modify_reference_params_using_current_params()
for group in self.param_groups:
multiplier = group["lr"] * (model_index // 2 + num_gpus - model_index - 1)
for p in group["params"]:
param_state = self.state[p]
p.data.sub_(param_state["momentum_buffer"].data, alpha=multiplier)
else:
# In backward pass:
# 1. load old weights
# 2. predict new weights and modify
self.modify_current_params_using_reference_params()
for group in self.param_groups:
multiplier = group["lr"] * (model_index // 2)
for p in group["params"]:
param_state = self.state[p]
p.data.sub_(param_state["momentum_buffer"].data, alpha=multiplier)
def step(self, weight_prediction=True, closure=None):
"""Performs a single optimization step.
Args:
weight_prediction (bool, optional): Enable weight prediction based updates
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
if weight_prediction:
self.modify_current_params_using_reference_params()
for group in self.param_groups:
momentum = group["momentum"]
for p in group["params"]:
if p.grad is None:
continue
d_p = p.grad.data
if momentum != 0:
param_state = self.state[p]
buf = param_state["momentum_buffer"]
buf.data.mul_(momentum).add_(d_p, alpha=1 - momentum)
d_p = buf
p.data.add_(d_p, alpha=-group["lr"])
return loss
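# Illustrative sketch (not wired into this benchmark): the expected call pattern
# for SpectrainSGDMomentum around one micro-batch on a pipeline stage. The model,
# compute_loss callable, model_index and num_gpus below are placeholders.
def _example_spectrain_step(model, compute_loss, model_index, num_gpus):
    optimizer = SpectrainSGDMomentum(model.parameters(), lr=0.01)
    # Predict future weights before running the forward pass of the micro-batch.
    optimizer.update_weight_using_future_predictions(model_index, num_gpus, 0, 1, forward=True)
    loss = compute_loss(model)
    # Re-predict (from the saved reference copy) before running the backward pass.
    optimizer.update_weight_using_future_predictions(model_index, num_gpus, 0, 1, forward=False)
    loss.backward()
    # Apply the real update on top of the reference weights.
    optimizer.step(weight_prediction=True)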
class XpipeAdam(Optimizer):
r"""Implements Xpipe approach on top of Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
The implementation of the L2 penalty follows changes proposed in
`Decoupled Weight Decay Regularization`_.
Xpipe details can be found here: https://arxiv.org/abs/1911.04610
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
params = list(params)
super(XpipeAdam, self).__init__(params, defaults)
self.cur_params, self.master_params = self.prep_param_copies(params)
_, self.forward_params = self.prep_param_copies(params)
_, self.backward_params = self.prep_param_copies(params)
for group in self.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["step"] = 0
# Exponential moving average of gradient values
param_state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
param_state["exp_avg_sq"] = torch.zeros_like(p.data)
def __setstate__(self, state):
        super(XpipeAdam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault("amsgrad", False)
def prep_param_copies(self, params):
model_params = [param for param in params if param.requires_grad]
reference_params = [param.clone().detach() for param in model_params]
for param in reference_params:
param.requires_grad = True
return model_params, reference_params
def copy_params(self, master_params, model_params):
for model, master in zip(model_params, master_params):
model.data.copy_(master.data)
def update_weight_using_future_predictions(
self, model_index, num_gpus, current_microbatch_index, microbatches_per_minibatch, forward
):
if forward:
# Forward pass overview:
            # if bellwether (the first micro-batch of a mini-batch):
# 1. read from master copy
# 2. predict and modify
# 3. flush updates to forward copy
# else:
# 1. read from forward copy
if current_microbatch_index % microbatches_per_minibatch == 0:
# read from master copy
self.copy_params(self.master_params, self.cur_params)
microbatch_index = current_microbatch_index + 1
# predict and modify
for group in self.param_groups:
multiplier = group["lr"] * round(
(microbatch_index + num_gpus - model_index / 2 - 2) / microbatch_index
)
beta1, beta2 = group["betas"]
eps = group["eps"]
for p in group["params"]:
param_state = self.state[p]
temp1 = param_state["exp_avg"].data / (1 - beta1)
temp2 = ((param_state["exp_avg_sq"].data / (1 - beta2)) + eps).sqrt()
p.data.addcdiv_(temp1, temp2, value=-multiplier)
# flush updates to forward copy
self.copy_params(self.cur_params, self.forward_params)
else:
self.copy_params(self.forward_params, self.cur_params)
else:
# Backward pass overview:
            # if bellwether (the first micro-batch of a mini-batch):
# 1. read from master copy
# 2. predict and modify
# 3. flush updates to backward copy
# else:
# 1. read from backward copy
if current_microbatch_index % microbatches_per_minibatch == 0:
# read from master copy
self.copy_params(self.master_params, self.cur_params)
microbatch_index = current_microbatch_index + 1
# predict and modify
for group in self.param_groups:
multiplier = group["lr"] * (microbatch_index + model_index // 2 - 1) // microbatch_index
beta1, beta2 = group["betas"]
eps = group["eps"]
for p in group["params"]:
param_state = self.state[p]
temp1 = param_state["exp_avg"].data / (1 - beta1)
temp2 = ((param_state["exp_avg_sq"].data / (1 - beta2)) + eps).sqrt()
p.data.addcdiv_(temp1, temp2, value=-multiplier)
                # flush updates to backward copy
self.copy_params(self.cur_params, self.backward_params)
else:
self.copy_params(self.backward_params, self.cur_params)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
amsgrad = group.get("amsgrad", False)
p_data = p.data
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p_data)
else:
state["exp_avg"] = state["exp_avg"].to(p_data)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data)
if amsgrad:
state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(p_data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
exp_avg_data = exp_avg.data
exp_avg_sq_data = exp_avg_sq.data
# Decay the first and second moment running average coefficient
exp_avg_data.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq_data.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq_data, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq_data.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
if group["weight_decay"] != 0:
p_data.add_(p_data, alpha=-group["weight_decay"] * group["lr"])
p_data.addcdiv_(exp_avg_data, denom, value=-step_size)
return loss
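# Illustrative sketch (not wired into this benchmark): the expected call pattern
# for XpipeAdam across the micro-batches of one mini-batch; model, compute_loss,
# model_index and num_gpus are placeholders.
def _example_xpipe_minibatch(model, compute_loss, model_index, num_gpus, microbatches_per_minibatch):
    optimizer = XpipeAdam(model.parameters(), lr=1e-3)
    for microbatch_index in range(microbatches_per_minibatch):
        # Weight prediction before the forward pass (the first micro-batch is the bellwether).
        optimizer.update_weight_using_future_predictions(
            model_index, num_gpus, microbatch_index, microbatches_per_minibatch, forward=True
        )
        loss = compute_loss(model)
        # Weight prediction before the backward pass.
        optimizer.update_weight_using_future_predictions(
            model_index, num_gpus, microbatch_index, microbatches_per_minibatch, forward=False
        )
        loss.backward()
    optimizer.step()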
def get_data(device):
    with warnings.catch_warnings(record=True):
TEXT = torchtext.data.Field(
tokenize=get_tokenizer("basic_english"), init_token="<sos>", eos_token="<eos>", lower=True
)
train_txt, val_txt, test_txt = torchtext.datasets.WikiText2.splits(TEXT)
TEXT.build_vocab(train_txt)
ntokens = len(TEXT.vocab.stoi)
batch_size = 20
eval_batch_size = 10
train_data = batchify(train_txt, batch_size, TEXT, device)
val_data = batchify(val_txt, eval_batch_size, TEXT, device)
test_data = batchify(test_txt, eval_batch_size, TEXT, device)
return ntokens, train_data, val_data, test_data
def batchify(data, bsz, TEXT, device):
data = TEXT.numericalize([data.examples[0].text])
nbatch = data.size(0) // bsz
data = data.narrow(0, 0, nbatch * bsz)
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
def get_batch(source, i, bptt):
seq_len = min(bptt, len(source) - 1 - i)
data = source[i : i + seq_len]
target = source[i + 1 : i + 1 + seq_len].view(-1)
return data, target
def make_model(args, device, ntokens):
ninp = 2048 # embedding dimension
nhid = 2048 # the dimension of the feedforward network model in nn.TransformerEncoder
nhead = 32 # the number of heads in the multiheadattention models
dropout = 0
initrange = 0.1
ndecoder = args.num_decoder_layers
if args.lazy_construction:
layers = [
LazyModule(lambda: EmbeddingLayer(ntokens, ninp, initrange)),
LazyModule(lambda: PositionalEncodingLayer(ninp, dropout)),
]
for _ in range(ndecoder):
layers.append(LazyModule(lambda: TransformerDecoderLayer(ninp, nhead, nhid, dropout)))
layers.append(LazyModule(lambda: LinearLayer(ninp, ntokens, initrange)))
model = layers
else:
        model = TransformerLMSequential(ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)
criterion = nn.CrossEntropyLoss()
lr = 0.01 # learning rate
def make_adam(model):
return Adam(model.parameters(), lr=lr)
def make_custom_optimizer(model, args):
if args.xpipe:
return XpipeAdam(model.parameters(), lr=lr)
elif args.spectrain:
return SpectrainSGDMomentum(model.parameters(), lr=lr)
else:
return MySGD(model.parameters(), lr=lr)
optimizer = make_custom_optimizer
scaler = GradScaler()
return model, criterion, optimizer, scaler
class AsyncDelegate(object):
def __init__(self, vocab_size, iteration_per_batch=1000):
self.cur_epoch = 0
self.cur_iteration = 0
self.iteration_per_batch = iteration_per_batch
self.vocab_size = vocab_size
self.word_counter = 0
self.start_time = time.time()
self.log_interval = 1
self.total_loss = 0
def transform_input(self, cur_batch):
return cur_batch["input"]
def transform_target(self, cur_batch):
return cur_batch["target"].view(-1)
def log_loss(self, cur_batch, loss, count):
self.word_counter += cur_batch["ntokens"]
if count % self.log_interval == 0 and count > 0:
self.total_loss += loss.item()
cur_loss = self.total_loss / self.log_interval
elapsed = time.time() - self.start_time
print(
"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
count, self.word_counter / elapsed, cur_loss, math.exp(cur_loss)
)
)
self.word_counter = 0
self.total_loss = 0
self.start_time = time.time()
def transform_output_before_loss(self, output_tensor):
return output_tensor.view(-1, self.vocab_size)
def check_and_save_weights(self, num_gradients):
pass
def train(lm_dataloader, model, criterion, optimizer, vocab_size, args):
model.train()
from functools import reduce
import operator
num_params = reduce(operator.add, (reduce(operator.mul, x.size()) for x in model.parameters()))
if model.group:
total = torch.Tensor([num_params])
if torch.cuda.is_available():
total = total.cuda()
torch.distributed.all_reduce(total, group=model.group)
logging.info(
f"training model, #prams = {num_params}, group: {model.group.rank()}, grank:"
f" {torch.distributed.get_rank()}, sizes {model.group.size()}"
)
torch.distributed.barrier()
if model.group.rank() == 0:
logging.info(f"total #prams = {total.item()}")
else:
logging.info(f"training model, #prams = {num_params}")
vocab_size = 10000 # FIXME
total_loss = 0.0
start_time = time.time()
word_counter = 0
optimizer = optimizer(model, args)
transform_and_log = AsyncDelegate(vocab_size)
model.interleave(
lm_dataloader, criterion, optimizer, transform_and_log, args.min_update_interval, args.spectrain or args.xpipe
)
if model.group.rank() == model.group.size() - 1:
print("Done with an epoch")
def evaluate(eval_model, data_source, criterion, bptt, ntokens):
eval_model.eval()
total_loss = 0.0
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, bptt):
data, targets = get_batch(data_source, i, bptt)
output = eval_model(data)
output = output.to(targets.device)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1)
def get_number_of_words(data):
return data.size()[0] * data.size()[1]
def benchmark_language_model(train_data, val_data, test_data, model, criterion, optimizer, ntokens, args):
epoch = 1
bptt = 35
start_time = time.time()
print("-" * 110)
print("| start of epoch {:1d}".format(epoch))
print("-" * 110)
epoch_start_time = time.time()
    train(train_data, model, criterion, optimizer, ntokens, args)
val_loss = 1 # evaluate(model, val_data, criterion, bptt, ntokens)
print("-" * 89)
print(
"| end of epoch {:1d} | time: {:5.2f}s | valid loss {:5.2f} ".format(
epoch, (time.time() - epoch_start_time), val_loss
)
)
print("-" * 110)
elapsed_time = time.time() - start_time
nwords = get_number_of_words(train_data) + get_number_of_words(val_data)
wps = nwords / elapsed_time
test_loss = 1 # evaluate(model, test_data, criterion, bptt, ntokens)
print("=" * 89)
print(
"| end of training | test loss {:5.2f} \n| time: {:5.2f}s | words: {:3d} | wps: {:5.2f}".format(
test_loss, elapsed_time, nwords, wps
)
)
print("=" * 110)
def generate_balance_weighted(num_devices, num_layers, fraction=0.5):
balance = []
layers_assigned = 0
average_count = num_layers / num_devices
last_layers = int(average_count * fraction)
balance = generate_balance(num_devices - 1, num_layers - last_layers)
balance.append(last_layers)
return balance
def generate_balance(num_devices, num_layers):
balance = []
layers_assigned = 0
for i in range(num_devices):
x = (num_layers - layers_assigned) / (num_devices - i)
if x.is_integer():
balance.append(int(x))
layers_assigned += x
else:
balance.append(math.ceil(x))
layers_assigned += math.ceil(x)
return balance
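# Illustrative sketch (not used at runtime): generate_balance_weighted gives the last
# device a reduced share of the layers; e.g. 10 layers over 4 devices with fraction=0.5
# yields [3, 3, 3, 1].
def _example_generate_balance_weighted():
    assert generate_balance_weighted(4, 10, fraction=0.5) == [3, 3, 3, 1]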
def make_model_and_data(args, device, new_data: bool = True):
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if new_data:
vocab_size = 10000
model, criterion, optimizer, scaler = make_model(args, device, vocab_size)
lm_dataset = BenchmarkLMDataset()
lm_dataloader = DataLoader(
lm_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_sentences_lm
)
return {
"model": model,
"criterion": criterion,
"optimizer": optimizer,
"data": lm_dataloader,
"vocab_size": vocab_size,
}
else:
data = get_data(device)
ntokens, train_data, val_data, test_data = data
model, criterion, optimizer, scaler = make_model(args, device, ntokens)
return {
"model": model,
"criterion": criterion,
"optimizer": optimizer,
"data": data,
}
def run_mp_worker(args, available_workers):
new_data = True
blob = make_model_and_data(args, None, new_data=new_data)
model = blob["model"]
balance = generate_balance(get_pipeline_parallel_group().size(), len(model))
p = pipe.AMPnetPipe(
module=model,
balance=balance,
chunks=args.chunks,
worker_map=get_worker_map(),
input_device=torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"),
checkpoint=args.checkpoint,
)
if torch.cuda.is_available():
p = p.cuda()
if new_data:
train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
else:
ntokens, train_data, val_data, test_data = blob["data"]
        benchmark_language_model(train_data, val_data, test_data, p, blob["criterion"], blob["optimizer"], ntokens, args)
def run_worker(rank, world_size, args):
if args.world_size != 0:
world_size = args.world_size
dist_init(rank + args.rank_base, world_size, hostname=args.host)
initialize_model_parallel(1, world_size)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
def bench_multi_process(args, all_at_once=False):
if args.local_world_size != 0:
world_size = args.local_world_size
else:
world_size = min(torch.cuda.device_count(), 2)
mp.spawn(run_worker, args=(world_size, args), nprocs=world_size, join=True)
best_device_map = {
0: "mlx5_0:1",
1: "mlx5_0:1",
2: "mlx5_1:1",
3: "mlx5_1:1",
4: "mlx5_2:1",
5: "mlx5_2:1",
6: "mlx5_3:1",
7: "mlx5_3:1",
}
def bench_mpi(args):
guess_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
os.environ["UCX_NET_DEVICES"] = best_device_map[local_rank]
os.environ["MASTER_ADDR"] = args.host
os.environ["MASTER_PORT"] = "10638"
if args.socket_name:
os.environ["GLOO_SOCKET_IFNAME"] = args.socket_name
os.environ["TP_SOCKET_IFNAME"] = args.socket_name
torch.distributed.init_process_group(backend="gloo", rank=guess_rank, world_size=world_size)
os.environ["MASTER_ADDR"] = args.host
os.environ["MASTER_PORT"] = "10639"
init_method = f"tcp://{os.environ['MASTER_ADDR']}:{os.environ['MASTER_PORT']}"
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
rpc.init_rpc(
f"Test{rank}",
rank=rank,
world_size=world_size,
backend=rpc.BackendType.TENSORPIPE,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(rpc_timeout=20, init_method=init_method),
)
backends = {"model_parallel_backend": "nccl", "pipeline_backend": "mpi", "ddp_backend": "nccl"}
initialize_model_parallel(1, world_size, **backends)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument("--local-world-size", "-l", type=int, default=0, help="local world size")
parser.add_argument("--world-size", "-w", type=int, default=0, help="world size")
parser.add_argument("--rank-base", "-r", type=int, help="rank base", default=0)
parser.add_argument("--host", "-o", type=str, default="localhost", help="hostname")
parser.add_argument("--no-mpi", action="store_true", default=False, help="disable mpi")
parser.add_argument("--chunks", type=int, default=1, help="number of microbatches per batch")
parser.add_argument("--batch-size", type=int, default=8, help="size of a batch")
parser.add_argument("--max-batch", type=int, default=4, help="Max number of batches")
parser.add_argument("--socket-name", type=str, default=None, help="socket ifname for gloo/tp")
parser.add_argument("--num-decoder-layers", type=int, default=10, help="Number of decoder layers in the model")
parser.add_argument("--spectrain", action="store_true", default=False, help="Use spectrain based weight prediction")
parser.add_argument("--xpipe", action="store_true", default=False, help="Use xpipe based weight prediction")
parser.add_argument(
"--lazy-construction", action="store_true", default=False, help="Number of decoder layers in the model"
)
parser.add_argument(
"--checkpoint", default="never", choices=["always", "except_last", "never"], help="Checkpointing strategy for pipe"
)
parser.add_argument("--min-update-interval", type=int, default=1, help="min update interval for ampnet")
"""
To run the script,
1. please build a suitable version of OpenMPI with a cuda-enabled UCX backend.
2. For running on 2 gpus:
<open-mpi-installed-dir>/bin/mpirun --host localhost:8 -np 2 --map-by node --mca pml ucx -x UCX_TLS=rc,sm,cuda_ipc,cuda_copy -x PYTHONPATH=$PWD -x PATH=$PATH -x LD_LIBRARY_PATH=$LD_LIBRARY_PATH -x UCX_RNDV_SCHEME=put_zcopy -x UCX_MEMTYPE_CACHE=n python3 benchmarks/experimental/experimental_async_approaches.py --num-decoder-layers=8 --host localhost --batch-size 4
3. For doing Spectrain based weight prediction, add `--spectrain` to the training command line argument.
4. For doing Xpipe based weight prediction, add `--xpipe` to the training command line argument.
"""
if __name__ == "__main__":
args = parser.parse_args()
# bench_multi_process(args, all_at_once=True)
if args.no_mpi or "OMPI_COMM_WORLD_RANK" not in os.environ:
print("Can't run benchmark")
sys.exit(1)
else:
if os.environ["OMPI_COMM_WORLD_RANK"] == "0":
print(f"Running benchmark with args: {args}")
bench_mpi(args)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
from pprint import pprint
from statistics import mean
import time
import torch
from torch import nn
from torch.cuda import Event
from fairscale.experimental.nn import MEVO, BaselineSoftmaxNllLoss
from fairscale.experimental.nn.mevo import get_data
"""Benchmarking the MEVO kernel and its Baseline."""
SHAPES = [
# name, activation, FC weights
("1k_128h_256k", (1024, 128), (128, 256 * 1024)),
# ("4k_128h_256k", (4096, 128), (128, 256 * 1024)),
# ("8k_4k_32k", (4 * 2048, 4 * 1024), (4 * 1024, 32 * 1024)),
# ("24k_4k_50k", (12 * 2048, 4 * 1024), (4 * 1024, 50 * 1024)),
# ("8k_4k_256k", (4 * 2048, 4 * 1024), (4 * 1024, 256 * 1024)),
# ("8k_4k_256008", (4 * 2048, 4 * 1024), (4 * 1024, 256008)), # max seq len for base is 2100, 2300 for top-k
# ("xk_4k_256008", (1 * 2048, 4 * 1024), (4 * 1024, 256008)),
]
KERNELS = [
BaselineSoftmaxNllLoss,
MEVO,
]
def run_on_gpu(kernel, data, repeats, no_grad, fwd_bwd):
"""Measure both GPU runtime and peak memory usage of a kernel."""
tokens = data[0].shape[0]
def get_cuda_data():
"""Move the data from CPU to GPU. We make a new weight parameter with this call."""
with torch.no_grad():
i, w, t = data # i, t are tensors, w is a param
w = nn.Linear(w.shape[1], w.shape[0], bias=False, dtype=w.dtype, device="cuda").weight
assert w.requires_grad
return i.cuda().requires_grad_(True), w, t.cuda()
def _test(kernel_obj, event):
"""Forward and backward passes."""
context = contextlib.suppress()
if no_grad:
context = torch.no_grad()
with context:
if event is not None:
event.record()
out = kernel_obj(input, target)
if fwd_bwd:
assert not no_grad
out.backward()
del out
if fwd_bwd:
assert input.grad is not None, input
assert weight.grad is not None, weight
assert target.grad is None, target
input.grad = None
weight.grad = None
def _get_kernel():
"""Get a kernel instance."""
return kernel(weight, tile_factor=16)
#
# Run the test once to measure memory.
#
# Ensure GPU memory is clean, empty, 0.
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
cur_mem_before = round(torch.cuda.memory_allocated() / 1024 / 1024)
assert cur_mem_before == 0, cur_mem_before
# Move tensors to GPU.
input, weight, target = get_cuda_data()
# Create the kernel
k = _get_kernel()
_test(k, None)
# Might wait for gpu here
torch.cuda.synchronize()
# Free memory, ensure everything is clean, no leak.
del k
del input
del weight
del target
cur_mem_after = round(torch.cuda.memory_allocated() / 1024 / 1024)
assert cur_mem_after == 0, cur_mem_after
# Get peak mem
peak_mem_after = round(torch.cuda.max_memory_allocated() / 1024 / 1024)
peak_mem = peak_mem_after - cur_mem_before
#
# Run multiple times to get both CPU timing and average GPU timing.
#
# Move tensors to GPU and get k, again.
input, weight, target = get_cuda_data()
k = _get_kernel()
# Get the events
events = [Event(enable_timing=True) for _ in range(repeats + 1)]
# Queue the ops to GPU
cpu_start_time = time.time()
for i in range(repeats):
_test(k, events[i])
events[i + 1].record() # end time of the last run
# CPU could be done much sooner than the GPU here.
cpu_time = time.time() - cpu_start_time
# Might wait for gpu here
torch.cuda.synchronize()
# Get the durations
durations = [cpu_time * 1000] # convert CPU time, from seconds to ms.
for x, y in zip(events, events[1:]):
durations.append(x.elapsed_time(y))
assert len(durations) == repeats + 1
# Free memory
del k
input, weight, target = None, None, None
cur_mem_after = round(torch.cuda.memory_allocated() / 1024 / 1024)
assert cur_mem_after == 0, cur_mem_after
    # Skip the first two entries (the CPU time and the warm-up run) when computing the average.
time_per_call = mean(durations[2:]) # ms
time_per_token = time_per_call * 1000 / tokens # us
return peak_mem, durations[:2] + [time_per_call, time_per_token]
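# Illustrative sketch (standalone, simplified): the CUDA-event timing pattern used in
# run_on_gpu above, reduced to its core. The work() callable is a placeholder.
def _example_event_timing(work, repeats=3):
    events = [Event(enable_timing=True) for _ in range(repeats + 1)]
    for i in range(repeats):
        events[i].record()
        work()
    events[repeats].record()
    torch.cuda.synchronize()
    # elapsed_time() reports milliseconds between two recorded events.
    return [start.elapsed_time(end) for start, end in zip(events, events[1:])]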
def main():
parser = argparse.ArgumentParser("Benchmarking MEVO")
parser.add_argument("--dtype", type=str, choices=["fp16", "fp32"], default="fp16")
parser.add_argument("--grad", type=str, choices=["grad", "no_grad"], default="grad")
parser.add_argument("--fwd_bwd", action="store_true", default=False)
args = parser.parse_args()
repeats = 9
results = {}
results["peak cached"] = {}
results["durations"] = {}
for shape in SHAPES:
name = shape[0]
results["peak cached"][name] = {}
results["durations"][name] = {}
dtype = torch.float32 if args.dtype == "fp32" else torch.float16
# Use cpu memory to ensure we always start with an empty GPU
data = get_data(shape[1:], dtype, "cpu")
for kernel in KERNELS:
k_name = kernel.__name__
no_grad = args.grad
print(f"Running {k_name} with {name} {dtype} {no_grad} data")
peak_mem, durations = run_on_gpu(kernel, data, repeats, no_grad == "no_grad", args.fwd_bwd)
results["peak cached"][name][k_name] = peak_mem
results["durations"][name][k_name] = durations
pprint(results)
if __name__ == "__main__":
main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.utils.data import Dataset
# TODO(sidgoyal): Refactor benchmarks to remove this file eventually.
def collate_sentences_lm(samples):
if len(samples) == 0:
return {}
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = torch.stack([s["source"] for s in samples], 0)
tgt_tokens = torch.stack([s["target"] for s in samples], 0)
ntokens = len(samples) * len(samples[0]["target"])
src_lengths = torch.LongTensor([len(samples[0]["source"])] * len(samples))
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"input": src_tokens,
"target": tgt_tokens,
}
return batch
class BenchmarkLMDataset(Dataset):
"""
    Dataset to benchmark a translation-like seq2seq task.
Args:
vocab_size (int, optional): size of the vocabulary (default 10000).
max_source_positions (int, optional): max number of tokens in the
source sentence (default: 1024).
total_samples (int, optional): the total number of rows in the
dataset (default: 10000).
"""
def __init__(
self,
vocab_size=10000,
max_source_positions=1024,
total_samples=10000,
):
self.vocab_size = vocab_size
self.max_source_positions = max_source_positions
self.total_samples = total_samples
self.sizes = [self.max_source_positions] * self.total_samples
def __getitem__(self, index):
length = self.sizes[index]
source = torch.randint(1, self.vocab_size, (length,))
target = source.clone()
return {
"id": index,
"source": source,
"target": target,
}
def __len__(self):
return self.total_samples
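# Illustrative sketch (not used by the benchmarks directly): wiring the dataset
# into a DataLoader with the collate function defined above.
def _example_dataloader(batch_size=8):
    from torch.utils.data import DataLoader

    dataset = BenchmarkLMDataset(vocab_size=10000, max_source_positions=1024, total_samples=64)
    loader = DataLoader(dataset, batch_size=batch_size, collate_fn=collate_sentences_lm)
    batch = next(iter(loader))
    # batch["input"] and batch["target"] are LongTensors of shape (batch_size, max_source_positions).
    return batch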
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import time
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
import fairscale.experimental.nn
def benchmark_bn(rank, world_size, init_file, bn_cls):
dist.init_process_group(dist.Backend.NCCL, init_method="file://" + init_file, rank=rank, world_size=world_size)
x = torch.randn(50, 2048, 7, 7).to(rank)
bn = bn_cls(2048).to(rank)
bn = DDP(bn, device_ids=[rank])
# Warmup
for i in range(50):
with torch.no_grad():
x = bn(x)
torch.cuda.synchronize(rank)
t0 = time.time()
for i in range(100):
with torch.no_grad():
x = bn(x)
torch.cuda.synchronize(rank)
t1 = time.time()
print("Elapsed time is ", t1 - t0)
if __name__ == "__main__":
world_size = torch.cuda.device_count()
for cls in [torch.nn.BatchNorm2d, torch.nn.SyncBatchNorm, fairscale.experimental.nn.SyncBatchNorm]:
print(cls)
mp.spawn(benchmark_bn, args=(world_size, tempfile.mkstemp()[1], cls), nprocs=world_size)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
from functools import reduce
import logging
import math
import operator
import time
import numpy as np
import torch
from torch.optim import Adam
from torch.utils.data.dataloader import DataLoader
from torchvision.datasets import FakeData
from torchvision.transforms import ToTensor
from benchmarks.datasets.wikitext2_data import get_real_dataloaders as get_real_wikitext2_dataloaders
from benchmarks.datasets.wikitext2_data import get_synthetic_dataloaders as get_synthetic_wikitext2_dataloaders
from benchmarks.golden_configs.lm_wikitext2 import Offload_Sequential as offload_seq
from benchmarks.golden_configs.lm_wikitext2 import Offload_Transformer as lm_wikitext2
from benchmarks.models import transformer_lm
from fairscale.experimental.nn.offload import OffloadModel
def init_random_seed(seed: int):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
def get_model_and_optimizer(args, device, benchmark_config, model_specs):
"""Return instantiated model and optimizer function."""
if args.model_name == "lm":
model = get_lm_model(args, device, model_specs)
lr = benchmark_config["lr"]
def make_adam(params):
return Adam(params, lr=lr)
optimizer = make_adam
elif args.model_name == "seq":
model = get_seq_model(args, device, model_specs)
optimizer = torch.optim.SGD
model = OffloadModel(
model=model,
device=torch.device("cuda"),
offload_device=torch.device("cpu"),
num_slices=benchmark_config["slices"],
checkpoint_activation=benchmark_config["checkpoint_activation"],
num_microbatches=benchmark_config["num_microbatches"],
)
return model, optimizer
def get_seq_model(args, device, model_specs):
model = torch.nn.Sequential(
torch.nn.Linear(model_specs["inputs"] * model_specs["inputs"], model_specs["hidden"]),
*([torch.nn.Linear(model_specs["hidden"], model_specs["hidden"]) for _ in range(model_specs["layers"])]),
torch.nn.Linear(model_specs["hidden"], model_specs["outputs"]),
)
return model.cpu()
def get_lm_model(args, device, config):
"""Get language model(based on GPT-2) used for sequence prediction."""
ninp = config["ninp"]
nhead = config["nhead"]
initrange = config["initrange"]
dropout = config["dropout"]
vocab_size = config["vocab_size"]
nhid = config["nhid"]
ndecoder = config["num_decoder_layers"]
return transformer_lm.TransformerLM(vocab_size, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)
def log_number_of_parameters(model):
num_params = reduce(operator.add, (reduce(operator.mul, x.size()) for x in model.parameters()))
logging.info(f"training model, #params = {num_params}")
def _get_fp16_context(use_fp16=False):
if use_fp16:
return torch.cuda.amp.autocast()
else:
return contextlib.nullcontext()
def _get_profiler_context(use_profiler=False):
if use_profiler:
return torch.autograd.profiler.profile(use_cuda=True, profile_memory=True)
else:
return contextlib.nullcontext()
def _get_profiler_record_context(record_name, use_profiler=False):
if use_profiler:
return torch.autograd.profiler.record_function(record_name)
else:
return contextlib.nullcontext()
def train_seq(model_config, benchmark_config, model_specs, args):
device = torch.device("cuda")
torch.cuda.set_device(0)
torch.manual_seed(5)
model = model_config["model"]
criterion = benchmark_config["criterion"]
optimizer = model_config["optimizer"](model.parameters(), lr=benchmark_config["lr"])
dataloader, _, _ = model_config["data"]
def train_epoch(args, num_iters):
model.train()
for batch_inputs, batch_outputs in dataloader:
batch_inputs, batch_outputs = batch_inputs.to("cuda"), batch_outputs.to("cuda")
start = time.time_ns()
with _get_profiler_context(args.use_profiler) as prof:
optimizer.zero_grad()
inputs = batch_inputs.reshape(-1, model_specs["inputs"] * model_specs["inputs"])
with _get_profiler_record_context("model_training", args.use_profiler):
with _get_fp16_context(use_fp16=args.use_fp16):
output = model(inputs)
loss = criterion(output, target=batch_outputs)
loss.backward()
optimizer.step()
logging.info(
"Memory stats are {:.2f}GB".format(torch.cuda.memory_stats(0)["allocated_bytes.all.peak"] / 2**30)
)
logging.info(
"Loss {:.2f} - throughput {:.2f}fps".format(
loss.item(), benchmark_config["batch_size"] / (time.time_ns() - start) * 10**9
)
)
num_iters -= 1
if num_iters == 0:
break
if args.use_profiler:
prof.export_chrome_trace("/tmp/offload_prof")
train_epoch(args, num_iters=5)
def train(model_config, model, benchmark_config, model_specs, args):
device = torch.device("cuda")
torch.cuda.set_device(0)
lm_dataloader, _, _ = model_config["data"]
criterion = benchmark_config["criterion"]
vocab_size = model_specs["vocab_size"]
optimizer = model_config["optimizer"]
model.train()
log_number_of_parameters(model)
total_loss = 0.0
word_counter = 0
optimizer = optimizer(model.parameters())
total_tokens = 0
total_tokens_per_log_interval = 0
bptt = 2
start_time = time.time()
epoch_start_time = 0.0
def get_batch(source):
seq_len = len(source) - 1
data = source[0:seq_len]
target = source[1 : 1 + seq_len]
return data, target
for i, batch in enumerate(lm_dataloader):
# TODO(anj): Make this a flag for both "lm" and "seq" models.
if i == 5:
break
if i == 1:
epoch_start_time = time.time()
source, target = get_batch(batch)
source, target = source.cuda(), target.cuda()
if i > 0:
total_tokens += source.numel()
with _get_profiler_context(args.use_profiler) as prof:
optimizer.zero_grad()
with _get_profiler_record_context("FW pass", args.use_profiler):
output = model(source)
with _get_profiler_record_context("Loss", args.use_profiler):
loss = criterion(output.view(-1, vocab_size), target.view(-1))
with _get_profiler_record_context("BW pass", args.use_profiler):
loss.backward()
torch.nn.utils.clip_grad_value_(model.parameters(), model_specs["clip_value"])
with _get_profiler_record_context("Opt step", args.use_profiler):
optimizer.step()
total_loss += loss.item()
log_interval = 1
total_tokens_per_log_interval += source.numel()
if i % log_interval == 0 and i > 0:
cur_loss = total_loss / log_interval
elapsed = time.time() - start_time
print(
"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
i, total_tokens_per_log_interval / elapsed, cur_loss, math.exp(cur_loss)
)
)
total_tokens_per_log_interval = 0
total_loss = 0
start_time = time.time()
if args.use_profiler:
prof.export_chrome_trace("/tmp/offload_prof")
if epoch_start_time != 0:
wps = total_tokens / (time.time() - epoch_start_time)
else:
raise RuntimeError(
"Unable to benchmark on a single batch. Increase the size " " of the dataset and rerun the benchmark."
)
return wps, loss.item()
def verify_peak_memory(golden_config, std_dev):
current_device_usage = torch.cuda.memory_stats(0)["allocated_bytes.all.peak"]
golden_ref = golden_config["peak_mem_usage"]
if not current_device_usage < golden_ref * std_dev:
raise RuntimeError(
"Peak memory usage for cuda device {:d} is {:d} which"
"is less than golden reference value of {:d}".format(0, current_device_usage, golden_ref)
)
def verify_lm_throughput(wps, golden_config, args):
"""Verify that words per second for a given benchmark run matches the golden data."""
if not wps > (golden_config["avg_wps"] - (3 * golden_config["std_dev_wps"])):
raise RuntimeError(
"Throughput(wps):{:.2f} is below the golden threshold of an "
"average value of {:.2f} and standard dev of {:.2f}.".format(
wps, golden_config["avg_wps"], golden_config["std_dev_wps"]
)
)
def benchmark_language_model(model_config, model, benchmark_config, model_specs, args):
epoch = benchmark_config["epochs"]
start_time = time.time()
print("-" * 110)
print("| start of epoch {:1d}".format(epoch))
print("-" * 110)
wps, loss = train(model_config, model, benchmark_config, model_specs, args)
elapsed_time = time.time() - start_time
print("-" * 110)
print("| end of epoch {:1d} | time: {:5.2f}s | train loss {:5.2f} ".format(epoch, elapsed_time, loss))
print("-" * 110)
if args.model_name == "seq":
raise RuntimeError(
f"Golden data verification is only supported for the Transformer(lm) model and not {args.model_name}"
)
print("Throughput(wps) is {:.2f}.".format(wps))
print("Peak allocated bytes on cuda:0: {:1d}".format(torch.cuda.memory_stats(0)["allocated_bytes.all.peak"]))
if not args.dry_run:
golden_config = get_golden_config(args.model_name, args)
verify_lm_throughput(wps, golden_config, args)
verify_peak_memory(golden_config, 1.1)
def get_synthetic_dataloaders(args, device, benchmark_config, model_specs):
"""Returns dataloader for synthetic data."""
if args.model_name == "lm":
return get_synthetic_wikitext2_dataloaders(args, benchmark_config, model_specs)
elif args.model_name == "seq":
transform = ToTensor()
dataloader = DataLoader(
FakeData(
image_size=(1, model_specs["inputs"], model_specs["inputs"]),
num_classes=model_specs["outputs"],
transform=transform,
),
batch_size=benchmark_config["batch_size"],
)
return dataloader, dataloader, dataloader
else:
raise RuntimeError(f"Unrecognized args.model_name {args.model_name}")
def get_real_dataloaders(args, device, benchmark_config, model_specs):
"""Returns dataloaders for real data."""
if args.model_name == "lm":
data = get_real_wikitext2_dataloaders(args, benchmark_config, model_specs)
ntokens, train_dataloader, valid_dataloader, test_dataloader = data
model_specs["vocab_size"] = ntokens
return train_dataloader, valid_dataloader, test_dataloader
else:
raise RuntimeError(f"Unrecognized args.model_mame {args.model_name}")
def create_model_config(args, benchmark_config=None, model_specs=None):
"""Return a dict with the given model, dataset and optimizer."""
# device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
device = torch.device("cpu")
if args.model_name == "lm":
if args.use_synthetic_data:
dataloader_fn = get_synthetic_dataloaders
else:
dataloader_fn = get_real_dataloaders
data = dataloader_fn(args, device, benchmark_config, model_specs)
model, optimizer = get_model_and_optimizer(args, device, benchmark_config, model_specs)
return {
"model": model,
"optimizer": optimizer,
"data": data,
}
elif args.model_name == "seq":
data = get_synthetic_dataloaders(
args, device, offload_seq.get_benchmark_config(), offload_seq.get_model_config()
)
model, optimizer = get_model_and_optimizer(args, device, benchmark_config, model_specs)
return {
"model": model,
"optimizer": optimizer,
"data": data,
}
else:
raise RuntimeError(f"Unrecognized args.model_mame {args.model_name}")
def create_benchmark_config(args):
"""Return a dict with configurations required for benchmarking `model_name` model."""
if args.model_name == "lm":
return lm_wikitext2.get_benchmark_config(checkpoint_activation=args.checkpoint_activation)
elif args.model_name == "seq":
return offload_seq.get_benchmark_config()
else:
raise RuntimeError(f"Unrecognized args.model_name {args.model_name}")
def get_golden_config(model_name, args):
"""Return a dict with the golden data for throughput and memory usage."""
if model_name == "lm":
return lm_wikitext2.get_golden_real_stats()
else:
raise RuntimeError(f"Unrecognized args.model_mame {args.model_name}")
def get_model_specs(model_name):
"""Return a dict with configurations required for configuring `model_name` model."""
if model_name == "lm":
return lm_wikitext2.get_model_config()
elif model_name == "seq":
return offload_seq.get_model_config()
else:
raise RuntimeError("Unrecognized args.model_mame " % args.model_name)
def run_benchmark(args):
"""Benchmark a given model using a single process and single devices."""
# We need at least 1 GPU to benchmark the offload model API.
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 0
assert num_devices > 0
init_random_seed(0)
if args.model_name == "lm":
benchmark_config = create_benchmark_config(args)
model_specs = get_model_specs(args.model_name)
model_config = create_model_config(args, benchmark_config=benchmark_config, model_specs=model_specs)
model = model_config["model"]
benchmark_language_model(model_config, model, benchmark_config, model_specs, args)
elif args.model_name == "seq":
benchmark_config = create_benchmark_config(args)
model_specs = get_model_specs(args.model_name)
model_config = create_model_config(args, benchmark_config=benchmark_config, model_specs=model_specs)
model = model_config["model"]
train_seq(model_config, benchmark_config, model_specs, args)
else:
raise RuntimeError(f"Unable to recognize model name {args.model_name}")
parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument(
"--dry_run", default=False, action="store_true", help="Run a sample training run without regression testing."
)
parser.add_argument(
"--debug",
action="store_true",
default=True,
help="Print debugging statements which is more verbose than the default.",
)
parser.add_argument(
"--model_name",
default="lm",
type=str,
help="Language Model(LM) used to benchmark nn.pipe.",
)
parser.add_argument(
"--use_synthetic_data", default=True, action="store_true", help="Uses synthetic data for running benchmarks."
)
parser.add_argument("--use_fp16", action="store_true", default=False)
parser.add_argument("--checkpoint_activation", action="store_true", default=False)
parser.add_argument("--use_profiler", action="store_true", default=False)
if __name__ == "__main__":
args = parser.parse_args()
logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)
logging.info("Benchmark arguments: %s" % args)
run_benchmark(args)
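# Example invocations (the script file name is hypothetical; the flags are the
# ones defined by the parser above):
#   python offload_benchmark.py --model_name lm --dry_run
#   python offload_benchmark.py --model_name seq --use_profiler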
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from fairscale.optim import GradScaler
class Offload_Transformer:
def get_model_config():
return {
"vocab_size": 10000,
"ninp": 2048, # embedding dimension
"nhid": 2048, # the dimension of the feedforward network model in nn.TransformerEncoder
"nhead": 32, # the number of heads in the multiheadattention models
"dropout": 0,
"initrange": 0.1,
"scaler": GradScaler(),
"clip_value": 0.05,
"num_decoder_layers": 10,
"seq_len": 32,
}
def get_benchmark_config(checkpoint_activation=True):
return {
"epochs": 1,
"lr": 0.001, # learning rate
"batch_size": 8,
"criterion": nn.CrossEntropyLoss(),
"checkpoint_activation": checkpoint_activation,
"num_microbatches": 1,
"slices": 3,
}
def get_golden_real_stats():
return {
"avg_wps": 192.105,
"std_dev_wps": 39.56,
"peak_mem_usage": 1180848128,
}
class Offload_Sequential:
def get_model_config():
return {
"inputs": 100,
"outputs": 5,
"hidden": 1000,
"layers": 100,
"clip_value": 0.05,
}
def get_benchmark_config():
return {
"epochs": 1,
"lr": 0.001, # learning rate
"batch_size": 8,
"criterion": nn.CrossEntropyLoss(),
"slices": 3,
"checkpoint_activation": True,
"num_microbatches": 1,
}
class FSDP:
def get_model_config():
return {
"vocab_size": 10000,
"ninp": 2048, # embedding dimension
"nhid": 2048, # the dimension of the feedforward network model in nn.TransformerEncoder
"nhead": 32, # the number of heads in the multiheadattention models
"dropout": 0,
"initrange": 0.1,
"scaler": GradScaler(),
"clip_value": 0.05,
"num_decoder_layers": 10,
"seq_len": 32,
}
def get_benchmark_config():
return {
"epochs": 1,
"lr": 0.001, # learning rate
"batch_size": 8,
"criterion": nn.CrossEntropyLoss(),
}
def get_golden_real_stats():
raise NotImplementedError("Synthetic data benchmarks are not supported.")
def get_golden_synthetic_stats():
return {
"avg_wps": 486.303,
"std_dev_wps": 71.307,
"peak_mem_usage": [5.5055 * 2**30, 5.5055 * 2**30, 5.5055 * 2**30, 5.5055 * 2**30],
}
class Pipe:
def get_model_config():
return {
"vocab_size": 10000,
"ninp": 2048, # embedding dimension
"nhid": 2048, # the dimension of the feedforward network model in nn.TransformerEncoder
"nhead": 32, # the number of heads in the multiheadattention models
"dropout": 0,
"initrange": 0.1,
"scaler": GradScaler(),
"clip_value": 0.05,
"num_decoder_layers": 10,
"seq_len": 32,
}
def get_benchmark_config():
return {
"epochs": 1,
"lr": 0.001, # learning rate
"batch_size": 8,
"criterion": nn.CrossEntropyLoss(),
}
def get_golden_real_stats():
return {
"avg_wps": 703.778,
"std_dev_wps": 5.732,
"peak_mem_usage": [2320996352, 1396742144, 1396742144, 2340010496],
}
def get_golden_synthetic_stats():
# TODO(anj-s): Add support for synthetic regression benchmarks
raise NotImplementedError("Synthetic data benchmarks are not supported.")
class MOE:
def get_model_config():
return {
"vocab_size": 10000,
"ninp": 1024, # embedding dimension
"nhid": 4096, # the dimension of the feedforward network model in nn.TransformerEncoder
"nhead": 32, # the number of heads in the multiheadattention models
"dropout": 0,
"initrange": 0.1,
"scaler": GradScaler(),
"clip_value": 0.05,
"num_decoder_layers": 20,
"seq_len": 33, # (seq_len - 1) needs to be divisible by num_local_experts
"is_moe": True,
"num_local_experts": 2,
}
def get_benchmark_config():
return {
"epochs": 1,
"lr": 0.001, # learning rate
"batch_size": 32,
"criterion": nn.CrossEntropyLoss(),
}
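# Note on the config classes above: their get_*_config functions take no "self"
# and are not marked @staticmethod, so they are meant to be called on the class
# itself (e.g. Offload_Transformer.get_model_config()). In Python 3 this works
# because attribute access on the class returns a plain function.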
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
def get_golden_real_stats():
return {
"reference_speed": 578,
"reference_memory": 945,
"reference_loss": 0.026,
}
def get_golden_synthetic_stats():
# TODO(anj-s): Add support for synthetic regression benchmarks
raise NotImplementedError("Synthetic data benchmarks are not supported.")
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
from distutils.version import LooseVersion
import io
import operator
import tempfile
import torch
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import torchtext
from torchtext.data.utils import get_tokenizer
from torchtext.utils import download_from_url, extract_archive
if operator.ge(LooseVersion(torchtext.__version__), LooseVersion("0.10.0")):
from torchtext.legacy.vocab import build_vocab_from_iterator
else:
from torchtext.vocab import build_vocab_from_iterator
def _batchify(data, batch_size):
data = torch.tensor(data)
# Divide the dataset into bsz parts.
nbatch = data.size(0) // batch_size
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * batch_size)
# Evenly divide the data across the bsz batches.
data = data.view(batch_size, -1).t().contiguous()
return data
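# A small worked example of _batchify: with data = range(10) and batch_size = 2,
# the 10 tokens are trimmed to 10 (no remainder), reshaped to (2, 5) and then
# transposed, producing a (5, 2) tensor whose columns are [0..4] and [5..9].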
def _get_total_batch_size(benchmark_config, model_specs):
return model_specs["seq_len"] * benchmark_config["batch_size"]
DatasetsInfo = namedtuple("DatasetsInfo", ["ntokens", "train_dataset", "valid_dataset", "test_dataset"])
def get_real_datasets():
url = "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip"
tmpdir = tempfile.TemporaryDirectory()
test_filepath, valid_filepath, train_filepath = extract_archive(download_from_url(url, root=tmpdir.name))
tokenizer = get_tokenizer("basic_english")
def data_process(raw_text_iter):
data = [torch.tensor([vocab[token] for token in tokenizer(item)], dtype=torch.long) for item in raw_text_iter]
return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))
vocab = build_vocab_from_iterator(map(tokenizer, iter(io.open(train_filepath, encoding="utf8"))))
train_dataset = data_process(iter(io.open(train_filepath, encoding="utf8")))
valid_dataset = data_process(iter(io.open(valid_filepath, encoding="utf8")))
test_dataset = data_process(iter(io.open(test_filepath, encoding="utf8")))
return DatasetsInfo(len(vocab.stoi), train_dataset, valid_dataset, test_dataset)
def get_dataloaders(datasets_info, benchmark_config, model_specs, num_replicas=1, rank=0):
ntokens, train_dataset, valid_dataset, test_dataset = datasets_info
def batchify(data):
batch_size = benchmark_config["batch_size"]
return _batchify(data, batch_size)
total_batch_size = _get_total_batch_size(benchmark_config, model_specs)
train_dataloader = DataLoader(
train_dataset,
sampler=DistributedSampler(train_dataset, num_replicas=num_replicas, rank=rank),
batch_size=total_batch_size,
collate_fn=batchify,
)
valid_dataloader = DataLoader(
valid_dataset,
sampler=DistributedSampler(valid_dataset, num_replicas=num_replicas, rank=rank),
batch_size=total_batch_size,
collate_fn=batchify,
)
test_dataloader = DataLoader(
test_dataset,
sampler=DistributedSampler(test_dataset, num_replicas=num_replicas, rank=rank),
batch_size=total_batch_size,
collate_fn=batchify,
)
return train_dataloader, valid_dataloader, test_dataloader
def get_real_dataloaders(args, benchmark_config, model_specs, num_replicas=1, rank=0):
"""Return real dataloaders for training, testing and validation."""
dataset_info = get_real_datasets()
train_dataloader, valid_dataloader, test_dataloader = get_dataloaders(
dataset_info, benchmark_config, model_specs, num_replicas, rank
)
return dataset_info.ntokens, train_dataloader, valid_dataloader, test_dataloader
def get_synthetic_datasets():
# vocab_size is 10000 and length of the real data is 2049990.
lm_dataset = torch.randint(1, 10000, (2049990,))
return DatasetsInfo(10000, lm_dataset, lm_dataset, lm_dataset)
def get_synthetic_dataloaders(args, benchmark_config, model_specs, num_replicas=1, rank=0):
"""Return synthetic dataloaders for training, testing and validation."""
return get_dataloaders(get_synthetic_datasets(), benchmark_config, model_specs, num_replicas, rank)
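# A minimal usage sketch (kept commented so the module stays import-safe); the
# literal config dicts are assumptions that only cover the keys these helpers read:
#   train_dl, valid_dl, test_dl = get_synthetic_dataloaders(
#       args=None,
#       benchmark_config={"batch_size": 8},
#       model_specs={"seq_len": 32},
#   )
#   # Each yielded batch holds seq_len * batch_size = 256 tokens, which the
#   # batchify collate_fn reshapes into a (seq_len, batch_size) = (32, 8) tensor.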
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
import shutil
import tempfile
from torchvision.datasets import MNIST
TEMPDIR = tempfile.gettempdir()
def setup_cached_mnist():
done, tentatives = False, 0
while not done and tentatives < 5:
# Monkey patch the resource URLs to work around a possible blacklist
MNIST.mirrors = ["https://github.com/blefaudeux/mnist_dataset/raw/main/"] + MNIST.mirrors
# This will automatically skip the download if the dataset is already there, and check the checksum
try:
_ = MNIST(transform=None, download=True, root=TEMPDIR)
done = True
except RuntimeError as e:
logging.warning(e)
mnist_root = Path(TEMPDIR + "/MNIST")
# Corrupted data, erase and restart
shutil.rmtree(str(mnist_root))
tentatives += 1
if done is False:
logging.error("Could not download MNIST dataset")
exit(-1)
else:
logging.info("Dataset downloaded")
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
from fairscale.nn.moe.moe_layer import MOELayer
from fairscale.nn.moe.top2gate import Top2Gate
# TODO(anj-s): Identify if we need this initialization logic for the below wrapped layers.
class EmbeddingLayer(nn.Embedding):
"""Wrapped nn.Embedding layer to allow for weight initialization."""
def __init__(self, ntoken, ninp, initrange):
super().__init__(ntoken, ninp)
self.ninp_sqrt = math.sqrt(ninp)
self.weight.data.uniform_(-initrange, initrange)
def forward(self, src):
return super().forward(src) * self.ninp_sqrt
class PositionalEncodingLayer(nn.Module):
"""PositionalEncoding layer for a given Transformer model."""
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncodingLayer, self).__init__()
self.dropout = nn.Dropout(p=dropout)
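# The buffer built below implements the sinusoidal encoding from "Attention Is
# All You Need": PE(pos, 2i) = sin(pos / 10000^(2i/d_model)) and
# PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model)). It is registered as a buffer
# so it follows the module's device/dtype but is not trained.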
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
class FeedForwardLayer(nn.Module):
"""FeedForward layer for a given Transformer model."""
def __init__(self, d_model, dim_feedforward, activation, dropout) -> None:
super(FeedForwardLayer, self).__init__()
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.activation = activation
self.dropout1 = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.dropout2 = nn.Dropout(dropout)
def forward(self, x):
return self.dropout2(self.linear2(self.dropout1(self.activation(self.linear1(x)))))
# Forked from https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer.
# Parameters is_moe and num_local_experts are added.
class TransformerEncoderLayer(nn.Module):
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of the intermediate layer, can be a string
("relu" or "gelu") or a unary callable. Default: relu
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
norm_first: if ``True``, layer norm is done prior to attention and feedforward
operations, respectively. Otherwise it's done after. Default: ``False`` (after).
is_moe: if ``True``, the feedforward layer will have MOE enabled.
num_local_experts: number of local experts for MOE.
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
__constants__ = ["norm_first"]
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation=nn.ReLU(),
layer_norm_eps=1e-5,
norm_first=False,
is_moe=False,
num_local_experts=1,
):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm_first = norm_first
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.dropout = nn.Dropout(dropout)
self.is_moe = is_moe
if is_moe:
world_size = 1 if not torch.distributed.is_initialized() else torch.distributed.get_world_size()
num_global_experts = num_local_experts * world_size
self.gate = Top2Gate(d_model, num_global_experts)
experts = nn.ModuleList(
[FeedForwardLayer(d_model, dim_feedforward, activation, dropout) for _ in range(num_local_experts)]
)
self.moe_layer = MOELayer(self.gate, experts)
else:
self.ff_block = FeedForwardLayer(d_model, dim_feedforward, activation, dropout)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
# see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
x = src
if self.norm_first:
x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)
x = x + self._ff_block(self.norm2(x))
else:
x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
x = self.norm2(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(self, x, attn_mask, key_padding_mask):
x = self.self_attn(x, x, x, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)[0]
return self.dropout(x)
# feed forward block
def _ff_block(self, x):
if self.is_moe:
return self.moe_layer(x)
else:
return self.ff_block(x)
class TransformerDecoderLayer(TransformerEncoderLayer):
"""TransformerDecoder layer which inherits from TransformerEncoderLayer."""
def __init__(self, ninp, nhead, nhid, dropout, is_moe=False, num_local_experts=1):
super().__init__(ninp, nhead, nhid, dropout, is_moe=is_moe, num_local_experts=num_local_experts)
self.src_mask = None
def _generate_square_subsequent_mask(self, sz):
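# Builds a causal (additive) attention mask: 0.0 on and below the diagonal,
# -inf above it. For sz=3 the mask is
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
# so position i can only attend to positions <= i.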
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, float(0.0))
return mask
def forward(self, src):
# TODO(anj-s): Fix the data format so that we have [seq_len, batch_size, embedding dim].
# Currently real data has seq_len as the second dimension and batch_size as the first dimension.
# We need to mask the sequence length dimension and not the batch size.
if self.src_mask is None or self.src_mask.size(0) != len(src):
device = src.device
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
return super().forward(src, self.src_mask)
class LinearLayer(nn.Linear):
"""Wrapped nn.Linear layer to allow for weight initialization."""
def __init__(self, ninp, ntoken, initrange):
super().__init__(ninp, ntoken)
self.bias.data.zero_()
self.weight.data.uniform_(-initrange, initrange)
class TransformerLM(nn.Sequential):
"""A GPT-2 based nn.Sequential language model."""
def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder, is_moe=False, num_local_experts=1):
layers = [
EmbeddingLayer(ntokens, ninp, initrange),
PositionalEncodingLayer(ninp, dropout),
]
for _ in range(ndecoder):
layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout, is_moe, num_local_experts))
layers.append(LinearLayer(ninp, ntokens, initrange))
super(TransformerLM, self).__init__(*layers)
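# A minimal construction sketch using the "lm" golden model specs that appear
# earlier in this dump (vocab_size=10000, ninp=2048, nhead=32, nhid=2048,
# dropout=0, initrange=0.1, num_decoder_layers=10):
#   model = TransformerLM(
#       ntokens=10000, ninp=2048, nhead=32, nhid=2048,
#       dropout=0, initrange=0.1, ndecoder=10,
#   )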
|
#!/usr/bin/env python
"""
The script builds OpenCV.framework for OSX.
"""
from __future__ import print_function
import os, os.path, sys, argparse, traceback, multiprocessing
# import common code
sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__))+'/../ios'))
from build_framework import Builder
MACOSX_DEPLOYMENT_TARGET='10.12' # default, can be changed via command line options or environment variable
class OSXBuilder(Builder):
def getToolchain(self, arch, target):
return None
def getBuildCommand(self, archs, target):
buildcmd = [
"xcodebuild",
"MACOSX_DEPLOYMENT_TARGET=" + os.environ['MACOSX_DEPLOYMENT_TARGET'],
"ARCHS=%s" % archs[0],
"-sdk", target.lower(),
"-configuration", "Debug" if self.debug else "Release",
"-parallelizeTargets",
"-jobs", str(multiprocessing.cpu_count())
]
return buildcmd
def getInfoPlist(self, builddirs):
return os.path.join(builddirs[0], "osx", "Info.plist")
if __name__ == "__main__":
folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for OSX.')
parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework')
parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)')
parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)')
parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework')
parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF)')
parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)')
parser.add_argument('--macosx_deployment_target', default=os.environ.get('MACOSX_DEPLOYMENT_TARGET', MACOSX_DEPLOYMENT_TARGET), help='specify MACOSX_DEPLOYMENT_TARGET')
parser.add_argument('--debug', action='store_true', help='Build "Debug" binaries (CMAKE_BUILD_TYPE=Debug)')
parser.add_argument('--debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)')
args = parser.parse_args()
os.environ['MACOSX_DEPLOYMENT_TARGET'] = args.macosx_deployment_target
print('Using MACOSX_DEPLOYMENT_TARGET=' + os.environ['MACOSX_DEPLOYMENT_TARGET'])
b = OSXBuilder(args.opencv, args.contrib, False, False, args.without, args.disable, args.enablenonfree,
[
(["x86_64"], "MacOSX")
], args.debug, args.debug_info)
b.build(args.out)
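# Example invocation (the script path is hypothetical; the flags are defined by
# the argparse setup above):
#   python build_framework.py /tmp/opencv_osx_framework --contrib ../opencv_contrib --enable_nonfree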
|
# Classes and methods whitelist
core = {'': ['absdiff', 'add', 'addWeighted', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cartToPolar',\
'compare', 'convertScaleAbs', 'copyMakeBorder', 'countNonZero', 'determinant', 'dft', 'divide', 'eigen', \
'exp', 'flip', 'getOptimalDFTSize','gemm', 'hconcat', 'inRange', 'invert', 'kmeans', 'log', 'magnitude', \
'max', 'mean', 'meanStdDev', 'merge', 'min', 'minMaxLoc', 'mixChannels', 'multiply', 'norm', 'normalize', \
'perspectiveTransform', 'polarToCart', 'pow', 'randn', 'randu', 'reduce', 'repeat', 'rotate', 'setIdentity', 'setRNGSeed', \
'solve', 'solvePoly', 'split', 'sqrt', 'subtract', 'trace', 'transform', 'transpose', 'vconcat'],
'Algorithm': []}
imgproc = {'': ['Canny', 'GaussianBlur', 'Laplacian', 'HoughLines', 'HoughLinesP', 'HoughCircles', 'Scharr','Sobel', \
'adaptiveThreshold','approxPolyDP','arcLength','bilateralFilter','blur','boundingRect','boxFilter',\
'calcBackProject','calcHist','circle','compareHist','connectedComponents','connectedComponentsWithStats', \
'contourArea', 'convexHull', 'convexityDefects', 'cornerHarris','cornerMinEigenVal','createCLAHE', \
'createLineSegmentDetector','cvtColor','demosaicing','dilate', 'distanceTransform','distanceTransformWithLabels', \
'drawContours','ellipse','ellipse2Poly','equalizeHist','erode', 'filter2D', 'findContours','fitEllipse', \
'fitLine', 'floodFill','getAffineTransform', 'getPerspectiveTransform', 'getRotationMatrix2D', 'getStructuringElement', \
'goodFeaturesToTrack','grabCut','initUndistortRectifyMap', 'integral','integral2', 'isContourConvex', 'line', \
'matchShapes', 'matchTemplate','medianBlur', 'minAreaRect', 'minEnclosingCircle', 'moments', 'morphologyEx', \
'pointPolygonTest', 'putText','pyrDown','pyrUp','rectangle','remap', 'resize','sepFilter2D','threshold', \
'undistort','warpAffine','warpPerspective','warpPolar','watershed', \
'fillPoly', 'fillConvexPoly'],
'CLAHE': ['apply', 'collectGarbage', 'getClipLimit', 'getTilesGridSize', 'setClipLimit', 'setTilesGridSize']}
objdetect = {'': ['groupRectangles'],
'HOGDescriptor': ['load', 'HOGDescriptor', 'getDefaultPeopleDetector', 'getDaimlerPeopleDetector', 'setSVMDetector', 'detectMultiScale'],
'CascadeClassifier': ['load', 'detectMultiScale2', 'CascadeClassifier', 'detectMultiScale3', 'empty', 'detectMultiScale']}
video = {'': ['CamShift', 'calcOpticalFlowFarneback', 'calcOpticalFlowPyrLK', 'createBackgroundSubtractorMOG2', \
'findTransformECC', 'meanShift'],
'BackgroundSubtractorMOG2': ['BackgroundSubtractorMOG2', 'apply'],
'BackgroundSubtractor': ['apply', 'getBackgroundImage']}
dnn = {'dnn_Net': ['setInput', 'forward'],
'': ['readNetFromCaffe', 'readNetFromTensorflow', 'readNetFromTorch', 'readNetFromDarknet',
'readNetFromONNX', 'readNet', 'blobFromImage']}
features2d = {'Feature2D': ['detect', 'compute', 'detectAndCompute', 'descriptorSize', 'descriptorType', 'defaultNorm', 'empty', 'getDefaultName'],
'BRISK': ['create', 'getDefaultName'],
'ORB': ['create', 'setMaxFeatures', 'setScaleFactor', 'setNLevels', 'setEdgeThreshold', 'setFirstLevel', 'setWTA_K', 'setScoreType', 'setPatchSize', 'getFastThreshold', 'getDefaultName'],
'MSER': ['create', 'detectRegions', 'setDelta', 'getDelta', 'setMinArea', 'getMinArea', 'setMaxArea', 'getMaxArea', 'setPass2Only', 'getPass2Only', 'getDefaultName'],
'FastFeatureDetector': ['create', 'setThreshold', 'getThreshold', 'setNonmaxSuppression', 'getNonmaxSuppression', 'setType', 'getType', 'getDefaultName'],
'AgastFeatureDetector': ['create', 'setThreshold', 'getThreshold', 'setNonmaxSuppression', 'getNonmaxSuppression', 'setType', 'getType', 'getDefaultName'],
'GFTTDetector': ['create', 'setMaxFeatures', 'getMaxFeatures', 'setQualityLevel', 'getQualityLevel', 'setMinDistance', 'getMinDistance', 'setBlockSize', 'getBlockSize', 'setHarrisDetector', 'getHarrisDetector', 'setK', 'getK', 'getDefaultName'],
# 'SimpleBlobDetector': ['create'],
'KAZE': ['create', 'setExtended', 'getExtended', 'setUpright', 'getUpright', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'],
'AKAZE': ['create', 'setDescriptorType', 'getDescriptorType', 'setDescriptorSize', 'getDescriptorSize', 'setDescriptorChannels', 'getDescriptorChannels', 'setThreshold', 'getThreshold', 'setNOctaves', 'getNOctaves', 'setNOctaveLayers', 'getNOctaveLayers', 'setDiffusivity', 'getDiffusivity', 'getDefaultName'],
'DescriptorMatcher': ['add', 'clear', 'empty', 'isMaskSupported', 'train', 'match', 'knnMatch', 'radiusMatch', 'clone', 'create'],
'BFMatcher': ['isMaskSupported', 'create'],
'': ['drawKeypoints', 'drawMatches', 'drawMatchesKnn']}
photo = {'': ['createAlignMTB', 'createCalibrateDebevec', 'createCalibrateRobertson', \
'createMergeDebevec', 'createMergeMertens', 'createMergeRobertson', \
'createTonemapDrago', 'createTonemapMantiuk', 'createTonemapReinhard', 'inpaint'],
'CalibrateCRF': ['process'],
'AlignMTB' : ['calculateShift', 'shiftMat', 'computeBitmaps', 'getMaxBits', 'setMaxBits', \
'getExcludeRange', 'setExcludeRange', 'getCut', 'setCut'],
'CalibrateDebevec' : ['getLambda', 'setLambda', 'getSamples', 'setSamples', 'getRandom', 'setRandom'],
'CalibrateRobertson' : ['getMaxIter', 'setMaxIter', 'getThreshold', 'setThreshold', 'getRadiance'],
'MergeExposures' : ['process'],
'MergeDebevec' : ['process'],
'MergeMertens' : ['process', 'getContrastWeight', 'setContrastWeight', 'getSaturationWeight', \
'setSaturationWeight', 'getExposureWeight', 'setExposureWeight'],
'MergeRobertson' : ['process'],
'Tonemap' : ['process' , 'getGamma', 'setGamma'],
'TonemapDrago' : ['getSaturation', 'setSaturation', 'getBias', 'setBias', \
'getSigmaColor', 'setSigmaColor', 'getSigmaSpace','setSigmaSpace'],
'TonemapMantiuk' : ['getScale', 'setScale', 'getSaturation', 'setSaturation'],
'TonemapReinhard' : ['getIntensity', 'setIntensity', 'getLightAdaptation', 'setLightAdaptation', \
'getColorAdaptation', 'setColorAdaptation']
}
aruco = {'': ['detectMarkers', 'drawDetectedMarkers', 'drawAxis', 'estimatePoseSingleMarkers', 'estimatePoseBoard', 'estimatePoseCharucoBoard', 'interpolateCornersCharuco', 'drawDetectedCornersCharuco'],
'aruco_Dictionary': ['get', 'drawMarker'],
'aruco_Board': ['create'],
'aruco_GridBoard': ['create', 'draw'],
'aruco_CharucoBoard': ['create', 'draw'],
}
calib3d = {'': ['findHomography', 'calibrateCameraExtended', 'drawFrameAxes', 'estimateAffine2D', 'getDefaultNewCameraMatrix', 'initUndistortRectifyMap', 'Rodrigues']}
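# Note: makeWhiteList is not defined in this file; it is expected to be provided
# by the OpenCV JavaScript binding generator that executes this whitelist config.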
white_list = makeWhiteList([core, imgproc, objdetect, video, dnn, features2d, photo, aruco, calib3d])
|