# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.git_version_info import torch_info
def skip_on_arch(min_arch=7):
if deepspeed.accelerator.get_accelerator().device_name() == 'cuda':
if torch.cuda.get_device_capability()[0] < min_arch: #ignore-cuda
pytest.skip(f"needs higher compute capability than {min_arch}")
else:
assert deepspeed.accelerator.get_accelerator().device_name() == 'xpu'
return
def skip_on_cuda(valid_cuda):
split_version = lambda x: map(int, x.split('.')[:2])
if deepspeed.accelerator.get_accelerator().device_name() == 'cuda':
CUDA_MAJOR, CUDA_MINOR = split_version(torch_info['cuda_version'])
CUDA_VERSION = (CUDA_MAJOR * 10) + CUDA_MINOR
if valid_cuda.count(CUDA_VERSION) == 0:
pytest.skip(f"requires cuda versions {valid_cuda}")
else:
assert deepspeed.accelerator.get_accelerator().device_name() == 'xpu'
return
def required_torch_version():
    # Requires torch >= 1.8; compare (major, minor) as a tuple so torch 2.x also passes.
    TORCH_MAJOR = int(torch.__version__.split('.')[0])
    TORCH_MINOR = int(torch.__version__.split('.')[1])
    return (TORCH_MAJOR, TORCH_MINOR) >= (1, 8)
def bf16_required_version_check(accelerator_check=True):
split_version = lambda x: map(int, x.split('.')[:2])
TORCH_MAJOR, TORCH_MINOR = split_version(torch_info['version'])
NCCL_MAJOR, NCCL_MINOR = split_version(torch_info['nccl_version'])
CUDA_MAJOR, CUDA_MINOR = split_version(torch_info['cuda_version'])
# Sometimes bf16 tests are runnable even if not natively supported by accelerator
if accelerator_check:
accelerator_pass = torch_info['bf16_support']
else:
accelerator_pass = True
if (TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)) and (CUDA_MAJOR >= 11) and (
NCCL_MAJOR > 2 or (NCCL_MAJOR == 2 and NCCL_MINOR >= 10)) and accelerator_pass:
return True
else:
return False
def required_minimum_torch_version(major_version, minor_version):
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR < major_version:
return False
return TORCH_MAJOR > major_version or TORCH_MINOR >= minor_version
def required_maximum_torch_version(major_version, minor_version):
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR > major_version:
return False
return TORCH_MAJOR < major_version or TORCH_MINOR <= minor_version
def required_amp_check():
from importlib.util import find_spec
if find_spec('apex') is None:
return False
else:
return True
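# Illustrative sketch (not part of the original helpers): how a hypothetical test might gate
# on the checks above. The test name, thresholds and skip message below are assumptions.
def _example_bf16_gate():
    if not bf16_required_version_check():
        pytest.skip("bf16 support needs torch >= 1.10, CUDA >= 11 and NCCL >= 2.10")
    skip_on_arch(min_arch=8)  # bf16 tensor cores require compute capability 8.0 (Ampere) or newer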
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import deepspeed
import deepspeed.comm as dist
import deepspeed.runtime.utils as ds_utils
from deepspeed.accelerator import get_accelerator
from deepspeed.runtime.pipe.module import PipelineModule, LayerSpec
class AlexNet(nn.Module):
def __init__(self, num_classes=10):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=5),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.classifier = nn.Linear(256, num_classes)
self.loss_fn = nn.CrossEntropyLoss()
def forward(self, x, y):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return self.loss_fn(x, y)
class AlexNetPipe(AlexNet):
def to_layers(self):
layers = [*self.features, lambda x: x.view(x.size(0), -1), self.classifier]
return layers
class AlexNetPipeSpec(PipelineModule):
def __init__(self, num_classes=10, **kwargs):
self.num_classes = num_classes
specs = [
LayerSpec(nn.Conv2d, 3, 64, kernel_size=11, stride=4, padding=5),
LayerSpec(nn.ReLU, inplace=True),
LayerSpec(nn.MaxPool2d, kernel_size=2, stride=2),
LayerSpec(nn.Conv2d, 64, 192, kernel_size=5, padding=2),
F.relu,
LayerSpec(nn.MaxPool2d, kernel_size=2, stride=2),
LayerSpec(nn.Conv2d, 192, 384, kernel_size=3, padding=1),
F.relu,
LayerSpec(nn.Conv2d, 384, 256, kernel_size=3, padding=1),
F.relu,
LayerSpec(nn.Conv2d, 256, 256, kernel_size=3, padding=1),
F.relu,
LayerSpec(nn.MaxPool2d, kernel_size=2, stride=2),
lambda x: x.view(x.size(0), -1),
LayerSpec(nn.Linear, 256, self.num_classes), # classifier
]
super().__init__(layers=specs, loss_fn=nn.CrossEntropyLoss(), **kwargs)
# Define this here because we cannot pickle local lambda functions
def cast_to_half(x):
return x.half()
def cifar_trainset(fp16=False):
torchvision = pytest.importorskip("torchvision", minversion="0.5.0")
import torchvision.transforms as transforms
transform_list = [
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
if fp16:
transform_list.append(torchvision.transforms.Lambda(cast_to_half))
transform = transforms.Compose(transform_list)
local_rank = get_accelerator().current_device()
# Only one rank per machine downloads.
dist.barrier()
if local_rank != 0:
dist.barrier()
trainset = torchvision.datasets.CIFAR10(root='/blob/cifar10-data', train=True, download=True, transform=transform)
if local_rank == 0:
dist.barrier()
return trainset
def train_cifar(model, config, num_steps=400, average_dp_losses=True, fp16=True, seed=123):
with get_accelerator().random().fork_rng(devices=[get_accelerator().current_device_name()]):
ds_utils.set_random_seed(seed)
# disable dropout
model.eval()
trainset = cifar_trainset(fp16=fp16)
config['local_rank'] = dist.get_rank()
engine, _, _, _ = deepspeed.initialize(config=config,
model=model,
model_parameters=[p for p in model.parameters()],
training_data=trainset)
losses = []
for step in range(num_steps):
loss = engine.train_batch()
losses.append(loss.item())
if step % 50 == 0 and dist.get_rank() == 0:
print(f'STEP={step} LOSS={loss.item()}')
if average_dp_losses:
loss_tensor = torch.tensor(losses).to(get_accelerator().device_name())
dist.all_reduce(loss_tensor)
loss_tensor /= dist.get_world_size()
losses = loss_tensor.tolist()
return losses
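# Illustrative sketch (not part of the original file): one way AlexNetPipe and train_cifar
# might be wired together. The DeepSpeed config values and stage count below are assumptions
# chosen only to show the call pattern, not settings taken from the original tests.
def _example_pipeline_training():
    config = {
        "train_batch_size": 16,
        "train_micro_batch_size_per_gpu": 4,
        "optimizer": {"type": "Adam", "params": {"lr": 0.001}},
        "fp16": {"enabled": True},
    }
    net = AlexNetPipe(num_classes=10)
    pipe_model = PipelineModule(layers=net.to_layers(), loss_fn=nn.CrossEntropyLoss(), num_stages=2)
    return train_cifar(pipe_model, config=config, num_steps=100, fp16=True)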
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import time
import inspect
from abc import ABC, abstractmethod
from pathlib import Path
import torch
import torch.multiprocessing as mp
import deepspeed
from deepspeed.accelerator import get_accelerator
import deepspeed.comm as dist
from torch.multiprocessing import Process
import pytest
from _pytest.outcomes import Skipped
from _pytest.fixtures import FixtureLookupError, FixtureFunctionMarker
# Worker timeout *after* the first worker has completed.
DEEPSPEED_UNIT_WORKER_TIMEOUT = 120
# Worker timeout for tests that hang
DEEPSPEED_TEST_TIMEOUT = 600
def get_xdist_worker_id():
xdist_worker = os.environ.get('PYTEST_XDIST_WORKER', None)
if xdist_worker is not None:
xdist_worker_id = xdist_worker.replace('gw', '')
return int(xdist_worker_id)
return None
def get_master_port():
master_port = os.environ.get('DS_TEST_PORT', '29503')
xdist_worker_id = get_xdist_worker_id()
if xdist_worker_id is not None:
master_port = str(int(master_port) + xdist_worker_id)
return master_port
def set_accelerator_visible():
cuda_visible = os.environ.get("CUDA_VISIBLE_DEVICES", None)
xdist_worker_id = get_xdist_worker_id()
if xdist_worker_id is None:
xdist_worker_id = 0
if cuda_visible is None:
# CUDA_VISIBLE_DEVICES is not set, discover it using accelerator specific command instead
import subprocess
if get_accelerator().device_name() == 'cuda':
is_rocm_pytorch = hasattr(torch.version, 'hip') and torch.version.hip is not None
if is_rocm_pytorch:
rocm_smi = subprocess.check_output(['rocm-smi', '--showid'])
gpu_ids = filter(lambda s: 'GPU' in s, rocm_smi.decode('utf-8').strip().split('\n'))
num_accelerators = len(list(gpu_ids))
else:
nvidia_smi = subprocess.check_output(['nvidia-smi', '--list-gpus'])
num_accelerators = len(nvidia_smi.decode('utf-8').strip().split('\n'))
elif get_accelerator().device_name() == 'xpu':
import re
clinfo = subprocess.check_output(['clinfo'])
lines = clinfo.decode('utf-8').strip().split('\n')
num_accelerators = 0
for line in lines:
match = re.search('Device Type.*GPU', line)
if match:
num_accelerators += 1
else:
assert get_accelerator().device_name() == 'cpu'
cpu_sockets = int(
subprocess.check_output('cat /proc/cpuinfo | grep "physical id" | sort -u | wc -l', shell=True))
num_accelerators = cpu_sockets
cuda_visible = ",".join(map(str, range(num_accelerators)))
# rotate list based on xdist worker id, example below
# wid=0 -> ['0', '1', '2', '3']
# wid=1 -> ['1', '2', '3', '0']
# wid=2 -> ['2', '3', '0', '1']
# wid=3 -> ['3', '0', '1', '2']
dev_id_list = cuda_visible.split(",")
dev_id_list = dev_id_list[xdist_worker_id:] + dev_id_list[:xdist_worker_id]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(dev_id_list)
class DistributedExec(ABC):
"""
Base class for distributed execution of functions/methods. Contains common
methods needed for DistributedTest and DistributedFixture.
"""
world_size = 2
backend = get_accelerator().communication_backend_name()
init_distributed = True
set_dist_env = True
requires_cuda_env = True
@abstractmethod
def run(self):
...
def __call__(self, request=None):
self._fixture_kwargs = self._get_fixture_kwargs(request, self.run)
world_size = self.world_size
if self.requires_cuda_env and not get_accelerator().is_available():
pytest.skip("only supported in accelerator environments.")
if isinstance(world_size, int):
world_size = [world_size]
for procs in world_size:
self._launch_procs(procs)
time.sleep(0.5)
def _get_fixture_kwargs(self, request, func):
if not request:
return {}
# Grab fixture / parametrize kwargs from pytest request object
fixture_kwargs = {}
params = inspect.getfullargspec(func).args
params.remove("self")
for p in params:
try:
fixture_kwargs[p] = request.getfixturevalue(p)
except FixtureLookupError:
pass # test methods can have kwargs that are not fixtures
return fixture_kwargs
def _launch_procs(self, num_procs):
if get_accelerator().is_available() and get_accelerator().device_count() < num_procs:
pytest.skip(
f"Skipping test because not enough GPUs are available: {num_procs} required, {get_accelerator().device_count()} available"
)
mp.set_start_method('forkserver', force=True)
skip_msg = mp.Queue() # Allows forked processes to share pytest.skip reason
processes = []
for local_rank in range(num_procs):
p = Process(target=self._dist_init, args=(local_rank, num_procs, skip_msg))
p.start()
processes.append(p)
# Now loop and wait for a test to complete. The spin-wait here isn't a big
# deal because the number of processes will be O(#GPUs) << O(#CPUs).
any_done = False
start = time.time()
while (not any_done) and ((time.time() - start) < DEEPSPEED_TEST_TIMEOUT):
for p in processes:
if not p.is_alive():
any_done = True
break
time.sleep(.1) # So we don't hog CPU
# If we hit the timeout, then presume a test has hung
if not any_done:
for p in processes:
p.terminate()
pytest.exit("Test hanged, exiting", returncode=0)
# Wait for all other processes to complete
for p in processes:
p.join(DEEPSPEED_UNIT_WORKER_TIMEOUT)
failed = [(rank, p) for rank, p in enumerate(processes) if p.exitcode != 0]
for rank, p in failed:
# If it still hasn't terminated, kill it because it hung.
if p.exitcode is None:
p.terminate()
pytest.fail(f'Worker {rank} hung.', pytrace=False)
if p.exitcode < 0:
pytest.fail(f'Worker {rank} killed by signal {-p.exitcode}', pytrace=False)
if p.exitcode > 0:
pytest.fail(f'Worker {rank} exited with code {p.exitcode}', pytrace=False)
if not skip_msg.empty():
# This assumes all skip messages are the same; it may be useful to
# add a check here asserting that all skip messages are equal
pytest.skip(skip_msg.get())
def _dist_init(self, local_rank, num_procs, skip_msg):
"""Initialize deepspeed.comm and execute the user function. """
if self.set_dist_env:
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = get_master_port()
os.environ['LOCAL_RANK'] = str(local_rank)
# NOTE: unit tests don't support multi-node so local_rank == global rank
os.environ['RANK'] = str(local_rank)
os.environ['WORLD_SIZE'] = str(num_procs)
# turn off NCCL logging if set
os.environ.pop('NCCL_DEBUG', None)
if get_accelerator().is_available():
set_accelerator_visible()
if self.init_distributed:
deepspeed.init_distributed(dist_backend=self.backend)
dist.barrier()
if get_accelerator().is_available():
get_accelerator().set_device(local_rank)
try:
self.run(**self._fixture_kwargs)
except BaseException as e:
if isinstance(e, Skipped):
skip_msg.put(e.msg)
else:
raise e
if self.init_distributed or dist.is_initialized():
# make sure all ranks finish at the same time
dist.barrier()
# tear down after test completes
dist.destroy_process_group()
class DistributedFixture(DistributedExec):
"""
Implementation that extends @pytest.fixture to allow for distributed execution.
This is primarily meant to be used when a test requires executing two pieces of
code with different world sizes.
There are 2 parameters that can be modified:
- world_size: int = 2 -- the number of processes to launch
- backend: Literal['nccl','mpi','gloo'] = 'nccl' -- which backend to use
Features:
- able to call pytest.skip() inside fixture
- can be reused by multiple tests
- can accept other fixtures as input
Limitations:
- cannot use @pytest.mark.parametrize
- world_size cannot be modified after definition and only one world_size value is accepted
- any fixtures used must also be used in the test that uses this fixture (see example below)
- the fixture cannot return values. Passing values to a DistributedTest
object can be achieved using class_tmpdir and writing to file (see example below)
Usage:
- must implement a run(self, ...) method
- fixture can be used by making the class name input to a test function
Example:
@pytest.fixture(params=[10,20])
def regular_pytest_fixture(request):
return request.param
class distributed_fixture_example(DistributedFixture):
world_size = 4
def run(self, regular_pytest_fixture, class_tmpdir):
assert int(os.environ["WORLD_SIZE"]) == self.world_size
local_rank = os.environ["LOCAL_RANK"]
print(f"Rank {local_rank} with value {regular_pytest_fixture}")
with open(os.path.join(class_tmpdir, f"{local_rank}.txt"), "w") as f:
f.write(f"{local_rank},{regular_pytest_fixture}")
class TestExample(DistributedTest):
world_size = 1
def test(self, distributed_fixture_example, regular_pytest_fixture, class_tmpdir):
assert int(os.environ["WORLD_SIZE"]) == self.world_size
for rank in range(4):
with open(os.path.join(class_tmpdir, f"{rank}.txt"), "r") as f:
assert f.read() == f"{rank},{regular_pytest_fixture}"
"""
is_dist_fixture = True
# These values are just placeholders so that pytest recognizes this as a fixture
_pytestfixturefunction = FixtureFunctionMarker(scope="function", params=None)
__name__ = ""
def __init__(self):
assert isinstance(self.world_size, int), "Only one world size is allowed for distributed fixtures"
self.__name__ = type(self).__name__
_pytestfixturefunction = FixtureFunctionMarker(scope="function", params=None, name=self.__name__)
class DistributedTest(DistributedExec):
"""
Implementation for running pytest with distributed execution.
There are 2 parameters that can be modified:
- world_size: Union[int,List[int]] = 2 -- the number of processes to launch
- backend: Literal['nccl','mpi','gloo'] = 'nccl' -- which backend to use
Features:
- able to call pytest.skip() inside tests
- works with pytest fixtures, parametrize, mark, etc.
- can contain multiple tests (each of which can be parametrized separately)
- class methods can be fixtures (usable by tests in this class only)
- world_size can be changed for individual tests using @pytest.mark.world_size(world_size)
- class_tmpdir is a fixture that can be used to get a tmpdir shared among
all tests (including DistributedFixture)
Usage:
- class name must start with "Test"
- must implement one or more test*(self, ...) methods
Example:
@pytest.fixture(params=[10,20])
def val1(request):
return request.param
@pytest.mark.fast
@pytest.mark.parametrize("val2", [30,40])
class TestExample(DistributedTest):
world_size = 2
@pytest.fixture(params=[50,60])
def val3(self, request):
return request.param
def test_1(self, val1, val2, str1="hello world"):
assert int(os.environ["WORLD_SIZE"]) == self.world_size
assert all((val1, val2, str1))
@pytest.mark.world_size(1)
@pytest.mark.parametrize("val4", [70,80])
def test_2(self, val1, val2, val3, val4):
assert int(os.environ["WORLD_SIZE"]) == 1
assert all((val1, val2, val3, val4))
"""
is_dist_test = True
# Temporary directory that is shared among test methods in a class
@pytest.fixture(autouse=True, scope="class")
def class_tmpdir(self, tmpdir_factory):
fn = tmpdir_factory.mktemp(self.__class__.__name__)
return fn
def run(self, **fixture_kwargs):
self._current_test(**fixture_kwargs)
def __call__(self, request):
self._current_test = self._get_current_test_func(request)
self._fixture_kwargs = self._get_fixture_kwargs(request, self._current_test)
if self.requires_cuda_env and not get_accelerator().is_available():
pytest.skip("only supported in accelerator environments.")
# Catch world_size override pytest mark
for mark in getattr(request.function, "pytestmark", []):
if mark.name == "world_size":
world_size = mark.args[0]
break
else:
world_size = self.world_size
if isinstance(world_size, int):
world_size = [world_size]
for procs in world_size:
self._launch_procs(procs)
time.sleep(0.5)
def _get_current_test_func(self, request):
# DistributedTest subclasses may have multiple test methods
func_name = request.function.__name__
return getattr(self, func_name)
def get_test_path(filename):
curr_path = Path(__file__).parent
return str(curr_path.joinpath(filename))
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
class MultiOutputModel(torch.nn.Module):
def __init__(self, hidden_dim, weight_value):
super(MultiOutputModel, self).__init__()
self.linear = torch.nn.Linear(hidden_dim, hidden_dim, bias=False)
self.linear.weight.data.fill_(weight_value)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def forward(self, inputs, targets):
losses = []
for x, y in zip(inputs, targets):
hidden_dim = self.linear(x)
loss = self.cross_entropy_loss(hidden_dim, y)
losses.append(loss)
return tuple(losses)
def multi_output_dataloader(model, total_samples, hidden_dim, device, inputs, targets):
assert len(inputs) == len(targets)
batch_size = model.train_micro_batch_size_per_gpu()
train_data = [
torch.full(size=(total_samples, hidden_dim), fill_value=x, device=device, dtype=torch.half, requires_grad=True)
for x in inputs
]
train_label = [torch.empty(total_samples, device=device, dtype=torch.long).fill_(y) for y in targets]
train_dataset = torch.utils.data.TensorDataset(*train_data, *train_label)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size)
return train_loader
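# Illustrative sketch (not part of the original file): one way the model and dataloader above
# might be exercised. The engine argument is assumed to be an already-initialized DeepSpeed
# engine wrapping MultiOutputModel; the sample values below are arbitrary.
def _example_multi_output_step(engine, device, hidden_dim=10):
    loader = multi_output_dataloader(model=engine,
                                     total_samples=4,
                                     hidden_dim=hidden_dim,
                                     device=device,
                                     inputs=[1.0, 2.0],
                                     targets=[1, 2])
    for batch in loader:
        inputs, targets = batch[:2], batch[2:]
        losses = engine(inputs, targets)
        engine.backward(sum(losses))
        engine.step()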
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from __future__ import absolute_import, division, print_function, unicode_literals
# Copyright The Microsoft DeepSpeed Team
# DeepSpeed note, code taken from commit 3d59216cec89a363649b4fe3d15295ba936ced0f
# https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/modeling.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils import checkpoint
import deepspeed.comm as dist
from torch.nn import Module
import torch.nn.functional as F
import torch.nn.init as init
#from numba import cuda
#from deepspeed_cuda import DeepSpeedSoftmaxConfig, DeepSpeedSoftmax
from deepspeed.accelerator import get_accelerator
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased':
"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased':
"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
# which are not required for using the pretrained model
if any(n in ["adam_v", "adam_m"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
"""
@torch.jit.script
def f_gelu(x):
return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
@torch.jit.script
def bias_gelu(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
@torch.jit.script
def bias_tanh(bias, y):
x = bias + y
return torch.tanh(x)
"""
def f_gelu(x):
x_type = x.dtype
x = x.float()
x = x * 0.5 * (1.0 + torch.erf(x / 1.41421))
return x.to(x_type)
def bias_gelu(bias, y):
y_type = y.dtype
x = bias.float() + y.float()
x = x * 0.5 * (1.0 + torch.erf(x / 1.41421))
return x.to(y_type)
def bias_tanh(bias, y):
y_type = y.dtype
x = bias.float() + y.float()
x = torch.tanh(x)
return x.to(y_type)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return f_gelu(x)
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
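# Illustrative sketch (not part of the original file): the tanh-based approximation quoted in
# the gelu() docstring above, written out for comparison with the erf-based form used here.
def gelu_tanh_approx(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))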
class GPUTimer:
def __init__(self):
super().__init__()
self.start = get_accelerator().Event() # noqa: F821
self.stop = get_accelerator().Event() # noqa: F821
def record(self):
self.start.record()
def elapsed(self):
self.stop.record()
self.stop.synchronize()
return self.start.elapsed_time(self.stop) / 1000.0
class LinearActivation(Module):
r"""Fused Linear and activation Module.
"""
__constants__ = ['bias']
def __init__(self, in_features, out_features, weights, biases, act='gelu', bias=True):
super(LinearActivation, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.fused_gelu = False
self.fused_tanh = False
if isinstance(act, str):
if bias and act == 'gelu':
self.fused_gelu = True
elif bias and act == 'tanh':
self.fused_tanh = True
else:
self.act_fn = ACT2FN[act]
else:
self.act_fn = act
#self.weight = Parameter(torch.Tensor(out_features, in_features))
self.weight = weights[5]
self.bias = biases[5]
#if bias:
# self.bias = Parameter(torch.Tensor(out_features))
#else:
# self.register_parameter('bias', None)
#self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
if self.fused_gelu:
#timing = []
#t1 = GPUTimer()
#t1.record()
y = F.linear(input, self.weight, None)
#timing.append(t1.elapsed())
#t1.record()
bg = bias_gelu(self.bias, y)
#timing.append(t1.elapsed())
return bg
elif self.fused_tanh:
return bias_tanh(self.bias, F.linear(input, self.weight, None))
else:
return self.act_fn(F.linear(input, self.weight, self.bias))
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.in_features, self.out_features, self.bias
is not None)
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
batch_size=8,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
fp16=False):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.batch_size = batch_size
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.fp16 = fp16
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
try:
import apex
#apex.amp.register_half_function(apex.normalization.fused_layer_norm, 'FusedLayerNorm')
import apex.normalization
#apex.amp.register_float_function(apex.normalization.FusedLayerNorm, 'forward')
BertLayerNorm = apex.normalization.FusedLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, i, config, weights, biases):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError("The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.query.weight = weights[0]
self.query.bias = biases[0]
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.key.weight = weights[1]
self.key.bias = biases[1]
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.value.weight = weights[2]
self.value.bias = biases[2]
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.softmax = nn.Softmax(dim=-1)
#self.softmax_config = DeepSpeedSoftmaxConfig()
#self.softmax_config.batch_size = config.batch_size
#self.softmax_config.max_seq_length = config.max_position_embeddings
#self.softmax_config.hidden_size = config.hidden_size
#self.softmax_config.heads = config.num_attention_heads
#self.softmax_config.softmax_id = i
#self.softmax_config.fp16 = config.fp16
#self.softmax_config.prob_drop_out = 0.0
#self.softmax = DeepSpeedSoftmax(i, self.softmax_config)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def transpose_key_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 3, 1)
def forward(self, hidden_states, attention_mask, grads=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_key_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer)
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = self.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer1 = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer1.size()[:-2] + (self.all_head_size, )
context_layer1 = context_layer1.view(*new_context_layer_shape)
return context_layer1
class BertSelfOutput(nn.Module):
def __init__(self, config, weights, biases):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dense.weight = weights[3]
self.dense.bias = biases[3]
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_w(self):
return self.dense.weight
class BertAttention(nn.Module):
def __init__(self, i, config, weights, biases):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(i, config, weights, biases)
self.output = BertSelfOutput(config, weights, biases)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
def get_w(self):
return self.output.get_w()
class BertIntermediate(nn.Module):
def __init__(self, config, weights, biases):
super(BertIntermediate, self).__init__()
self.dense_act = LinearActivation(config.hidden_size,
config.intermediate_size,
weights,
biases,
act=config.hidden_act)
def forward(self, hidden_states):
hidden_states = self.dense_act(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config, weights, biases):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dense.weight = weights[6]
self.dense.bias = biases[6]
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, i, config, weights, biases):
super(BertLayer, self).__init__()
self.attention = BertAttention(i, config, weights, biases)
self.intermediate = BertIntermediate(config, weights, biases)
self.output = BertOutput(config, weights, biases)
self.weight = weights
self.biases = biases
def forward(self, hidden_states, attention_mask, grads, collect_all_grads=False):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if collect_all_grads:
# self.weight[0].register_hook(lambda x, self=self: grads.append([x,"Q_W"]))
# self.biases[0].register_hook(lambda x, self=self: grads.append([x,"Q_B"]))
# self.weight[1].register_hook(lambda x, self=self: grads.append([x,"K_W"]))
# self.biases[1].register_hook(lambda x, self=self: grads.append([x,"K_B"]))
self.weight[2].register_hook(lambda x, self=self: grads.append([x, "V_W"]))
self.biases[2].register_hook(lambda x, self=self: grads.append([x, "V_B"]))
self.weight[3].register_hook(lambda x, self=self: grads.append([x, "O_W"]))
self.biases[3].register_hook(lambda x, self=self: grads.append([x, "O_B"]))
self.attention.output.LayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "N2_W"]))
self.attention.output.LayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "N2_B"]))
self.weight[5].register_hook(lambda x, self=self: grads.append([x, "int_W"]))
self.biases[5].register_hook(lambda x, self=self: grads.append([x, "int_B"]))
self.weight[6].register_hook(lambda x, self=self: grads.append([x, "out_W"]))
self.biases[6].register_hook(lambda x, self=self: grads.append([x, "out_B"]))
self.output.LayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "norm_W"]))
self.output.LayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "norm_B"]))
return layer_output
def get_w(self):
return self.attention.get_w()
class BertEncoder(nn.Module):
def __init__(self, config, weights, biases):
super(BertEncoder, self).__init__()
#layer = BertLayer(config, weights, biases)
self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.layer = nn.ModuleList(
[copy.deepcopy(BertLayer(i, config, weights, biases)) for i in range(config.num_hidden_layers)])
self.grads = []
self.graph = []
def get_grads(self):
return self.grads
# def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
# all_encoder_layers = []
# for layer_module in self.layer:
# hidden_states = layer_module(hidden_states, attention_mask)
# if output_all_encoded_layers:
# all_encoder_layers.append(hidden_states)
# if not output_all_encoded_layers:
# all_encoder_layers.append(hidden_states)
# return all_encoder_layers
def get_modules(self, big_node, input):
    # named_children() yields (name, module) pairs; record each pair and recurse into the module.
    for name, child in big_node.named_children():
        self.graph.append((name, child))
        self.get_modules(child, input)
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, checkpoint_activations=False):
all_encoder_layers = []
def custom(start, end):
def custom_forward(*inputs):
layers = self.layer[start:end]
x_ = inputs[0]
for layer in layers:
x_ = layer(x_, inputs[1])
return x_
return custom_forward
if checkpoint_activations:
l = 0
num_layers = len(self.layer)
chunk_length = math.ceil(math.sqrt(num_layers))
while l < num_layers:
hidden_states = checkpoint.checkpoint(custom(l, l + chunk_length), hidden_states, attention_mask * 1)
l += chunk_length
# decoder layers
else:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask, self.grads, collect_all_grads=True)
hidden_states.register_hook(lambda x, i=i, self=self: self.grads.append([x, "hidden_state"]))
#print("pytorch weight is: ", layer_module.get_w())
if output_all_encoded_layers:
all_encoder_layers.append((hidden_states))
if not output_all_encoded_layers or checkpoint_activations:
all_encoder_layers.append((hidden_states))
return all_encoder_layers
#class BertEncoder(nn.Module):
# def __init__(self, config):
# super(BertEncoder, self).__init__()
# layer = BertLayer(config)
# self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
#
# def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
# all_encoder_layers = []
# for layer_module in self.layer:
# hidden_states = layer_module(hidden_states, attention_mask)
# if output_all_encoded_layers:
# all_encoder_layers.append(hidden_states)
# if not output_all_encoded_layers:
# all_encoder_layers.append(hidden_states)
# return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh")
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense_act(first_token_tensor)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense_act(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
get_accelerator().range_push("decoder input.size() = {}, weight.size() = {}".format(
hidden_states.size(), self.decoder.weight.size()))
hidden_states = self.decoder(hidden_states) + self.bias
get_accelerator().range_pop()
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError("Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls,
pretrained_model_name_or_path,
state_dict=None,
cache_dir=None,
from_tf=False,
*inputs,
**kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = pretrained_model_name_or_path
# No download/caching step is performed here, so the archive path is used directly.
resolved_archive_file = archive_file
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(archive_file,
resolved_archive_file)) # noqa: F821
tempdir = None
if os.path.isdir(resolved_archive_file) or from_tf: # noqa: F821
serialization_dir = resolved_archive_file # noqa: F821
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, # noqa: F821
tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive: # noqa: F821
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path, map_location='cpu' if not get_accelerator().is_available() else None)
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
if from_tf:
# Directly load from a TensorFlow checkpoint
weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys,
error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(model.__class__.__name__,
unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
class BertModel(BertPreTrainedModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
classifier pretrained on top of the hidden state associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self,
input_ids,
token_type_ids=None,
attention_mask=None,
output_all_encoded_layers=True,
checkpoint_activations=False):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT; we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
checkpoint_activations=checkpoint_activations)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class BertForPreTraining(BertPreTrainedModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, args):
super(BertForPreTraining, self).__init__(config)
self.summary_writer = None
if dist.get_rank() == 0:
self.summary_writer = args.summary_writer
self.samples_per_step = dist.get_world_size() * args.train_batch_size
self.sample_count = self.samples_per_step
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def log_summary_writer(self, logs: dict, base='Train'):
if dist.get_rank() == 0:
module_name = "Samples" #self._batch_module_name.get(batch_type, self._get_batch_type_error(batch_type))
for key, log in logs.items():
self.summary_writer.add_scalar(f'{base}/{module_name}/{key}', log, self.sample_count)
self.sample_count += self.samples_per_step
def forward(self, batch, log=True):
#input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, checkpoint_activations=False):
input_ids = batch[1]
token_type_ids = batch[3]
attention_mask = batch[2]
masked_lm_labels = batch[5]
next_sentence_label = batch[4]
checkpoint_activations = False
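# Note (added for clarity; assumes the pretraining dataloader layout implied by the
# indexing above): slot 1 holds input_ids, slot 2 attention_mask, slot 3 token_type_ids,
# slot 4 next_sentence_label and slot 5 masked_lm_labels; slot 0 is not used here.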
sequence_output, pooled_output = self.bert(input_ids,
token_type_ids,
attention_mask,
output_all_encoded_layers=False,
checkpoint_activations=checkpoint_activations)
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
#print("loss is {} {}".format(masked_lm_loss, next_sentence_loss))
total_loss = masked_lm_loss + next_sentence_loss
# if log:
# self.log_summary_writer(logs={'train_loss': total_loss.item()})
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertForMaskedLM(BertPreTrainedModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self,
input_ids,
token_type_ids=None,
attention_mask=None,
masked_lm_labels=None,
checkpoint_activations=False):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
else:
return prediction_scores
class BertForNextSentencePrediction(BertPreTrainedModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `next_sentence_label` is not `None`:
Outputs the next sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self,
input_ids,
token_type_ids=None,
attention_mask=None,
next_sentence_label=None,
checkpoint_activations=False):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
else:
return seq_relationship_score
class BertForSequenceClassification(BertPreTrainedModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForMultipleChoice(BertPreTrainedModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_choices].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices):
super(BertForMultipleChoice, self).__init__(config)
self.num_choices = num_choices
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
_, pooled_output = self.bert(flat_input_ids,
flat_token_type_ids,
flat_attention_mask,
output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
else:
return reshaped_logits
class BertForTokenClassification(BertPreTrainedModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForQuestionAnswering(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self,
input_ids,
token_type_ids=None,
attention_mask=None,
start_positions=None,
end_positions=None,
checkpoint_activations=False):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
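# Shape sketch (illustrative): qa_outputs maps [batch, seq_len, hidden] -> [batch, seq_len, 2];
# split(1, dim=-1) gives two [batch, seq_len, 1] tensors and squeeze(-1) reduces each to
# [batch, seq_len], i.e. one start logit and one end logit per token.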
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, splitting adds a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
else:
return start_logits, end_logits
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from __future__ import absolute_import, division, print_function, unicode_literals
# Copyright The Microsoft DeepSpeed Team
# DeepSpeed note, code taken from commit 3d59216cec89a363649b4fe3d15295ba936ced0f
# https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/modeling.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils import checkpoint
import deepspeed.comm as dist
from torch.nn import Module
import torch.nn.functional as F
import torch.nn.init as init
from deepspeed.accelerator import get_accelerator
#from numba import cuda
#from deepspeed_cuda import DeepSpeedSoftmaxConfig, DeepSpeedSoftmax
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased':
"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased':
"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
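# Usage sketch (illustrative; the checkpoint path is hypothetical):
#   model = load_tf_weights_in_bert(bert_model, "/path/to/bert_model.ckpt")
# where bert_model is an already-constructed PyTorch BERT module whose parameter
# names mirror the TensorFlow checkpoint.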
"""
@torch.jit.script
def f_gelu(x):
return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
@torch.jit.script
def bias_gelu(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
@torch.jit.script
def bias_tanh(bias, y):
x = bias + y
return torch.tanh(x)
"""
def f_gelu(x):
x_type = x.dtype
x = x.float()
x = x * 0.5 * (1.0 + torch.erf(x / 1.41421))
return x.to(x_type)
def bias_gelu(bias, y):
y_type = y.dtype
x = bias.float() + y.float()
x = x * 0.5 * (1.0 + torch.erf(x / 1.41421))
return x.to(y_type)
def bias_tanh(bias, y):
y_type = y.dtype
x = bias.float() + y.float()
x = torch.tanh(x)
return x.to(y_type)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return f_gelu(x)
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
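# Minimal usage sketch (illustrative only): BertConfig.hidden_act stores one of the keys
# above, which LinearActivation resolves to a callable, e.g.
#   act_fn = ACT2FN["gelu"]
#   y = act_fn(torch.randn(2, 4))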
class GPUTimer:
def __init__(self):
super().__init__()
self.start = get_accelerator().Event() # noqa: F821
self.stop = get_accelerator().Event() # noqa: F821
def record(self):
self.start.record()
def elapsed(self):
self.stop.record()
self.stop.synchronize()
return self.start.elapsed_time(self.stop) / 1000.0
class LinearActivation(Module):
r"""Fused Linear and activation Module.
"""
__constants__ = ['bias']
def __init__(self, in_features, out_features, weights, biases, act='gelu', bias=True):
super(LinearActivation, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.fused_gelu = False
self.fused_tanh = False
if isinstance(act, str):
if bias and act == 'gelu':
self.fused_gelu = True
elif bias and act == 'tanh':
self.fused_tanh = True
else:
self.act_fn = ACT2FN[act]
else:
self.act_fn = act
#self.weight = Parameter(torch.Tensor(out_features, in_features))
self.weight = weights[5]
self.bias = biases[5]
#if bias:
# self.bias = Parameter(torch.Tensor(out_features))
#else:
# self.register_parameter('bias', None)
#self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
if self.fused_gelu:
#timing = []
#t1 = GPUTimer()
#t1.record()
y = F.linear(input, self.weight, None)
#timing.append(t1.elapsed())
#t1.record()
bg = bias_gelu(self.bias, y)
#timing.append(t1.elapsed())
return bg
elif self.fused_tanh:
return bias_tanh(self.bias, F.linear(input, self.weight, None))
else:
return self.act_fn(F.linear(input, self.weight, self.bias))
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.in_features, self.out_features, self.bias
is not None)
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
batch_size=8,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
fp16=False):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.batch_size = batch_size
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.fp16 = fp16
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
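# Usage sketch (illustrative, not part of the original file): the class methods and
# serializers above round-trip a configuration, e.g.
#   cfg = BertConfig(vocab_size_or_config_json_file=32000)
#   same = BertConfig.from_dict(json.loads(cfg.to_json_string()))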
try:
import apex
#apex.amp.register_half_function(apex.normalization.fused_layer_norm, 'FusedLayerNorm')
import apex.normalization
#apex.amp.register_float_function(apex.normalization.FusedLayerNorm, 'forward')
BertLayerNorm = apex.normalization.FusedLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
pdtype = x.dtype
x = x.float()
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x.to(pdtype) + self.bias
#def forward(self, x):
# u = x.mean(-1, keepdim=True)
# s = (x - u).pow(2).mean(-1, keepdim=True)
# x = (x - u) / torch.sqrt(s + self.variance_epsilon)
# return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, i, config, weights, biases):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError("The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.query.weight = weights[0]
self.query.bias = biases[0]
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.key.weight = weights[1]
self.key.bias = biases[1]
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.value.weight = weights[2]
self.value.bias = biases[2]
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.softmax = nn.Softmax(dim=-1)
#self.softmax_config = DeepSpeedSoftmaxConfig()
#self.softmax_config.batch_size = config.batch_size
#self.softmax_config.max_seq_length = config.max_position_embeddings
#self.softmax_config.hidden_size = config.hidden_size
#self.softmax_config.heads = config.num_attention_heads
#self.softmax_config.softmax_id = i
#self.softmax_config.fp16 = config.fp16
#self.softmax_config.prob_drop_out = 0.0
#self.softmax = DeepSpeedSoftmax(i, self.softmax_config)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def transpose_key_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 3, 1)
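# Shape sketch (illustrative): for hidden_states of shape [batch, seq_len, hidden],
# transpose_for_scores returns [batch, heads, seq_len, head_size] while
# transpose_key_for_scores returns [batch, heads, head_size, seq_len], so the matmul
# in forward() below directly produces [batch, heads, seq_len, seq_len] attention scores.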
def forward(self, hidden_states, attention_mask, grads=None):
#timing = []
#t1 = GPUTimer()
#t1.record()
mixed_query_layer = self.query(hidden_states)
#timing.append(t1.elapsed())
#print("Query elapsed: %s" % (time.clock() - start))
#t1.record()
mixed_key_layer = self.key(hidden_states)
#timing.append(t1.elapsed())
#print("Key elapsed: %s" % (time.clock() - start))
#t1.record()
mixed_value_layer = self.value(hidden_states)
#timing.append(t1.elapsed())
#print("Value elapsed: %s" % (time.clock() - start))
#t1.record()
query_layer = self.transpose_for_scores(mixed_query_layer)
# print(query_layer)
#timing.append(t1.elapsed())
#print("Query-Transform elapsed: %s" % (time.clock() - start))
#t1.record()
key_layer = self.transpose_key_for_scores(mixed_key_layer)
# print(key_layer)
#timing.append(t1.elapsed())
#print("Key-Transform elapsed: %s" % (time.clock() - start))
#t1.record()
value_layer = self.transpose_for_scores(mixed_value_layer)
#print(value_layer)
#timing.append(t1.elapsed())
#print("Value-Transform elapsed: %s" % (time.clock() - start))
# Take the dot product between "query" and "key" to get the raw attention scores.
#t1.record()
#print(query_layer.shape)
#print(key_layer.shape)
attention_scores = torch.matmul(query_layer, key_layer)
#print(attention_scores.shape)
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
#print("Pytorch: ", attention_scores)
#timing.append(t1.elapsed())
#print("Attention-Score elapsed: %s" % (time.clock() - start))
# Apply the attention mask (precomputed for all layers in BertModel forward() function)
#t1.record()
# context_layer = self.softmax(query_layer, key_layer, value_layer, attention_mask)
#print("context shape is :", context_layer.shape)
#print("Cuda-ext:, ", attention_scores1)
# Normalize the attention scores to probabilities.
####attention_probs = self.softmax(attention_scores)
#timing.append(t1.elapsed())
#print("Softmax elapsed: %s" % (time.clock() - start))
#t1 = GPUTimer()
#t1.record()
attention_scores = attention_scores + attention_mask
attention_probs = self.softmax(attention_scores)
#attention_scores = self.softmax(attention_scores, attention_mask)
#print("Softmax elapse {0:8.2f} ms", t1.elapsed() * 1000)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
#t1.record()
context_layer = torch.matmul(attention_probs, value_layer)
#timing.append(t1.elapsed())
#print("Context elapsed: %s" % (time.clock() - start))
#t1.record()
#context_layer1 = context_layer.permute(
# 0, 1, 3, 2, 4).contiguous()
#if grads is not None:
# context_layer.register_hook(lambda x, self = self : grads.append([x, "Context"]))
context_layer1 = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer1.size()[:-2] + (self.all_head_size, )
context_layer1 = context_layer1.view(*new_context_layer_shape)
#timing.append(t1.elapsed())
#print("Context-Transform elapsed: %s" % (time.clock() - start))
if grads is not None:
query_layer.register_hook(lambda x, self=self: grads.append([x, "Query"]))
key_layer.register_hook(lambda x, self=self: grads.append([x, "Key"]))
value_layer.register_hook(lambda x, self=self: grads.append([x, "Value"]))
return context_layer1
class BertSelfOutput(nn.Module):
def __init__(self, config, weights, biases):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dense.weight = weights[3]
self.dense.bias = biases[3]
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
#timing = []
#t1 = GPUTimer()
#t1.record()
hidden_states = self.dense(hidden_states)
#timing.append(t1.elapsed())
#print("Attention Output elapsed: %s" % (time.clock() - start))
hidden_states = self.dropout(hidden_states)
#t1.record()
#hidden_states = self.LayerNorm(hidden_states + input_tensor)
#timing.append(t1.elapsed())
#print("LayerNorm elapsed: %s" % (time.clock() - start))
return hidden_states
def get_w(self):
return self.dense.weight
class BertAttention(nn.Module):
def __init__(self, i, config, weights, biases):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(i, config, weights, biases)
self.output = BertSelfOutput(config, weights, biases)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
def get_w(self):
return self.output.get_w()
class BertIntermediate(nn.Module):
def __init__(self, config, weights, biases):
super(BertIntermediate, self).__init__()
self.dense_act = LinearActivation(config.hidden_size,
config.intermediate_size,
weights,
biases,
act=config.hidden_act)
def forward(self, hidden_states):
hidden_states = self.dense_act(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config, weights, biases):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dense.weight = weights[6]
self.dense.bias = biases[6]
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
#timing = []
#t1 = GPUTimer()
#t1.record()
#print (hidden_states)
#print (self.dense.weight)
hidden_states = self.dense(hidden_states)
#timing.append(t1.elapsed())
#print("FF2 elapsed: %s" % (time.clock() - start))
hidden_states = self.dropout(hidden_states)
#t1.record()
#hidden_states = self.LayerNorm(hidden_states + input_tensor)
#timing.append(t1.elapsed())
#print("LayerNorm elapsed: %s" % (time.clock() - start))
return hidden_states
class BertLayer(nn.Module):
def __init__(self, i, config, weights, biases):
super(BertLayer, self).__init__()
self.attention = BertAttention(i, config, weights, biases)
self.PreAttentionLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.PostAttentionLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.intermediate = BertIntermediate(config, weights, biases)
self.output = BertOutput(config, weights, biases)
self.weight = weights
self.biases = biases
def forward(self, hidden_states, attention_mask, grads, collect_all_grads=False):
input_layer_norm = self.PreAttentionLayerNorm(hidden_states)
attention_output = self.attention(input_layer_norm, attention_mask)
#print ("hidden shape is :", hidden_states.shape)
intermediate_input = hidden_states + attention_output
intermediate_layer_norm = self.PostAttentionLayerNorm(intermediate_input)
intermediate_output = self.intermediate(intermediate_layer_norm)
layer_output = self.output(intermediate_output, attention_output)
#attention_output = self.attention(hidden_states, attention_mask)
#intermediate_output = self.intermediate(attention_output)
#layer_output = self.output(intermediate_output, attention_output)
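# Note (added for clarity): this layer uses pre-LayerNorm residuals: LayerNorm is applied
# before the attention and feed-forward blocks, and the residual additions happen outside
# them (intermediate_input above and the return at the end of this method). The commented-out
# lines show the original post-LN ordering.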
if collect_all_grads:
# self.weight[0].register_hook(lambda x, self=self: grads.append([x,"Q_W"]))
# self.biases[0].register_hook(lambda x, self=self: grads.append([x,"Q_B"]))
# self.weight[1].register_hook(lambda x, self=self: grads.append([x,"K_W"]))
# self.biases[1].register_hook(lambda x, self=self: grads.append([x,"K_B"]))
self.weight[2].register_hook(lambda x, self=self: grads.append([x, "V_W"]))
self.biases[2].register_hook(lambda x, self=self: grads.append([x, "V_B"]))
self.weight[3].register_hook(lambda x, self=self: grads.append([x, "O_W"]))
self.biases[3].register_hook(lambda x, self=self: grads.append([x, "O_B"]))
self.PostAttentionLayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "N2_W"]))
self.PostAttentionLayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "N2_B"]))
self.weight[5].register_hook(lambda x, self=self: grads.append([x, "int_W"]))
self.biases[5].register_hook(lambda x, self=self: grads.append([x, "int_B"]))
self.weight[6].register_hook(lambda x, self=self: grads.append([x, "out_W"]))
self.biases[6].register_hook(lambda x, self=self: grads.append([x, "out_B"]))
self.PreAttentionLayerNorm.weight.register_hook(lambda x, self=self: grads.append([x, "norm_W"]))
self.PreAttentionLayerNorm.bias.register_hook(lambda x, self=self: grads.append([x, "norm_B"]))
return layer_output + intermediate_input
def get_w(self):
return self.attention.get_w()
class BertEncoder(nn.Module):
def __init__(self, config, weights, biases):
super(BertEncoder, self).__init__()
#layer = BertLayer(config, weights, biases)
self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.layer = nn.ModuleList(
[copy.deepcopy(BertLayer(i, config, weights, biases)) for i in range(config.num_hidden_layers)])
self.grads = []
self.graph = []
def get_grads(self):
return self.grads
# def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
# all_encoder_layers = []
# for layer_module in self.layer:
# hidden_states = layer_module(hidden_states, attention_mask)
# if output_all_encoded_layers:
# all_encoder_layers.append(hidden_states)
# if not output_all_encoded_layers:
# all_encoder_layers.append(hidden_states)
# return all_encoder_layers
def get_modules(self, big_node, input):
for name, mdl in big_node.named_children():
self.graph.append(mdl)
self.get_modules(mdl, input)
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, checkpoint_activations=False):
all_encoder_layers = []
def custom(start, end):
def custom_forward(*inputs):
layers = self.layer[start:end]
x_ = inputs[0]
for layer in layers:
x_ = layer(x_, inputs[1])
return x_
return custom_forward
if checkpoint_activations:
l = 0
num_layers = len(self.layer)
chunk_length = math.ceil(math.sqrt(num_layers))
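# Example (illustrative): with num_layers = 12, chunk_length = ceil(sqrt(12)) = 4, so
# activations are checkpointed in 3 chunks of 4 layers and recomputed chunk by chunk
# during the backward pass.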
while l < num_layers:
hidden_states = checkpoint.checkpoint(custom(l, l + chunk_length), hidden_states, attention_mask * 1)
l += chunk_length
# decoder layers
else:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask, self.grads, collect_all_grads=True)
hidden_states.register_hook(lambda x, i=i, self=self: self.grads.append([x, "hidden_state"]))
#print("pytorch weight is: ", layer_module.get_w())
if output_all_encoded_layers:
all_encoder_layers.append((hidden_states))
if not output_all_encoded_layers or checkpoint_activations:
hidden_states = self.FinalLayerNorm(hidden_states)
all_encoder_layers.append((hidden_states))
return all_encoder_layers
#class BertEncoder(nn.Module):
# def __init__(self, config):
# super(BertEncoder, self).__init__()
# layer = BertLayer(config)
# self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
#
# def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
# all_encoder_layers = []
# for layer_module in self.layer:
# hidden_states = layer_module(hidden_states, attention_mask)
# if output_all_encoded_layers:
# all_encoder_layers.append(hidden_states)
# if not output_all_encoded_layers:
# all_encoder_layers.append(hidden_states)
# return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh")
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense_act(first_token_tensor)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense_act(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
get_accelerator().range_push("decoder input.size() = {}, weight.size() = {}".format(
hidden_states.size(), self.decoder.weight.size()))
hidden_states = self.decoder(hidden_states) + self.bias
get_accelerator().range_pop()
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError("Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls,
pretrained_model_name_or_path,
state_dict=None,
cache_dir=None,
from_tf=False,
*inputs,
**kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = pretrained_model_name_or_path
if resolved_archive_file == archive_file: # noqa: F821
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(archive_file,
resolved_archive_file)) # noqa: F821
tempdir = None
if os.path.isdir(resolved_archive_file) or from_tf: # noqa: F821
serialization_dir = resolved_archive_file # noqa: F821
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, # noqa: F821
tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive: # noqa: F821
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path, map_location='cpu' if not get_accelerator().is_available() else None)
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
if from_tf:
# Directly load from a TensorFlow checkpoint
weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys,
error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(model.__class__.__name__,
unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
class BertModel(BertPreTrainedModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
classifier pretrained on top of the hidden state associated with the first token of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self,
input_ids,
token_type_ids=None,
attention_mask=None,
output_all_encoded_layers=True,
checkpoint_activations=False):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # This attention mask is simpler than the triangular mask used for causal attention
        # in OpenAI GPT; we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
checkpoint_activations=checkpoint_activations)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class BertForPreTraining(BertPreTrainedModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, args):
super(BertForPreTraining, self).__init__(config)
self.summary_writer = None
if dist.get_rank() == 0:
self.summary_writer = args.summary_writer
self.samples_per_step = dist.get_world_size() * args.train_batch_size
self.sample_count = self.samples_per_step
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def log_summary_writer(self, logs: dict, base='Train'):
if dist.get_rank() == 0:
module_name = "Samples" #self._batch_module_name.get(batch_type, self._get_batch_type_error(batch_type))
for key, log in logs.items():
self.summary_writer.add_scalar(f'{base}/{module_name}/{key}', log, self.sample_count)
self.sample_count += self.samples_per_step
def forward(self, batch, log=True):
#input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, checkpoint_activations=False):
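        # The pretraining batch is a tuple; by position: batch[1]=input_ids, batch[2]=attention_mask,
        # batch[3]=token_type_ids, batch[4]=next_sentence_label, batch[5]=masked_lm_labels.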
input_ids = batch[1]
token_type_ids = batch[3]
attention_mask = batch[2]
masked_lm_labels = batch[5]
next_sentence_label = batch[4]
checkpoint_activations = False
sequence_output, pooled_output = self.bert(input_ids,
token_type_ids,
attention_mask,
output_all_encoded_layers=False,
checkpoint_activations=checkpoint_activations)
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
#print("loss is {} {}".format(masked_lm_loss, next_sentence_loss))
total_loss = masked_lm_loss + next_sentence_loss
# if log:
# self.log_summary_writer(logs={'train_loss': total_loss.item()})
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertForMaskedLM(BertPreTrainedModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked); the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self,
input_ids,
token_type_ids=None,
attention_mask=None,
masked_lm_labels=None,
checkpoint_activations=False):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
else:
return prediction_scores
class BertForNextSentencePrediction(BertPreTrainedModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `next_sentence_label` is not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self,
input_ids,
token_type_ids=None,
attention_mask=None,
next_sentence_label=None,
checkpoint_activations=False):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
else:
return seq_relationship_score
class BertForSequenceClassification(BertPreTrainedModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForMultipleChoice(BertPreTrainedModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices):
super(BertForMultipleChoice, self).__init__(config)
self.num_choices = num_choices
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
_, pooled_output = self.bert(flat_input_ids,
flat_token_type_ids,
flat_attention_mask,
output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
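        # The classifier emits one logit per (example, choice) pair; fold the flattened
        # batch back into [batch_size, num_choices] before computing the loss.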
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
else:
return reshaped_logits
class BertForTokenClassification(BertPreTrainedModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, checkpoint_activations=False):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
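            # attention_mask is 0 at padding positions, so restrict the loss to real tokens.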
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForQuestionAnswering(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the token preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
            Positions are clamped to the length of the sequence and positions outside of the sequence are not taken
into account for computing the loss.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self,
input_ids,
token_type_ids=None,
attention_mask=None,
start_positions=None,
end_positions=None,
checkpoint_activations=False):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
logits = self.qa_outputs(sequence_output)
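        # qa_outputs scores every token with two values; split them into separate
        # [batch_size, sequence_length] start and end logits.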
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting the inputs adds an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
else:
return start_logits, end_logits
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import deepspeed
from unit.common import DistributedTest
from deepspeed.git_version_info import version as ds_version
import os
from unit.simple_model import SimpleModel
@pytest.fixture
def ds_config():
config_dict = {
"elasticity": {
"enabled": True,
"max_train_batch_size": 10000,
"micro_batch_sizes": [8, 12, 16, 17],
"min_gpus": 32,
"max_gpus": 1500,
"min_time": 20,
"version": 0.1
}
}
return config_dict
def test_basic_10k(ds_config):
final_batch_size, valid_gpus = deepspeed.elasticity.compute_elastic_config(ds_config=ds_config,
target_deepspeed_version=ds_version)
for gpu_num in valid_gpus:
assert final_batch_size % gpu_num == 0, f"Batch {final_batch_size} is not divisible by GPU count {gpu_num}"
batch_per_gpu = final_batch_size // gpu_num
        found_valid_mb = False
for mb in ds_config['elasticity']['micro_batch_sizes']:
if batch_per_gpu % mb == 0:
found_valid_mb = True
break
assert found_valid_mb, "No valid mb found"
assert len(valid_gpus) == 23
assert final_batch_size == 9792
def test_old_version(ds_config):
with pytest.raises(deepspeed.elasticity.config.ElasticityError):
final_batch_size, valid_gpus = deepspeed.elasticity.compute_elastic_config(ds_config=ds_config,
target_deepspeed_version="0.2")
def test_disabled(ds_config):
ds_config['elasticity']['enabled'] = False
with pytest.raises(deepspeed.elasticity.config.ElasticityError):
final_batch_size, valid_gpus = deepspeed.elasticity.compute_elastic_config(ds_config=ds_config,
target_deepspeed_version=ds_version)
def test_valid_world_size(ds_config):
final_batch_size, valid_gpus, mbsize = deepspeed.elasticity.compute_elastic_config(
ds_config=ds_config, target_deepspeed_version=ds_version, world_size=64)
assert mbsize == 17
def test_invalid_world_size(ds_config):
with pytest.raises(deepspeed.elasticity.config.ElasticityIncompatibleWorldSize):
final_batch_size, valid_gpus, mbsize = deepspeed.elasticity.compute_elastic_config(
ds_config=ds_config, target_deepspeed_version=ds_version, world_size=128)
def test_future_elastic_version(ds_config):
ds_config['elasticity']['version'] = '0.3'
with pytest.raises(deepspeed.elasticity.config.ElasticityError):
deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)
def test_missing_max_batch(ds_config):
del ds_config['elasticity']['max_train_batch_size']
with pytest.raises(deepspeed.elasticity.config.ElasticityError):
deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)
def test_missing_micro_batch(ds_config):
del ds_config['elasticity']['micro_batch_sizes']
with pytest.raises(deepspeed.elasticity.config.ElasticityError):
deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)
def test_empty_config():
ds_config = {"elasticity": {"enabled": True}}
with pytest.raises(deepspeed.elasticity.config.ElasticityError):
deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)
def test_model_parallel_v1_invalid(ds_config):
ds_config["elasticity"]["model_parallel_size"] = 4
ds_config["elasticity"]["num_gpus_per_node"] = 8
ds_config["elasticity"]["version"] = 0.1
with pytest.raises(deepspeed.elasticity.config.ElasticityError):
deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)
def test_model_parallel_v2_invalid(ds_config):
ds_config["elasticity"]["model_parallel_size"] = 16
ds_config["elasticity"]["num_gpus_per_node"] = 8
ds_config["elasticity"]["version"] = 0.2
with pytest.raises(deepspeed.elasticity.config.ElasticityError):
deepspeed.elasticity.compute_elastic_config(ds_config=ds_config,
target_deepspeed_version=ds_version,
world_size=16)
def test_model_parallel_v2_valid(ds_config):
ds_config["elasticity"]["model_parallel_size"] = 4
ds_config["elasticity"]["num_gpus_per_node"] = 8
ds_config["elasticity"]["version"] = 0.2
os.environ["WORLD_SIZE"] = str(16)
deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)
os.environ.pop("WORLD_SIZE")
@pytest.mark.parametrize('key, value', [('micro_batch_sizes', [1, 4, -1, 2, -10]), ('min_gpus', -1), ('max_gpus', -1),
('micro_batch_sizes', 5), ('micro_batch_sizes', ['a', None, 0.5]),
('micro_batch_sizes', [2, 0.5, 4])])
def test_invalid_config_values(key, value, ds_config):
ds_config['elasticity'][key] = value
with pytest.raises(deepspeed.elasticity.config.ElasticityError):
deepspeed.elasticity.compute_elastic_config(ds_config=ds_config, target_deepspeed_version=ds_version)
def test_proper_mbsz(ds_config):
ds_config["elasticity"]["max_train_batch_size"] = 32
ds_config["elasticity"]["micro_batch_sizes"] = [1, 2, 3, 7]
ds_config["elasticity"]["min_gpus"] = 1
final_batch_size, valid_gpus, mbsize = deepspeed.elasticity.compute_elastic_config(
ds_config=ds_config, target_deepspeed_version=ds_version, world_size=7)
assert mbsize == 3
class TestNonElasticBatchParams(DistributedTest):
world_size = 2
def test(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"elasticity": {
"enabled": True,
"max_train_batch_size": 4,
"micro_batch_sizes": [1, 2, 3, 4],
"min_gpus": 1,
"max_gpus": 4,
"min_time": 20,
"version": 0.1
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
with pytest.raises(deepspeed.elasticity.config.ElasticityError):
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
class TestNonElasticBatchParamsWithOverride(DistributedTest):
world_size = 2
def test(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"elasticity": {
"enabled": True,
"max_train_batch_size": 4,
"micro_batch_sizes": [1, 2, 3, 4],
"min_gpus": 1,
"max_gpus": 4,
"min_time": 20,
"version": 0.1,
"ignore_non_elastic_batch_info": True
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
class TestElasticConfigChanged(DistributedTest):
world_size = 2
def test(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"elasticity": {
"enabled": True,
"max_train_batch_size": 4,
"micro_batch_sizes": [1, 2, 3, 4],
"min_gpus": 1,
"max_gpus": 4,
"min_time": 20,
"version": 0.1,
"ignore_non_elastic_batch_info": True
}
}
import json, os
scheduler_elastic_config = config_dict.copy()
scheduler_elastic_config["elasticity"]["max_train_batch_size"] = 27
os.environ['DEEPSPEED_ELASTICITY_CONFIG'] = json.dumps(scheduler_elastic_config)
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
with pytest.raises(deepspeed.elasticity.config.ElasticityError):
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
from deepspeed.launcher import runner as dsrun
def test_parser_mutual_exclusive():
'''Ensure dsrun.parse_resource_filter() raises a ValueError when include_str and
exclude_str are both provided.
'''
with pytest.raises(ValueError):
dsrun.parse_resource_filter({}, include_str='A', exclude_str='B')
def test_parser_local():
''' Test cases with only one node. '''
# First try no include/exclude
hosts = {'worker-0': [0, 1, 2, 3]}
ret = dsrun.parse_resource_filter(hosts)
assert (ret == hosts)
# exclude slots
ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-0:1')
assert (ret == {'worker-0': [0, 2, 3]})
ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-0:1,2')
assert (ret == {'worker-0': [0, 3]})
# only use one slot
ret = dsrun.parse_resource_filter(hosts, include_str='worker-0:1')
assert (ret == {'worker-0': [1]})
# including slots multiple times shouldn't break things
ret = dsrun.parse_resource_filter(hosts, include_str='worker-0:1,1')
assert (ret == {'worker-0': [1]})
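    # '@' separates per-host entries within a single include/exclude string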
ret = dsrun.parse_resource_filter(hosts, include_str='worker-0:1@worker-0:0,1')
assert (ret == {'worker-0': [0, 1]})
# including just 'worker-0' without : should still use all GPUs
ret = dsrun.parse_resource_filter(hosts, include_str='worker-0')
assert (ret == hosts)
# excluding just 'worker-0' without : should eliminate everything
ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-0')
assert (ret == {})
# exclude all slots manually
ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-0:0,1,2,3')
assert (ret == {})
def test_parser_multinode():
# First try no include/exclude
hosts = {'worker-0': [0, 1, 2, 3], 'worker-1': [0, 1, 2, 3]}
ret = dsrun.parse_resource_filter(hosts)
assert (ret == hosts)
# include a node
ret = dsrun.parse_resource_filter(hosts, include_str='worker-1:0,3')
assert (ret == {'worker-1': [0, 3]})
# exclude a node
ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-1')
assert (ret == {'worker-0': [0, 1, 2, 3]})
# exclude part of each node
ret = dsrun.parse_resource_filter(hosts, exclude_str='worker-0:0,1@worker-1:3')
assert (ret == {'worker-0': [2, 3], 'worker-1': [0, 1, 2]})
def test_parser_errors():
'''Ensure we catch errors. '''
hosts = {'worker-0': [0, 1, 2, 3], 'worker-1': [0, 1, 2, 3]}
# host does not exist
with pytest.raises(ValueError):
dsrun.parse_resource_filter(hosts, include_str='jeff')
with pytest.raises(ValueError):
dsrun.parse_resource_filter(hosts, exclude_str='jeff')
# slot does not exist
with pytest.raises(ValueError):
dsrun.parse_resource_filter(hosts, include_str='worker-1:4')
with pytest.raises(ValueError):
dsrun.parse_resource_filter(hosts, exclude_str='worker-1:4')
# formatting
with pytest.raises(ValueError):
dsrun.parse_resource_filter(hosts, exclude_str='worker-1@worker-0:1@5')
def test_num_plus_parser():
''' Ensure we catch errors relating to num_nodes/num_gpus + -i/-e being mutually exclusive'''
# inclusion
with pytest.raises(ValueError):
dsrun.main(args="--num_nodes 1 -i localhost foo.py".split())
with pytest.raises(ValueError):
dsrun.main(args="--num_nodes 1 --num_gpus 1 -i localhost foo.py".split())
with pytest.raises(ValueError):
dsrun.main(args="--num_gpus 1 -i localhost foo.py".split())
# exclusion
with pytest.raises(ValueError):
dsrun.main(args="--num_nodes 1 -e localhost foo.py".split())
with pytest.raises(ValueError):
dsrun.main(args="--num_nodes 1 --num_gpus 1 -e localhost foo.py".split())
with pytest.raises(ValueError):
dsrun.main(args="--num_gpus 1 -e localhost foo.py".split())
def test_hostfile_good():
# good hostfile w. empty lines and comment
hostfile = """
worker-1 slots=2
worker-2 slots=2
localhost slots=1
123.23.12.10 slots=2
#worker-1 slots=3
# this is a comment
"""
r = dsrun._parse_hostfile(hostfile.splitlines())
assert "worker-1" in r
assert "worker-2" in r
assert "localhost" in r
assert "123.23.12.10" in r
assert r["worker-1"] == 2
assert r["worker-2"] == 2
assert r["localhost"] == 1
assert r["123.23.12.10"] == 2
assert len(r) == 4
def test_hostfiles_bad():
# duplicate host
hostfile = """
worker-1 slots=2
worker-2 slots=1
worker-1 slots=1
"""
with pytest.raises(ValueError):
dsrun._parse_hostfile(hostfile.splitlines())
# incorrect whitespace
hostfile = """
this is bad slots=1
"""
with pytest.raises(ValueError):
dsrun._parse_hostfile(hostfile.splitlines())
# no whitespace
hostfile = """
missingslots
"""
with pytest.raises(ValueError):
dsrun._parse_hostfile(hostfile.splitlines())
# empty
hostfile = """
"""
with pytest.raises(ValueError):
dsrun._parse_hostfile(hostfile.splitlines())
# mix of good/bad
hostfile = """
worker-1 slots=2
this is bad slots=1
worker-2 slots=4
missingslots
"""
with pytest.raises(ValueError):
dsrun._parse_hostfile(hostfile.splitlines())
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import argparse
import pytest
import deepspeed
from deepspeed.utils.numa import parse_range_list
def basic_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--num_epochs', type=int)
return parser
def test_no_ds_arguments_no_ds_parser():
parser = basic_parser()
args = parser.parse_args(['--num_epochs', '2'])
assert args
assert hasattr(args, 'num_epochs')
assert args.num_epochs == 2
assert not hasattr(args, 'deepspeed')
assert not hasattr(args, 'deepspeed_config')
def test_no_ds_arguments():
parser = basic_parser()
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args(['--num_epochs', '2'])
assert args
assert hasattr(args, 'num_epochs')
assert args.num_epochs == 2
assert hasattr(args, 'deepspeed')
assert args.deepspeed == False
assert hasattr(args, 'deepspeed_config')
assert args.deepspeed_config == None
def test_no_ds_enable_argument():
parser = basic_parser()
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args(['--num_epochs', '2', '--deepspeed_config', 'foo.json'])
assert args
assert hasattr(args, 'num_epochs')
assert args.num_epochs == 2
assert hasattr(args, 'deepspeed')
assert args.deepspeed == False
assert hasattr(args, 'deepspeed_config')
assert type(args.deepspeed_config) == str
assert args.deepspeed_config == 'foo.json'
def test_no_ds_config_argument():
parser = basic_parser()
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args(['--num_epochs', '2', '--deepspeed'])
assert args
assert hasattr(args, 'num_epochs')
assert args.num_epochs == 2
assert hasattr(args, 'deepspeed')
assert type(args.deepspeed) == bool
assert args.deepspeed == True
assert hasattr(args, 'deepspeed_config')
assert args.deepspeed_config == None
def test_no_ds_parser():
parser = basic_parser()
with pytest.raises(SystemExit):
args = parser.parse_args(['--num_epochs', '2', '--deepspeed'])
def test_core_deepscale_arguments():
parser = basic_parser()
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args(['--num_epochs', '2', '--deepspeed', '--deepspeed_config', 'foo.json'])
assert args
assert hasattr(args, 'num_epochs')
assert args.num_epochs == 2
assert hasattr(args, 'deepspeed')
assert type(args.deepspeed) == bool
assert args.deepspeed == True
assert hasattr(args, 'deepspeed_config')
assert type(args.deepspeed_config) == str
assert args.deepspeed_config == 'foo.json'
def test_core_binding_arguments():
core_list = parse_range_list("0,2-4,6,8-9")
assert core_list == [0, 2, 3, 4, 6, 8, 9]
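    # parse_range_list expands a comma-separated mix of single cores and inclusive
    # 'start-end' ranges; entries must be ascending and must not overlap.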
    # negative case for range overlapping -- an invalid core list must raise ValueError
    with pytest.raises(ValueError):
        parse_range_list("0,2-6,5-9")
    # negative case for reverse order -- case 1
    with pytest.raises(ValueError):
        parse_range_list("8,2-6")
    # negative case for reverse order -- case 2
    with pytest.raises(ValueError):
        parse_range_list("1,6-2")
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from copy import deepcopy
from deepspeed.launcher import multinode_runner as mnrunner
from deepspeed.launcher.runner import encode_world_info, parse_args
import os
import pytest
@pytest.fixture
def runner_info():
hosts = {'worker-0': 4, 'worker-1': 4}
world_info = encode_world_info(hosts)
env = deepcopy(os.environ)
args = parse_args(['test_launcher.py'])
return env, hosts, world_info, args
def test_pdsh_runner(runner_info):
env, resource_pool, world_info, args = runner_info
runner = mnrunner.PDSHRunner(args, world_info)
cmd, kill_cmd = runner.get_cmd(env, resource_pool)
assert cmd[0] == 'pdsh'
assert env['PDSH_RCMD_TYPE'] == 'ssh'
def test_openmpi_runner(runner_info):
env, resource_pool, world_info, args = runner_info
runner = mnrunner.OpenMPIRunner(args, world_info, resource_pool)
cmd = runner.get_cmd(env, resource_pool)
assert cmd[0] == 'mpirun'
def test_mpich_runner(runner_info):
env, resource_pool, world_info, args = runner_info
runner = mnrunner.MPICHRunner(args, world_info, resource_pool)
cmd = runner.get_cmd(env, resource_pool)
assert cmd[0] == 'mpirun'
def test_slurm_runner(runner_info):
env, resource_pool, world_info, args = runner_info
runner = mnrunner.SlurmRunner(args, world_info, resource_pool)
cmd = runner.get_cmd(env, resource_pool)
assert cmd[0] == 'srun'
def test_mvapich_runner(runner_info):
env, resource_pool, world_info, args = runner_info
runner = mnrunner.MVAPICHRunner(args, world_info, resource_pool)
cmd = runner.get_cmd(env, resource_pool)
assert cmd[0] == 'mpirun'
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import copy
import torch
import torch.nn as nn
import deepspeed.comm as dist
import pytest
import deepspeed
from deepspeed.pipe import PipelineModule
from deepspeed.utils import RepeatingLoader
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
HIDDEN_DIM = 32
LAYERS = 8
@pytest.fixture
def sequential_model():
model = torch.nn.Sequential(
*[nn.Linear(HIDDEN_DIM, HIDDEN_DIM) for _ in range(LAYERS)],
nn.Linear(HIDDEN_DIM, 1),
)
return model
@pytest.fixture
def simple_config():
config_dict = {
"train_batch_size": 2,
"train_micro_batch_size_per_gpu": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.001,
"betas": [0.9, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"pipeline": {
"activation_checkpoint_interval": 1
}
}
return config_dict
@pytest.fixture
def batch_input():
return torch.randn(1, HIDDEN_DIM)
class TestPipeModuleSequential(DistributedTest):
world_size = 2
@pytest.mark.parametrize("activation_checkpoints", [False, True])
def test(self, sequential_model, simple_config, batch_input, activation_checkpoints):
base_model = copy.deepcopy(sequential_model)
base_input = batch_input.clone().detach()
base_output = base_model(base_input)
base_params = sum(p.numel() for p in base_model.parameters())
pipe_model = copy.deepcopy(sequential_model)
pipe_model = PipelineModule(layers=pipe_model, num_stages=2)
# Ensure all parameters are accounted for.
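        # Each rank only holds its own stage's layers, so all-reducing the per-rank
        # parameter counts should recover the size of the original sequential model.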
my_params = sum(p.numel() for p in pipe_model.parameters())
total_pipe_params = torch.LongTensor([my_params]).to(get_accelerator().device_name())
dist.all_reduce(total_pipe_params)
total_pipe_params = total_pipe_params.item()
assert total_pipe_params == base_params
pipe_model, _, _, _ = deepspeed.initialize(config=simple_config,
model=pipe_model,
model_parameters=[p for p in pipe_model.parameters()])
if activation_checkpoints:
deepspeed.checkpointing.configure(None,
deepspeed_config=pipe_model.config,
partition_activations=True,
contiguous_checkpointing=True,
num_checkpoints=9)
if pipe_model.is_first_stage or pipe_model.is_last_stage:
pipe_input = base_input.clone().detach().to(get_accelerator().device_name())
# label 0 is meaningless
dataset = [(pipe_input, 0)]
loader = RepeatingLoader(dataset)
data_iter = iter(loader)
else:
data_iter = None
pipe_output = pipe_model.eval_batch(data_iter=data_iter)
base_output = base_output.to('cpu')
pipe_output = pipe_output.to('cpu')
assert torch.allclose(base_output, pipe_output, atol=1e-4)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.monitor.tensorboard import TensorBoardMonitor
from deepspeed.monitor.wandb import WandbMonitor
from deepspeed.monitor.csv_monitor import csvMonitor
from deepspeed.monitor.config import DeepSpeedMonitorConfig
from unit.common import DistributedTest
from deepspeed.runtime.config import DeepSpeedConfig
class TestTensorBoard(DistributedTest):
world_size = 2
def test_tensorboard(self):
config_dict = {
"train_batch_size": 2,
"tensorboard": {
"enabled": True,
"output_path": "test_output/ds_logs/",
"job_name": "test"
}
}
ds_config = DeepSpeedConfig(config_dict)
tb_monitor = TensorBoardMonitor(ds_config.monitor_config.tensorboard)
assert tb_monitor.enabled == True
assert tb_monitor.output_path == "test_output/ds_logs/"
assert tb_monitor.job_name == "test"
def test_empty_tensorboard(self):
config_dict = {"train_batch_size": 2, "tensorboard": {}}
ds_config = DeepSpeedConfig(config_dict)
tb_monitor = TensorBoardMonitor(ds_config.monitor_config.tensorboard)
defaults = DeepSpeedMonitorConfig().tensorboard
assert tb_monitor.enabled == defaults.enabled
assert tb_monitor.output_path == defaults.output_path
assert tb_monitor.job_name == defaults.job_name
class TestWandB(DistributedTest):
world_size = 2
def test_wandb(self):
config_dict = {
"train_batch_size": 2,
"wandb": {
"enabled": False,
"group": "my_group",
"team": "my_team",
"project": "my_project"
}
}
ds_config = DeepSpeedConfig(config_dict)
wandb_monitor = WandbMonitor(ds_config.monitor_config.wandb)
assert wandb_monitor.enabled == False
assert wandb_monitor.group == "my_group"
assert wandb_monitor.team == "my_team"
assert wandb_monitor.project == "my_project"
def test_empty_wandb(self):
config_dict = {"train_batch_size": 2, "wandb": {}}
ds_config = DeepSpeedConfig(config_dict)
wandb_monitor = WandbMonitor(ds_config.monitor_config.wandb)
defaults = DeepSpeedMonitorConfig().wandb
assert wandb_monitor.enabled == defaults.enabled
assert wandb_monitor.group == defaults.group
assert wandb_monitor.team == defaults.team
assert wandb_monitor.project == defaults.project
class TestCSVMonitor(DistributedTest):
world_size = 2
def test_csv_monitor(self):
config_dict = {
"train_batch_size": 2,
"csv_monitor": {
"enabled": True,
"output_path": "test_output/ds_logs/",
"job_name": "test"
}
}
ds_config = DeepSpeedConfig(config_dict)
csv_monitor = csvMonitor(ds_config.monitor_config.csv_monitor)
assert csv_monitor.enabled == True
assert csv_monitor.output_path == "test_output/ds_logs/"
assert csv_monitor.job_name == "test"
def test_empty_csv_monitor(self):
config_dict = {"train_batch_size": 2, "csv_monitor": {}}
ds_config = DeepSpeedConfig(config_dict)
csv_monitor = csvMonitor(ds_config.monitor_config.csv_monitor)
defaults = DeepSpeedMonitorConfig().csv_monitor
assert csv_monitor.enabled == defaults.enabled
assert csv_monitor.output_path == defaults.output_path
assert csv_monitor.job_name == defaults.job_name
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.checkpoint import model_3d_desc
def _do_reshape(src_3d, tgt_3d):
assert src_3d.can_reshape(tgt_3d)
new_3d_map = src_3d.reshape(tgt_3d)
assert len(new_3d_map) == tgt_3d.dp_degree
for new_2d_map in new_3d_map:
assert new_2d_map.pp_degree == tgt_3d.pp_degree
assert new_2d_map.tp_degree == tgt_3d.tp_degree
return new_3d_map
# Specify 3d shape as pp/tp/dp
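# get_data() returns the global ranks of the source topology whose checkpoint shards
# map onto the given (pp_index, tp_index) slice of the target topology.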
def test_reshape_222_to_111():
src_3d = model_3d_desc(pp_degree=2, tp_degree=2, dp_degree=2)
tgt_3d = model_3d_desc(pp_degree=1, tp_degree=1, dp_degree=1)
new_3d_map = _do_reshape(src_3d, tgt_3d)
assert new_3d_map[0].get_data(pp_index=0, tp_index=0) == [0, 4, 1, 5, 2, 6, 3, 7]
def test_reshape_222_to_121():
src_3d = model_3d_desc(pp_degree=2, tp_degree=2, dp_degree=2)
tgt_3d = model_3d_desc(pp_degree=1, tp_degree=2, dp_degree=1)
new_3d_map = _do_reshape(src_3d, tgt_3d)
assert new_3d_map[0].get_data(pp_index=0, tp_index=0) == [0, 4, 2, 6]
assert new_3d_map[0].get_data(pp_index=0, tp_index=1) == [1, 5, 3, 7]
def test_reshape_222_to_122():
src_3d = model_3d_desc(pp_degree=2, tp_degree=2, dp_degree=2)
tgt_3d = model_3d_desc(pp_degree=1, tp_degree=2, dp_degree=2)
new_3d_map = _do_reshape(src_3d, tgt_3d)
assert new_3d_map[0].get_data(pp_index=0, tp_index=0) == [0, 4]
assert new_3d_map[0].get_data(pp_index=0, tp_index=1) == [1, 5]
assert new_3d_map[1].get_data(pp_index=0, tp_index=0) == [2, 6]
assert new_3d_map[1].get_data(pp_index=0, tp_index=1) == [3, 7]
def test_reshape_222_to_211():
src_3d = model_3d_desc(pp_degree=2, tp_degree=2, dp_degree=2)
tgt_3d = model_3d_desc(pp_degree=2, tp_degree=1, dp_degree=1)
new_3d_map = _do_reshape(src_3d, tgt_3d)
assert new_3d_map[0].get_data(pp_index=0, tp_index=0) == [0, 4, 1, 5]
assert new_3d_map[0].get_data(pp_index=1, tp_index=0) == [2, 6, 3, 7]
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer
from unit.common import DistributedTest
from unit.simple_model import *
from unit.util import required_torch_version
from unit.checkpoint.common import checkpoint_correctness_verification
import pytest
class TestMoECheckpoint(DistributedTest):
world_size = 4
@pytest.mark.parametrize("ep_size", [4])
def test_checkpoint_moe(self, tmpdir, ep_size):
if not required_torch_version():
pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly")
config_dict = {"train_batch_size": 8, "steps_per_print": 1, "fp16": {"enabled": True}}
hidden_dim = 16
models = [SimpleMoEModel(hidden_dim=hidden_dim, num_experts=ep_size, ep_size=ep_size) for _ in range(2)]
optimizers = [torch.optim.AdamW(params=model.parameters()) for model in models]
checkpoint_correctness_verification(config_dict,
models=models,
hidden_dim=hidden_dim,
tmpdir=tmpdir,
load_optimizer_states=True,
load_lr_scheduler_states=False,
fp16=config_dict["fp16"]["enabled"],
empty_tag=True,
base_optimizers=optimizers,
seq_dataloader=True)
@pytest.mark.parametrize("ep_size, load_optim_states", [(4, True), (4, False), (2, True), (2, False)])
def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states):
if not required_torch_version():
pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly")
config_dict = {
"train_batch_size": 8,
"steps_per_print": 1,
"optimizer": {
"type": 'Adam',
"params": {
"lr": 0.00015,
"betas": [0.8, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"zero_optimization": {
"stage": 2,
}
}
hidden_dim = 16
models = [SimpleMoEModel(hidden_dim=hidden_dim, num_experts=ep_size, ep_size=ep_size) for _ in range(2)]
# param group must have a random unique name (for now)
# TODO: clean-up this requirement, the unique name should not be required here
param_groups = [{'params': [p for p in model.parameters()], 'name': 'random-unique-name'} for model in models]
params = [split_params_into_different_moe_groups_for_optimizer(group) for group in param_groups]
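        # Splitting the MoE parameters into separate optimizer groups lets DeepSpeed/ZeRO
        # handle expert-parallel parameters independently of the dense parameters.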
optimizers = [torch.optim.AdamW(params=param) for param in params]
checkpoint_correctness_verification(config_dict,
models=models,
hidden_dim=hidden_dim,
tmpdir=tmpdir,
load_optimizer_states=load_optim_states,
load_lr_scheduler_states=False,
fp16=config_dict["fp16"]["enabled"],
empty_tag=True,
base_optimizers=optimizers,
seq_dataloader=True)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
from deepspeed.ops.op_builder import FusedLambBuilder
from unit.common import DistributedTest
from unit.simple_model import *
from unit.checkpoint.common import checkpoint_correctness_verification
import pytest
class TestOtherOptimizerCheckpoint(DistributedTest):
world_size = 2
@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible")
def test_checkpoint_unfused_optimizer(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True
},
"scheduler": {
"type": "OneCycle",
"params": {
"cycle_first_step_size": 1000,
"cycle_first_stair_count": 500,
"cycle_second_step_size": 1000,
"cycle_second_stair_count": 500,
"decay_step_size": 1000,
"cycle_min_lr": 0.0001,
"cycle_max_lr": 0.0010,
"decay_lr_rate": 0.001,
"cycle_min_mom": 0.85,
"cycle_max_mom": 0.99,
"decay_mom_rate": 0.0
}
}
}
args = args_from_dict(tmpdir, config_dict)
hidden_dim = 10
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
# Load & verify optimizer states
checkpoint_correctness_verification(config_dict,
models=models,
hidden_dim=hidden_dim,
tmpdir=tmpdir,
load_optimizer_states=True)
# Ignore optimizer states
checkpoint_correctness_verification(config_dict,
models=models,
hidden_dim=hidden_dim,
tmpdir=tmpdir,
load_optimizer_states=False)
def test_checkpoint_fused_optimizer(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015,
"betas": [0.8, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"fp16": {
"enabled": True
}
}
args = args_from_dict(tmpdir, config_dict)
hidden_dim = 10
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
# Load & verify optimizer states
checkpoint_correctness_verification(config_dict,
models=models,
hidden_dim=hidden_dim,
tmpdir=tmpdir,
load_optimizer_states=True)
# Ignore optimizer states
checkpoint_correctness_verification(config_dict,
models=models,
hidden_dim=hidden_dim,
tmpdir=tmpdir,
load_optimizer_states=False)
def test_checkpoint_fp32_optimizer(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015,
"betas": [0.8, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"fp16": {
"enabled": False
}
}
args = args_from_dict(tmpdir, config_dict)
hidden_dim = 10
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
checkpoint_correctness_verification(config_dict,
models=models,
hidden_dim=hidden_dim,
tmpdir=tmpdir,
fp16=False)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
from unit.common import DistributedTest
from unit.simple_model import *
import pytest
class TestSparseCheckpoint(DistributedTest):
world_size = 2
@pytest.mark.parametrize(["to_save_model_has_embedding", "to_save_model_sparse"], [
[False, False],
[True, False],
[True, True],
])
@pytest.mark.parametrize(["destination_has_embedding", "destination_sparse"], [
[False, False],
[True, False],
[True, True],
])
def test_non_strict_load_sparse(self, tmpdir, to_save_model_has_embedding, to_save_model_sparse,
destination_has_embedding, destination_sparse):
class ModelNoEmbedding(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 1)
def forward(self, x):
return self.linear(x)
class ModelEmbedding(torch.nn.Module):
def __init__(self):
super().__init__()
                self.emb = torch.nn.EmbeddingBag(10, 3, mode="sum", sparse=True)
self.linear = torch.nn.Linear(3, 1)
def forward(self, x, offsets):
return self.linear(self.emb(x, offsets))
if to_save_model_has_embedding:
model_to_save = ModelEmbedding()
else:
model_to_save = ModelNoEmbedding()
if destination_has_embedding:
model_destination = ModelEmbedding()
else:
model_destination = ModelNoEmbedding()
engine_to_save, _, _, _ = deepspeed.initialize(model=model_to_save,
config={
"train_batch_size": 2,
"sparse_gradients": to_save_model_sparse
})
engine_destination, _, _, _ = deepspeed.initialize(model=model_destination,
config={
"train_batch_size": 2,
"sparse_gradients": destination_sparse
})
save_folder = os.path.join(tmpdir, 'saved_checkpoint')
save_tag = '1'
engine_to_save.save_checkpoint(save_folder, tag=save_tag)
is_sparse_destination = isinstance(model_destination, ModelEmbedding) and destination_sparse
if isinstance(model_destination, ModelEmbedding) and model_destination.emb.sparse:
assert "emb.weight" in engine_destination.sparse_tensor_module_names
engine_destination.load_checkpoint(save_folder,
tag=save_tag,
load_module_strict=False,
load_optimizer_states=False,
load_lr_scheduler_states=False,
load_module_only=False)
if isinstance(model_destination, ModelEmbedding) and isinstance(model_to_save, ModelEmbedding):
assert engine_destination.sparse_tensor_module_names == engine_to_save.sparse_tensor_module_names
elif isinstance(model_destination, ModelEmbedding):
assert not is_sparse_destination or "emb.weight" in engine_destination.sparse_tensor_module_names
else:
assert len(engine_destination.sparse_tensor_module_names) == 0
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
from unit.common import DistributedTest
from unit.simple_model import *
import pytest
class TestCheckpointValidationTag(DistributedTest):
world_size = 2
@pytest.mark.parametrize('valid_mode', ["FAIL", "WARN", "IGNORE"])
def test_checkpoint_unique_tag(self, tmpdir, valid_mode):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"checkpoint": {
"tag_validation": valid_mode
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
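        # The tag embeds the rank, so it differs across ranks: FAIL mode must raise,
        # while WARN and IGNORE modes let the save proceed.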
if valid_mode == "FAIL":
with pytest.raises(AssertionError):
model.save_checkpoint(save_dir=tmpdir, tag=f"tag-{dist.get_rank()}")
else:
model.save_checkpoint(save_dir=tmpdir, tag=f"tag-{dist.get_rank()}")
def test_checkpoint_unknown_tag_validation(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"checkpoint": {
"tag_validation": "foo"
}
}
hidden_dim = 10
args = args_from_dict(tmpdir, config_dict)
model = SimpleModel(hidden_dim)
with pytest.raises(deepspeed.DeepSpeedConfigError):
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
from deepspeed.ops.op_builder import CPUAdamBuilder
from deepspeed.checkpoint.utils import clone_tensors_for_torch_save
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest, DistributedFixture
from unit.simple_model import *
from unit.util import required_minimum_torch_version
from unit.checkpoint.common import *
import pytest
class TestZeROCheckpoint(DistributedTest):
world_size = 2
@pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, 'Adam'), (2, False, 'Adam'),
(2, True, 'deepspeed_adam'),
(3, False, 'Adam'),
(3, True, 'deepspeed_adam')])
def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer):
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": 'Adam',
"params": {
"lr": 0.00015,
"betas": [0.8, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"wall_clock_breakdown": True,
"zero_optimization": {
"stage": zero_stage,
"cpu_offload": use_cpu_offload
}
}
hidden_dim = 10
if zero_stage == 3:
with deepspeed.zero.Init():
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
else:
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True)
@pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, "Adam"), (2, False, "Adam"),
(2, True, 'deepspeed_adam'),
(3, False, 'Adam'),
(3, True, 'deepspeed_adam')])
def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer):
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": 'Adam',
"params": {
"lr": 0.00015,
"betas": [0.8, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": zero_stage,
"cpu_offload": use_cpu_offload
}
}
hidden_dim = 10
if zero_stage == 3:
global DeepSpeedZeroOptimizer_Stage3
from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3
with deepspeed.zero.Init():
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
else:
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False)
@pytest.mark.parametrize('zero_stage', [1, 2])
def test_hybrid_optimizer_state(self, tmpdir, zero_stage):
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 2,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage
},
"zero_allow_untested_optimizer": True,
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
hidden_dim = 10
models = [SimpleModel(hidden_dim=hidden_dim) for _ in range(2)]
optimizers = [HybridStateOptimizer(model.parameters()) for model in models]
checkpoint_correctness_verification(config_dict,
models=models,
base_optimizers=optimizers,
hidden_dim=hidden_dim,
tmpdir=tmpdir,
load_optimizer_states=True)
@pytest.mark.parametrize('zero_stage', [0, 1, 2, 3])
def test_load_module_only(self, tmpdir, zero_stage):
config_dict = {
"train_batch_size": 2,
"optimizer": {
"type": 'Adam'
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"zero_optimization": {
"stage": zero_stage,
}
}
hidden_dim = 10
if zero_stage == 3:
with deepspeed.zero.Init():
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
else:
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True)
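# Fixture that trains and checkpoints a model at data-parallel world size 4 so the
# elastic-checkpoint tests below can try to reload it at a different DP degree.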
class ws4_model_checkpoint(DistributedFixture):
world_size = 4
def run(self, class_tmpdir, elastic_save, load_optim):
ds_config = {
"train_batch_size": 4,
"optimizer": {
"type": 'Adam'
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"zero_optimization": {
"stage": 2,
"elastic_checkpoint": elastic_save
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
if load_optim:
torch.save(model.optimizer.optimizer.state_dict(), os.path.join(class_tmpdir, 'opt-state-dict'))
model.save_checkpoint(class_tmpdir)
@pytest.mark.parametrize("elastic_save", [True, False])
@pytest.mark.parametrize("elastic_load", [True, False])
@pytest.mark.parametrize("load_optim", [True, False])
class TestZeROElasticCheckpoint(DistributedTest):
world_size = 2
def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, load_optim):
ds_config = {
"train_batch_size": 2,
"optimizer": {
"type": 'Adam'
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"zero_optimization": {
"stage": 2,
"elastic_checkpoint": elastic_save
}
}
hidden_dim = 10
# torch 1.2.* stores raw tensor id numbers in checkpoint state which leads to
# false positive mismatches in checkpoint state comparisons.
# Newer torch versions store tensor ids as 0, 1, 2, ...
expected_mismatch_keys = [] if required_minimum_torch_version(1, 4) else ['params']
models = [SimpleModel(hidden_dim) for _ in range(2)]
model, _, _, _ = deepspeed.initialize(config=ds_config,
model=models[0],
model_parameters=models[0].parameters())
data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
if load_optim:
torch.save(model.optimizer.optimizer.state_dict(), os.path.join(tmpdir, 'opt-state-dict'))
model.save_checkpoint(tmpdir)
ds_config["zero_optimization"]["elastic_checkpoint"] = elastic_load
model, _, _, _ = deepspeed.initialize(config=ds_config,
model=models[1],
model_parameters=models[1].parameters())
model.load_checkpoint(tmpdir, load_optimizer_states=load_optim)
if load_optim:
saved_sd = torch.load(os.path.join(tmpdir, 'opt-state-dict'))
curr_sd = model.optimizer.optimizer.state_dict()
for curr_param_group, saved_param_group in zip(curr_sd['param_groups'], saved_sd['param_groups']):
compare_state_dicts(curr_param_group, saved_param_group, expected_mismatch_keys)
data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, elastic_save, elastic_load,
load_optim):
ds_config = {
"train_batch_size": 4,
"optimizer": {
"type": 'Adam'
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"zero_optimization": {
"stage": 2,
"elastic_checkpoint": elastic_load
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
# Load checkpoint with dp world size = 2
model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model.parameters())
if load_optim:
with pytest.raises(deepspeed.runtime.zero.utils.ZeRORuntimeException):
model.load_checkpoint(class_tmpdir, load_optimizer_states=load_optim)
else:
model.load_checkpoint(class_tmpdir, load_optimizer_states=load_optim)
class TestZeROSaveLoadEdgeCase(DistributedTest):
world_size = 2
@pytest.mark.parametrize('zero_stage', [0, 1, 2, 3])
def test_immediate_save_load(self, tmpdir, zero_stage):
config_dict = {
"train_batch_size": 4,
"optimizer": {
"type": 'Adam'
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"zero_optimization": {
"stage": zero_stage,
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None)
ds_model.save_checkpoint(tmpdir)
ds_model.load_checkpoint(tmpdir,
load_optimizer_states=False,
load_lr_scheduler_states=False,
load_module_only=False)
@pytest.mark.parametrize('zero_stage', [0, 1, 2, 3])
def test_load_immediate_save(self, tmpdir, zero_stage):
config_dict = {
"train_batch_size": 4,
"optimizer": {
"type": 'Adam'
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"zero_optimization": {
"stage": zero_stage,
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
# 1. pretrain a model and save it
dtype = torch.half
ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None)
data_loader = random_dataloader(model=ds_model,
total_samples=1,
hidden_dim=hidden_dim,
device=ds_model.device,
dtype=dtype)
for _, batch in enumerate(data_loader):
loss = ds_model(batch[0], batch[1])
ds_model.backward(loss)
ds_model.step()
ds_model.empty_partition_cache()
ds_model.save_checkpoint(tmpdir)
# 2. load and immediately save a model with a fresh ds engine
ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None)
ds_model.load_checkpoint(tmpdir,
load_optimizer_states=False,
load_lr_scheduler_states=False,
load_module_only=False)
ds_model.save_checkpoint(tmpdir)
@pytest.mark.parametrize('zero_stage', [0, 1, 2, 3])
def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage):
config_dict = {
"optimizer": {
"type": 'Adam'
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"zero_optimization": {
"stage": zero_stage,
"stage3_gather_fp16_weights_on_model_save": True,
},
"gradient_accumulation_steps": 2,
"train_micro_batch_size_per_gpu": 1,
"train_batch_size": 4,
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
# This test reproduces a bug where one tries to retrieve a 16bit model before grad_accum
# cycle was completed.
# So we config grad_accum=2 and step only once and save_16bit_model
ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None)
data_loader = random_dataloader(model=ds_model,
total_samples=2,
hidden_dim=hidden_dim,
device=ds_model.device,
dtype=torch.half)
batch = next(iter(data_loader))
loss = ds_model(batch[0], batch[1])
ds_model.backward(loss)
ds_model.step()
ds_model.empty_partition_cache()
# we stepped only once, and now save 16bit model before gradient_accumulation_steps=2 is complete
ds_model.save_16bit_model(tmpdir, "model.pt")
# let's test just as well that we can save the checkpoint too
ds_model.save_checkpoint(tmpdir)
class TestZeROCheckpointFrozenWeights(DistributedTest):
world_size = 2
@pytest.mark.parametrize('zero_stage', [1, 2, 3])
def test_load_optimizer_state(self, tmpdir, zero_stage):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": 'Adam',
"params": {
"lr": 0.00015,
"betas": [0.8, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"wall_clock_breakdown": True,
"zero_optimization": {
"stage": zero_stage
}
}
hidden_dim = 10
with deepspeed.zero.Init(enabled=zero_stage == 3):
models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)]
checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True)
@pytest.mark.parametrize('zero_stage', [1, 2, 3])
def test_not_load_optimizer_state(self, tmpdir, zero_stage):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": 'Adam',
"params": {
"lr": 0.00015,
"betas": [0.8, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": zero_stage
}
}
hidden_dim = 10
with deepspeed.zero.Init(enabled=zero_stage == 3):
models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)]
checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False)
@pytest.mark.parametrize('zero_stage', [1, 2, 3])
def test_load_module_only(self, tmpdir, zero_stage):
config_dict = {
"train_batch_size": 2,
"optimizer": {
"type": 'Adam'
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"zero_optimization": {
"stage": zero_stage,
}
}
hidden_dim = 10
with deepspeed.zero.Init(enabled=zero_stage == 3):
models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)]
checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True)
class TestSaveTensorClone(DistributedTest):
world_size = 1
@pytest.mark.parametrize('zero_stage', [1, 2])
@pytest.mark.parametrize('use_cpu_device', [True, False])
def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device):
ds_config = {
"optimizer": {
"type": "AdamW",
},
"zero_optimization": {
"stage": zero_stage
},
"train_batch_size": 1,
"train_micro_batch_size_per_gpu": 1
}
hidden_dim = 1024
model = SimpleModel(hidden_dim, nlayers=4).half()
ref_model_state_dict = model.state_dict()
ds_engine, _, _, _ = deepspeed.initialize(model=model, config_params=ds_config)
        clone_device = torch.device('cpu') if use_cpu_device else get_accelerator().current_device()
        clone_state_dict = clone_tensors_for_torch_save(ds_engine.module.state_dict(), device=clone_device)
compare_state_dicts(ref_model_state_dict, clone_state_dict)
ref_ckpt_file = os.path.join(tmpdir, 'ref_ckpt.pt')
torch.save(ref_model_state_dict, ref_ckpt_file)
clone_ckpt_file = os.path.join(tmpdir, 'clone_ckpt.pt')
torch.save(clone_state_dict, clone_ckpt_file)
compare_state_dicts(torch.load(ref_ckpt_file), torch.load(clone_ckpt_file))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import numbers
import deepspeed
from deepspeed.runtime.zero.stage_1_and_2 import DeepSpeedZeroOptimizer
from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer
from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer
from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from unit.simple_model import *
def compare_deepspeed_states(saved_model, loaded_model):
# These are compared in more depth in other places
assert hasattr(loaded_model, 'module')
assert saved_model.sparse_tensor_module_names == loaded_model.sparse_tensor_module_names
assert saved_model.skipped_steps == loaded_model.skipped_steps
assert saved_model.global_steps == loaded_model.global_steps
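# Under ZeRO stage 3, parameters may be partitioned away; collect those that are not
# currently materialized so they can be gathered before comparison.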
def zero3_params_to_fetch(param_list):
return [p for p in param_list if hasattr(p, 'ds_id') and p.ds_status == ZeroParamStatus.NOT_AVAILABLE]
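# Compare the fp16 module parameters of two engines and, optionally, the fp32 master
# copies held by whichever DeepSpeed optimizer wrapper is in use.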
def compare_model_states(saved_model, loaded_model, compare_optimizer=True, load_module_only=False):
if not load_module_only:
compare_deepspeed_states(saved_model, loaded_model)
params_to_fetch = zero3_params_to_fetch(
list(saved_model.module.named_parameters()) + list(loaded_model.module.named_parameters()))
enable_gather = len(params_to_fetch) > 0
with deepspeed.zero.GatheredParameters(params_to_fetch, enabled=enable_gather):
for p0, p1 in zip(saved_model.module.named_parameters(), loaded_model.module.named_parameters()):
np0, p0 = p0
np1, p1 = p1
if 'deepspeed_moe.gate.wg' in np0:
# these params are converted to float at runtime, cast to half for comparison
p1 = p1.half()
p0 = p0.half()
assert id(p0) != id(p1), f'Comparing fp16 model state tensor against itself : {id(p0)} <====> {id(p1)}'
try:
assert torch.allclose(p0, p1,
atol=1e-07), f"FP16 model state {p0} is not equal to {p1}, names:{np0}, {np1}"
except RuntimeError as err:
print(f"FP16 model state {p0} is not equal to {p1}, names:{np0}, {np1}")
raise err
if not compare_optimizer:
return
if DeepSpeedZeroOptimizer_Stage3 is not None and isinstance(saved_model.optimizer, DeepSpeedZeroOptimizer_Stage3):
for p0, p1 in zip(saved_model.optimizer.fp32_partitioned_groups_flat,
loaded_model.optimizer.fp32_partitioned_groups_flat):
assert torch.allclose(p0, p1, atol=1e-07), f"Fp32 model states {p0} is not equal to {p1}"
elif isinstance(saved_model.optimizer, DeepSpeedZeroOptimizer):
for p0, p1 in zip(saved_model.optimizer.single_partition_of_fp32_groups,
loaded_model.optimizer.single_partition_of_fp32_groups):
assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}'
assert torch.allclose(p0, p1, atol=1e-07), f"Fp32 model states {p0} is not equal to {p1}"
elif isinstance(saved_model.optimizer, FP16_Optimizer):
for p0, p1 in zip(saved_model.optimizer.fp32_groups_flat, loaded_model.optimizer.fp32_groups_flat):
assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}'
assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model states {p0} is not equal to {p1}"
elif isinstance(saved_model.optimizer, FP16_UnfusedOptimizer):
for params0, params1 in zip(saved_model.optimizer.fp32_groups, loaded_model.optimizer.fp32_groups):
for p0, p1 in zip(params0, params1):
assert id(p0) != id(p1), f'Comparing fp32 model state tensor against itself: {id(p0)} <====> {id(p1)}'
assert torch.allclose(p0, p1, atol=1e-07), f"FP32 model states {p0} is not equal to {p1}"
elif isinstance(saved_model.optimizer, torch.optim.Optimizer):
pass
else:
assert False, f'Unexpected Optimizer Type: {saved_model.optimizer}'
def compare_state_dicts(state0, state1, expected_mismatch_keys=[]):
for (k0, s0), (k1, s1) in zip(state0.items(), state1.items()):
assert k0 == k1, f'failure due to key mismatch {k0} != {k1}'
if k0 in expected_mismatch_keys:
continue
if isinstance(s0, torch.Tensor) and isinstance(s1, torch.Tensor):
assert id(s0) != id(s1), f'Comparing optimizer state tensor against itself: {id(s0)} <====> {id(s1)}'
assert torch.equal(s0.to('cpu'), s1.to('cpu'))
else:
assert s0 == s1, f'failures with keys = {k0}, {k1}, values = {type(s0[0])} and {type(s1[0])}'
def compare_optimizer_states(saved_model, loaded_model, hidden_dim, fp16=True):
saved_optimizer = saved_model.optimizer.optimizer if fp16 else saved_model.optimizer
loaded_optimizer = loaded_model.optimizer.optimizer if fp16 else loaded_model.optimizer
for state0, state1 in zip(saved_optimizer.state.values(), loaded_optimizer.state.values()):
compare_state_dicts(state0, state1)
def compare_lr_scheduler_states(saved_model, loaded_model):
assert hasattr(saved_model, 'lr_scheduler')
assert hasattr(loaded_model, 'lr_scheduler')
saved_scheduler = saved_model.lr_scheduler
loaded_scheduler = loaded_model.lr_scheduler
assert hasattr(saved_scheduler, 'state_dict')
assert hasattr(loaded_scheduler, 'state_dict')
saved_sd = saved_scheduler.state_dict()
loaded_sd = loaded_scheduler.state_dict()
print(f"saved_sd = {saved_sd}")
print(f"loaded_sd = {loaded_sd}")
assert saved_sd.keys() == loaded_sd.keys()
for state0, state1 in zip(saved_sd.values(), loaded_sd.values()):
if isinstance(state0, numbers.Number) and isinstance(state1, numbers.Number):
assert state0 == state1
# following mixture-of-experts.md
def create_moe_param_groups(model):
from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer
parameters = {'params': [p for p in model.parameters()], 'name': 'parameters'}
return split_params_into_different_moe_groups_for_optimizer(parameters)
def create_deepspeed_model(config_dict, model, base_optimizer):
ds_model, _, _, _ = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=create_moe_param_groups(model),
optimizer=base_optimizer)
ds_model.empty_partition_cache()
return ds_model
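# End-to-end checkpoint roundtrip: train the first model, save a checkpoint, load it
# into the second model, then compare module, optimizer, and LR-scheduler state as requested.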
def checkpoint_correctness_verification(config_dict,
models,
hidden_dim,
tmpdir,
load_optimizer_states=False,
load_lr_scheduler_states=False,
fp16=True,
train_batch=False,
base_optimizers=[None, None],
empty_tag=False,
seq_dataloader=False,
load_module_only=False):
dtype = torch.half if fp16 else torch.float32
ds_model = create_deepspeed_model(config_dict=config_dict, model=models[0], base_optimizer=base_optimizers[0])
if seq_dataloader:
data_loader = sequence_dataloader(model=ds_model,
total_samples=50,
hidden_dim=hidden_dim,
device=ds_model.device,
dtype=dtype)
else:
data_loader = random_dataloader(model=ds_model,
total_samples=50,
hidden_dim=hidden_dim,
device=ds_model.device,
dtype=dtype)
if train_batch:
ds_model.set_dataloader(data_loader)
for _, batch in enumerate(data_loader):
loss = ds_model.train_batch()
else:
for _, batch in enumerate(data_loader):
loss = ds_model(batch[0], batch[1])
ds_model.backward(loss)
ds_model.step()
# Flush zero stage 3 cache
ds_model.empty_partition_cache()
trained_model = ds_model
save_folder = os.path.join(tmpdir, 'saved_checkpoint')
save_tag = None if empty_tag else '1'
trained_model.save_checkpoint(save_folder, tag=save_tag)
dist.barrier()
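    # Sanity-check MoE expert checkpoint files: the bytes actually stored (unique storages)
    # must match the bytes required by the tensors, i.e. no redundant copies were written.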
for root, _, files in os.walk(save_folder):
for f in files:
if "_expert_" in f and "_model_states" in f:
expert = torch.load(os.path.join(root, f))
needed, storages = 0, {}
for name, tensor in expert.items():
needed += tensor.size().numel()
storage = tensor.storage()
# some storage can be shared within an expert's checkpoint
storages[storage.data_ptr()] = storage.size()
stored = sum(v for _, v in storages.items())
assert needed == stored, f"MoE expert checkpoint uses more storage than required: {f}"
loaded_model = create_deepspeed_model(config_dict=config_dict, model=models[1], base_optimizer=base_optimizers[1])
assert list(trained_model.parameters())[0].dtype == list(loaded_model.parameters())[0].dtype
loaded_model.load_checkpoint(save_folder,
tag=save_tag,
load_optimizer_states=load_optimizer_states,
load_lr_scheduler_states=load_lr_scheduler_states,
load_module_only=load_module_only)
compare_model_states(trained_model,
loaded_model,
compare_optimizer=load_optimizer_states,
load_module_only=load_module_only)
if load_optimizer_states:
compare_optimizer_states(trained_model, loaded_model, hidden_dim, fp16)
if load_lr_scheduler_states:
compare_lr_scheduler_states(trained_model, loaded_model)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
from deepspeed.ops.op_builder import CPUAdamBuilder
from unit.common import DistributedTest
from unit.simple_model import *
from unit.checkpoint.common import checkpoint_correctness_verification
import pytest
@pytest.mark.parametrize('zero_stage, use_cpu_offload', [(0, False), (1, False), (2, False), (2, True), (3, False),
(3, True)])
class TestLRSchedulerCheckpoint(DistributedTest):
world_size = 2
def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload):
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": 'Adam',
"params": {
"lr": 0.00015,
"betas": [0.8, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": zero_stage,
"cpu_offload": use_cpu_offload
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": 0,
"warmup_max_lr": 0.001,
"warmup_num_steps": 1000
}
}
}
hidden_dim = 10
if zero_stage == 3:
global DeepSpeedZeroOptimizer_Stage3
from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3
with deepspeed.zero.Init():
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
else:
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
checkpoint_correctness_verification(config_dict,
models,
hidden_dim,
tmpdir,
load_optimizer_states=False,
load_lr_scheduler_states=True)
def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload):
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": 'Adam',
"params": {
"lr": 1e-5
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": zero_stage,
"cpu_offload": use_cpu_offload
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": 0,
"warmup_max_lr": 0.001,
"warmup_num_steps": 1000
}
},
}
hidden_dim = 10
if zero_stage == 3:
with deepspeed.zero.Init():
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
else:
models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)]
checkpoint_correctness_verification(config_dict,
models,
hidden_dim,
tmpdir,
load_optimizer_states=False,
load_lr_scheduler_states=False)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine
from unit.common import DistributedTest
from unit.simple_model import *
from unit.checkpoint.common import checkpoint_correctness_verification
from unit.util import skip_on_arch
import pytest
class TestPipelineCheckpoint(DistributedTest):
world_size = 4
@pytest.mark.parametrize("zero_stage", [0, 1])
def test_checkpoint_pipe_engine(self, zero_stage, tmpdir):
skip_on_arch(min_arch=7)
config_dict = {
"train_batch_size": 2,
"train_micro_batch_size_per_gpu": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-5
}
},
"zero_optimization": {
"stage": zero_stage
},
"fp16": {
"enabled": zero_stage > 0
},
"scheduler": {
"type": "OneCycle",
"params": {
"cycle_first_step_size": 1000,
"cycle_first_stair_count": 500,
"cycle_second_step_size": 1000,
"cycle_second_stair_count": 500,
"decay_step_size": 1000,
"cycle_min_lr": 0.0001,
"cycle_max_lr": 0.0010,
"decay_lr_rate": 0.001,
"cycle_min_mom": 0.85,
"cycle_max_mom": 0.99,
"decay_mom_rate": 0.0
}
}
}
models = [LinearStackPipe(num_stages=2) for _ in range(2)]
checkpoint_correctness_verification(config_dict=config_dict,
models=models,
hidden_dim=models[0].hidden_dim,
tmpdir=tmpdir,
fp16=config_dict['fp16']['enabled'],
load_optimizer_states=True,
load_lr_scheduler_states=True,
train_batch=True)
@pytest.mark.parametrize(
"base_topo,test_topo",
[
#(PipeTopo(num_pp=1,
# num_dp=4),
# PipeTopo(num_pp=4,
# num_dp=1)),
#(PipeTopo(num_pp=2,
# num_dp=2),
# PipeTopo(num_pp=2,
# num_dp=2)),
#(PipeTopo(num_pp=4,
# num_dp=1),
# PipeTopo(num_pp=2,
# num_dp=2)),
])
def test_checkpoint_pipe_module(self, base_topo, test_topo, tmpdir):
checkpoint_engine = TorchCheckpointEngine()
base_model = LinearStackPipe(topology=base_topo)
base_model.save_state_dict(tmpdir, checkpoint_engine=checkpoint_engine)
dist.barrier()
test_model = LinearStackPipe(topology=test_topo)
test_model.load_state_dir(tmpdir, checkpoint_engine=checkpoint_engine)
# Base and test can have different lengths, so make sure we map from the
# smaller to larger model
if len(base_model.forward_funcs) < len(test_model.forward_funcs):
A = base_model
B = test_model
else:
A = test_model
B = base_model
# Compare layers individually since partitions are different
for idx, A_layer in enumerate(A.forward_funcs):
if not hasattr(A_layer, 'parameters'):
# Skip functionals, etc.
continue
# Find the corresponding layer in B
global_idx = idx + A._local_start
B_local_idx = global_idx - B._local_start
B_layer = B.forward_funcs[B_local_idx]
# Compare layer parameters
for p0, p1 in zip(A_layer.parameters(), B_layer.parameters()):
assert torch.allclose(p0, p1, atol=1e-07), f"Model state {p0} is not equal to {p1}"
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
from unit.common import DistributedTest
from unit.simple_model import *
from unit.checkpoint.common import checkpoint_correctness_verification
class TestLatestCheckpoint(DistributedTest):
world_size = 1
def test_existing_latest(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
}
}
hidden_dim = 10
models = [SimpleModel(hidden_dim=hidden_dim) for _ in range(2)]
checkpoint_correctness_verification(config_dict=config_dict,
models=models,
hidden_dim=hidden_dim,
tmpdir=tmpdir,
load_optimizer_states=True,
load_lr_scheduler_states=False,
fp16=False,
empty_tag=True)
def test_missing_latest(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
# should be no-op, since latest doesn't exist
model.load_checkpoint(tmpdir)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import os
import json
from pydantic import Field, ValidationError
from typing import List
from deepspeed.runtime import config as ds_config
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
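# Minimal config model exercising a plain field, a deprecated field with a migration
# function, and an aliased field.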
class SimpleConf(DeepSpeedConfigModel):
param_1: int = 0
param_2_old: str = Field(None, deprecated=True, new_param="param_2", new_param_fn=(lambda x: [x]))
param_2: List[str] = None
param_3: int = Field(0, alias="param_3_alias")
def test_only_required_fields(tmpdir):
'''Ensure that config containing only the required fields is accepted. '''
cfg_json = tmpdir.mkdir('ds_config_unit_test').join('minimal.json')
with open(cfg_json, 'w') as f:
required_fields = {'train_batch_size': 64}
json.dump(required_fields, f)
run_cfg = ds_config.DeepSpeedConfig(cfg_json)
assert run_cfg is not None
assert run_cfg.train_batch_size == 64
assert run_cfg.train_micro_batch_size_per_gpu == 64
assert run_cfg.gradient_accumulation_steps == 1
def test_config_duplicate_key(tmpdir):
config_dict = '''
{
"train_batch_size": 24,
"train_batch_size": 24,
}
'''
config_path = os.path.join(tmpdir, 'temp_config.json')
with open(config_path, 'w') as jf:
jf.write("%s" % config_dict)
with pytest.raises(ValueError):
run_cfg = ds_config.DeepSpeedConfig(config_path)
def test_config_base():
config = SimpleConf(**{"param_1": 42})
assert config.param_1 == 42
def test_config_base_deprecatedfield():
config = SimpleConf(**{"param_2_old": "DS"})
assert config.param_2 == ["DS"]
def test_config_base_aliasfield():
config = SimpleConf(**{"param_3": 10})
assert config.param_3 == 10
config = SimpleConf(**{"param_3_alias": 10})
assert config.param_3 == 10
@pytest.mark.parametrize("config_dict", [{"param_1": "DS"}, {"param_2": "DS"}, {"param_1_typo": 0}])
def test_config_base_literalfail(config_dict):
with pytest.raises(ValidationError):
config = SimpleConf(**config_dict)
def test_config_base_deprecatedfail():
with pytest.raises(AssertionError):
config = SimpleConf(**{"param_2": ["DS"], "param_2_old": "DS"})
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
import pytest
from unit.common import DistributedTest
from unit.simple_model import SimpleModel, random_dataloader
from deepspeed.runtime.lr_schedules import LR_RANGE_TEST, LR_RANGE_TEST_MIN_LR, LR_RANGE_TEST_STEP_RATE, LR_RANGE_TEST_STEP_SIZE, LR_RANGE_TEST_STAIRCASE
from deepspeed.runtime.lr_schedules import WARMUP_LR, WARMUP_MIN_LR, WARMUP_MAX_LR, WARMUP_NUM_STEPS, WARMUP_TYPE, WARMUP_LOG_RATE, WARMUP_LINEAR_RATE
from deepspeed.runtime.lr_schedules import ONE_CYCLE, CYCLE_MIN_LR, CYCLE_MAX_LR, CYCLE_FIRST_STEP_SIZE, DECAY_LR_RATE, DECAY_STEP_SIZE
from deepspeed.runtime.lr_schedules import CYCLE_MIN_MOM, CYCLE_MAX_MOM, DECAY_MOM_RATE
from deepspeed.runtime.lr_schedules import WARMUP_DECAY_LR, TOTAL_NUM_STEPS
def _verify_continuous_decrease(values):
for i in range(len(values) - 1):
assert values[i] > values[i + 1]
def _verify_continuous_increase(values):
for i in range(len(values) - 1):
assert values[i] < values[i + 1]
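# In staircase mode the LR is held constant within each window of `step_size` steps.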
def _verify_staircase_increase(values, step_size):
num_values = len(values)
for i in range(0, num_values, step_size):
j = min(i + step_size, num_values)
assert all([values[i] == v for v in values[i:j]])
@pytest.mark.parametrize("scheduler_type,params", [(WARMUP_LR, {}),
(WARMUP_DECAY_LR, {
WARMUP_NUM_STEPS: 10,
TOTAL_NUM_STEPS: 20
}), (ONE_CYCLE, {
CYCLE_MIN_LR: 0,
CYCLE_MAX_LR: 0.1
}), (LR_RANGE_TEST, {})])
class TestGetLrBeforeTrain(DistributedTest):
world_size = 1
def test(self, scheduler_type, params):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
},
},
"scheduler": {
"type": scheduler_type,
"params": params
},
"gradient_clipping": 1.0
}
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.float)
for n, batch in enumerate(data_loader):
# get lr before training starts
lr_scheduler.get_lr()
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
@pytest.mark.parametrize("warmup_num_steps", [10, 15, 19, 33])
@pytest.mark.parametrize("warmup_type", [WARMUP_LOG_RATE, WARMUP_LINEAR_RATE])
class TestLrSchedule(DistributedTest):
world_size = 1
def test_lr_warmup_schedule(self, warmup_num_steps, warmup_type):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
},
},
"scheduler": {
"type": WARMUP_LR,
"params": {
WARMUP_MIN_LR: 0.1,
WARMUP_MAX_LR: 0.2,
WARMUP_NUM_STEPS: warmup_num_steps,
WARMUP_TYPE: warmup_type,
}
},
"gradient_clipping": 1.0
}
schedule_params = config_dict["scheduler"]["params"]
total_num_steps = 2 * warmup_num_steps
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=total_num_steps * 2,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.float)
step_lrs = []
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
step_lrs.append(lr_scheduler.get_lr())
# Verify initial lr
assert step_lrs[0] == [schedule_params[WARMUP_MIN_LR]]
# Verify warmup completion
warmup_num_steps = schedule_params[WARMUP_NUM_STEPS]
warmup_max_lr = [schedule_params[WARMUP_MAX_LR]]
assert step_lrs[warmup_num_steps] == warmup_max_lr
# Verify post-warmup completion
assert all([warmup_max_lr == lr for lr in step_lrs[warmup_num_steps:]])
def test_lr_warmup_decay_schedule(self, warmup_num_steps, warmup_type):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
},
},
"scheduler": {
"type": WARMUP_DECAY_LR,
"params": {
WARMUP_MIN_LR: 0.1,
WARMUP_MAX_LR: 0.2,
WARMUP_NUM_STEPS: warmup_num_steps,
TOTAL_NUM_STEPS: warmup_num_steps * 2,
WARMUP_TYPE: warmup_type
}
},
"gradient_clipping": 1.0
}
schedule_params = config_dict["scheduler"]["params"]
total_num_steps = schedule_params[TOTAL_NUM_STEPS]
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=total_num_steps * 2,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.float)
step_lrs = []
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
step_lrs.append(lr_scheduler.get_lr())
# Verify initial lr
assert step_lrs[0] == [schedule_params[WARMUP_MIN_LR]]
# Verify lr at warmup completion
warmup_num_steps = schedule_params[WARMUP_NUM_STEPS]
warmup_max_lr = [schedule_params[WARMUP_MAX_LR]]
assert step_lrs[warmup_num_steps] == warmup_max_lr
# Verify decay phase
previous_lr = warmup_max_lr
for lr in step_lrs[warmup_num_steps + 1:]:
assert lr < previous_lr
previous_lr = lr
@pytest.mark.parametrize("scheduler_type,params", [(WARMUP_LR, {}),
(WARMUP_DECAY_LR, {
WARMUP_NUM_STEPS: 5,
TOTAL_NUM_STEPS: 10
}),
(ONE_CYCLE, {
CYCLE_MIN_LR: 0,
CYCLE_MAX_LR: 0.1,
CYCLE_FIRST_STEP_SIZE: 5,
DECAY_STEP_SIZE: 5
}),
(LR_RANGE_TEST, {
LR_RANGE_TEST_MIN_LR: 1e-4,
LR_RANGE_TEST_STEP_SIZE: 1
})])
class TestSchedulerOptimizerParity(DistributedTest):
world_size = 1
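    # The scheduler's reported LR must match the LR the engine actually applied to the optimizer.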
def test(self, scheduler_type, params):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
},
},
"scheduler": {
"type": scheduler_type,
"params": params
},
"gradient_clipping": 1.0
}
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.float)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
assert lr_scheduler.get_lr() == model.get_lr()
@pytest.mark.parametrize("min_lr, step_rate, step_size, staircase",
[(1e-4, 1e-5, 1, True),
(1e-5, 1e-5, 1, False),
(1e-4, 1e-3, 10, True),
(1e-3, 1e-3, 10, False),
(1e-2, 1e-2, 19, True),
(1e-2, 1e-2, 19, False)
])# yapf: disable
class TestLrRange(DistributedTest):
world_size = 1
def test(self, min_lr, step_rate, step_size, staircase):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
},
},
"scheduler": {
"type": LR_RANGE_TEST,
"params": {
LR_RANGE_TEST_MIN_LR: min_lr,
LR_RANGE_TEST_STEP_RATE: step_rate,
LR_RANGE_TEST_STEP_SIZE: step_size,
LR_RANGE_TEST_STAIRCASE: staircase
}
},
"gradient_clipping": 1.0
}
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=max(50, step_size * 2),
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.float)
step_lrs = []
for _, batch in enumerate(data_loader):
step_lrs.extend(lr_scheduler.get_lr())
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
# Verify starting lr
assert step_lrs[0] == min_lr
if staircase:
# Verify staircase increasing lr
_verify_staircase_increase(step_lrs, step_size)
else:
# Verify continuous increasing lr
_verify_continuous_increase(step_lrs)
class TestOneCycle(DistributedTest):
world_size = 1
@pytest.mark.parametrize("min_lr, max_lr, decay_rate, cycle_step_size, decay_step_size",
[
(1e-5, 1e-2, 1e-3, 10, 10),
(1e-3, 1e-1, 0, 21, 21),
(1e-5, 1e-2, 1e-3, 10, 10),
(1e-3, 1e-1, 1e-1, 21, 21),
(1e-5, 1e-1, 0, 10, 0),
]) # yapf: disable
def test_lr(self, min_lr, max_lr, decay_rate, cycle_step_size, decay_step_size):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
},
},
"scheduler": {
"type": ONE_CYCLE,
"params": {
CYCLE_MIN_LR: min_lr,
CYCLE_MAX_LR: max_lr,
DECAY_LR_RATE: decay_rate,
CYCLE_FIRST_STEP_SIZE: cycle_step_size,
DECAY_STEP_SIZE: decay_step_size
}
},
"gradient_clipping": 1.0
}
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=max(50, cycle_step_size * 3),
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.float)
step_lrs = []
for _, batch in enumerate(data_loader):
step_lrs.extend(lr_scheduler.get_lr())
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
# Verify starting lr
assert step_lrs[0] == min_lr
# Verify peak lr
assert step_lrs[cycle_step_size] == max_lr
# Verify increasing phase
_verify_continuous_increase(step_lrs[:cycle_step_size])
# Verify decreasing phase
_verify_continuous_decrease(step_lrs[cycle_step_size:(cycle_step_size * 2)])
# Verify decay phase
if decay_rate > 0:
_verify_continuous_decrease(step_lrs[(cycle_step_size * 2):])
@pytest.mark.parametrize("min_mom, max_mom, decay_rate, step_size",
[
(0.08, 0.09, 1e-3, 10),
(0.08, 0.09, 0, 21),
(0.08, 0.09, 1e-3, 10),
(0.08, 0.09, 0, 21),
]) # yapf: disable
def test_mom(self, min_mom, max_mom, decay_rate, step_size):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
},
},
"scheduler": {
"type": ONE_CYCLE,
"params": {
CYCLE_MIN_LR: 1e-3,
CYCLE_MAX_LR: 1e-2,
CYCLE_MIN_MOM: min_mom,
CYCLE_MAX_MOM: max_mom,
DECAY_MOM_RATE: decay_rate,
CYCLE_FIRST_STEP_SIZE: step_size,
DECAY_STEP_SIZE: step_size
}
},
"gradient_clipping": 1.0
}
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, lr_scheduler = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=max(50, step_size * 3),
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.float)
step_moms = []
for _, batch in enumerate(data_loader):
step_moms.append(lr_scheduler.get_mom())
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
        # Verify starting momentum (momentum cycles inversely to the lr)
        assert step_moms[0][0][0] == max_mom
        # Verify momentum at the end of the first phase (cycle minimum)
        assert step_moms[step_size][0][0] == min_mom
# Verify decreasing phase
_verify_continuous_decrease(step_moms[:step_size])
# Verify increasing phase
_verify_continuous_increase(step_moms[step_size:(step_size * 2)])
# Verify decay phase
if decay_rate > 0:
_verify_continuous_increase(step_moms[(step_size * 2):])
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
from pytest import approx
from unit.common import DistributedTest
from unit.multi_output_model import MultiOutputModel, multi_output_dataloader
class TestTwoOutputModel(DistributedTest):
world_size = 1
def test(self, tmpdir):
grad_accumulation_steps = 2
micro_batch_size = 1
world_size = self.world_size
config_dict = {
"train_micro_batch_size_per_gpu": micro_batch_size,
"gradient_accumulation_steps": grad_accumulation_steps,
"train_batch_size": micro_batch_size * grad_accumulation_steps * world_size,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True
}
}
hidden_dim = 10
weight_value = 0.1
model = MultiOutputModel(hidden_dim, weight_value)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
total_samples = 4
data_loader = multi_output_dataloader(model=model,
total_samples=total_samples,
hidden_dim=hidden_dim,
device=model.device,
inputs=[1.0, 2.0],
targets=[1, 2])
for n, batch in enumerate(data_loader):
assert len(batch) % 2 == 0, \
f"multi_output_dataloader failed to return even number of data samples (input+target)"
midpoint = len(batch) // 2
inputs, targets = batch[:midpoint], batch[midpoint:]
loss_tuple = model(inputs, targets)
expected_loss = torch.tensor(2.302734375, dtype=torch.half, device=model.device)
for loss in loss_tuple:
assert loss.shape == torch.Size([])
assert loss.item() == approx(expected_loss.item())
summed_loss = sum(loss_tuple)
scaled_loss = model.backward(summed_loss)
expected_scaled_loss = summed_loss.float() / grad_accumulation_steps
assert scaled_loss.item() == approx(expected_scaled_loss.item())
model.step()
class TestThreeOutputModel(DistributedTest):
world_size = 1
def test(self, tmpdir):
grad_accumulation_steps = 3
micro_batch_size = 1
world_size = 1
config_dict = {
"train_micro_batch_size_per_gpu": micro_batch_size,
"gradient_accumulation_steps": grad_accumulation_steps,
"train_batch_size": micro_batch_size * grad_accumulation_steps * world_size,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True
}
}
hidden_dim = 10
weight_value = 0.1
model = MultiOutputModel(hidden_dim, weight_value)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
total_samples = grad_accumulation_steps * micro_batch_size * 2
data_loader = multi_output_dataloader(model=model,
total_samples=total_samples,
hidden_dim=hidden_dim,
device=model.device,
inputs=[1.0, 2.0, 3.0],
targets=[1, 2, 3])
for n, batch in enumerate(data_loader):
assert len(batch) % 2 == 0, \
f"multi_output_dataloader failed to return even number of data samples (input+target)"
midpoint = len(batch) // 2
inputs, targets = batch[:midpoint], batch[midpoint:]
loss_tuple = model(inputs, targets)
assert len(loss_tuple) == 3
expected_loss = torch.tensor(2.302734375, dtype=torch.half, device=model.device)
for loss in loss_tuple:
assert loss.shape == torch.Size([])
assert loss.item() == approx(expected_loss.item())
summed_loss = sum(loss_tuple)
scaled_loss = model.backward(summed_loss)
expected_scaled_loss = summed_loss.float() / grad_accumulation_steps
assert scaled_loss.item() == approx(expected_scaled_loss.item())
model.step()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
from deepspeed.runtime.zero.linear import LinearModuleForZeroStage3
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
@pytest.mark.parametrize('half_op', [False, True])
class TestAutoCastDisable(DistributedTest):
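    # With autocast absent or disabled, the ZeRO-3 linear layer must keep its weight dtype on output.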
def test_missing_amp_autocast(self, half_op):
hidden_dim = 4
if half_op:
input = torch.randn(hidden_dim).to(get_accelerator().device_name()).half()
ds_linear = LinearModuleForZeroStage3(hidden_dim, hidden_dim).to(get_accelerator().device_name()).half()
else:
input = torch.randn(hidden_dim).to(get_accelerator().device_name())
ds_linear = LinearModuleForZeroStage3(hidden_dim, hidden_dim).to(get_accelerator().device_name())
output = ds_linear(input)
assert output.dtype == ds_linear.weight.dtype
def test_disable_autocast_linear(self, half_op):
amp = get_accelerator().amp()
hidden_dim = 4
if half_op:
input = torch.randn(hidden_dim).to(get_accelerator().device_name()).half()
ds_linear = LinearModuleForZeroStage3(hidden_dim, hidden_dim).to(get_accelerator().device_name()).half()
else:
input = torch.randn(hidden_dim).to(get_accelerator().device_name())
ds_linear = LinearModuleForZeroStage3(hidden_dim, hidden_dim).to(get_accelerator().device_name())
with amp.autocast(False):
output = ds_linear(input)
assert output.dtype == ds_linear.weight.dtype
@pytest.mark.skipif(get_accelerator().amp() is None, reason='amp is not installed')
@pytest.mark.parametrize('half_input, half_weight', [(False, False), (False, True), (True, False), (True, True)])
class TestAutoCastEnable(DistributedTest):
def test_autocast_linear(self, tmpdir, half_input, half_weight):
amp = get_accelerator().amp()
hidden_dim = 4
input = torch.randn(hidden_dim).to(get_accelerator().device_name())
ds_linear = LinearModuleForZeroStage3(hidden_dim, hidden_dim).to(get_accelerator().device_name())
if half_input:
input = input.half()
if half_weight:
ds_linear = ds_linear.half()
with amp.autocast():
output = ds_linear(input)
assert output.dtype == torch.half or output.dtype == torch.bfloat16
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from torch._utils import _flatten_dense_tensors
import deepspeed.comm as dist
import pytest
import deepspeed.runtime.utils as ds_utils
import deepspeed.utils.groups as groups
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
def test_call_to_str():
c2s = ds_utils.call_to_str
assert c2s('int') == 'int()'
assert c2s('int', 3) == 'int(3)'
assert c2s('int', 3, 'jeff') == 'int(3, \'jeff\')'
assert c2s('hello', val=3) == 'hello(val=3)'
assert c2s('hello', 1138, val=3) == 'hello(1138, val=3)'
class TestClipGradNorm(DistributedTest):
world_size = 2
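    # clip_grad_norm_ must produce the same global norm on every rank even when MoE
    # (non-allreduced) parameters hold rank-specific gradients.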
def test(self):
param1 = torch.nn.Parameter(torch.Tensor([0]))
param1.grad = torch.Tensor([1])
param2 = torch.nn.Parameter(torch.Tensor([0]))
param2.grad = torch.Tensor([dist.get_rank() + 1])
# param2 is now MoE parameter
param2.allreduce = False
parameters = [param1, param2]
groups._create_expert_and_data_parallel(2)
norm = ds_utils.clip_grad_norm_(parameters, max_norm=0.1)
norm = torch.Tensor([norm]).to(get_accelerator().device_name(dist.get_rank()))
world_size = dist.get_world_size()
gathered_norm = [torch.zeros(1).to(get_accelerator().device_name()) for i in range(world_size)]
dist.all_gather(gathered_norm, norm)
assert gathered_norm[0] == gathered_norm[1], "norm at rank 0 does not match the norm at rank 1"
@pytest.mark.parametrize("check_using_norm", [(False), (True)])
class TestCheckOverflow(DistributedTest):
world_size = 2
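    # An inf gradient is injected on rank 1 only; the overflow check is expected to flag it on every rank.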
def test(self, check_using_norm):
groups._create_expert_and_data_parallel(2)
param1 = torch.nn.Parameter(torch.Tensor([0]))
param1.grad = torch.Tensor([1])
param2 = torch.nn.Parameter(torch.Tensor([0]))
if dist.get_rank() == 0:
param2.grad = torch.Tensor([1])
else:
param2.grad = torch.Tensor([float("inf")])
param2.allreduce = False
# param2 is now MoE parameter
parameters = [param1, param2]
if check_using_norm:
grads_group_flat = [_flatten_dense_tensors([p.grad for p in parameters])]
norm = ds_utils.get_weight_norm(grads_group_flat)
overflow_checker = ds_utils.CheckOverflow([parameters])
overflow = overflow_checker.check_using_norm([norm], reduce_overflow=False)
else:
overflow_checker = ds_utils.CheckOverflow([parameters])
overflow = overflow_checker.check()
assert overflow
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
from typing import Callable
import torch
from torch.optim import Optimizer, Adam, AdamW
from torch.optim.lr_scheduler import _LRScheduler, LambdaLR
from unit.simple_model import SimpleModel, random_dataloader
from unit.common import DistributedTest
from unit.util import required_torch_version, bf16_required_version_check, required_amp_check
import deepspeed
from deepspeed.ops.adam import FusedAdam
from deepspeed.runtime.lr_schedules import WARMUP_LR, WarmupLR
from deepspeed.runtime.config import ADAM_OPTIMIZER
from deepspeed.runtime.utils import see_memory_usage
@pytest.mark.parametrize('zero_stage', [0, 3])
class TestNoOptim(DistributedTest):
world_size = 1
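    # Forward-only run with no optimizer configured; with ZeRO stage 3 the parameters are
    # partitioned at construction time and offloaded to CPU.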
def test(self, zero_stage):
if zero_stage == 3 and not required_torch_version():
pytest.skip("zero-3 param offload requires at least torch 1.8")
ds_config = {
'train_batch_size': self.world_size,
'fp16': {
'enabled': True
},
'zero_optimization': {
"stage": zero_stage,
"offload_param": {
"device": "cpu"
}
}
}
# 20B test
#hidden_dim = 16 * 1024
hidden_dim = 4
with deepspeed.zero.Init(enabled=zero_stage == 3, config_dict_or_path=ds_config):
model = SimpleModel(hidden_dim, nlayers=78)
see_memory_usage('pre-init', force=True)
model, _, _, _ = deepspeed.initialize(model=model, config=ds_config)
see_memory_usage('post-init', force=True)
data_loader = random_dataloader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.half)
for batch in data_loader:
model(batch[0], batch[1])
see_memory_usage('post-fwds', force=True)
@pytest.mark.parametrize('optimizer_type', [None, Optimizer, Callable])
class TestClientOptimizer(DistributedTest):
world_size = 1
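    # The optimizer can come from the config (-> FusedAdam), from a client instance (used as-is),
    # or from a callable that DeepSpeed invokes with the model parameters.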
def test(self, optimizer_type):
def _optimizer_callable(params) -> Optimizer:
return AdamW(params=params)
hidden_dim = 10
model = SimpleModel(hidden_dim)
config_dict = {'train_batch_size': 1}
if optimizer_type is None:
client_optimizer = None
config_dict['optimizer'] = {'type': ADAM_OPTIMIZER}
elif optimizer_type is Optimizer:
client_optimizer = Adam(model.parameters())
else:
client_optimizer = _optimizer_callable
_, ds_optimizer, _, _ = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=list(model.parameters()),
optimizer=client_optimizer)
if client_optimizer is None:
assert isinstance(ds_optimizer, FusedAdam)
elif isinstance(client_optimizer, Optimizer):
assert ds_optimizer == client_optimizer
else:
assert isinstance(ds_optimizer, AdamW)
@pytest.mark.parametrize('client_parameters', [True, False])
class TestConfigOptimizer(DistributedTest):
world_size = 1
def test(self, client_parameters):
ds_config = {"train_batch_size": 1, "optimizer": {"type": "Adam", "params": {"lr": 0.001}}}
hidden_dim = 10
model = SimpleModel(hidden_dim)
if client_parameters:
model_parameters = list(model.parameters())
else:
model_parameters = None
_, ds_optimizer, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model_parameters)
assert isinstance(ds_optimizer, FusedAdam)
@pytest.mark.parametrize('optimizer_extension', ['zero1', 'zero2', 'amp', None])
@pytest.mark.parametrize('model_dtype', ['fp16', 'bf16', 'fp32'])
@pytest.mark.parametrize('grad_accum_dtype', [None, 'fp16', 'bf16', 'fp32'])
class TestOptimizerImplementation(DistributedTest):
world_size = 1
def test(self, optimizer_extension, model_dtype, grad_accum_dtype):
if optimizer_extension == 'zero1':
zero_stage = 1
elif optimizer_extension == 'zero2':
zero_stage = 2
else:
zero_stage = 0
amp = True if optimizer_extension == 'amp' else False
fp16 = True if model_dtype == 'fp16' else False
bf16 = True if model_dtype == 'bf16' else False
# Skip checks
if bf16 and not bf16_required_version_check():
pytest.skip(
"DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
)
if amp and not required_amp_check():
pytest.skip("Amp is not installed can't run amp check")
# Config declaration
ds_config = {
"train_batch_size": 1,
'fp16': {
'enabled': fp16
},
'bf16': {
'enabled': bf16
},
'amp': {
'enabled': amp
},
'zero_optimization': {
"stage": zero_stage
},
"data_types": {
"grad_accum_dtype": grad_accum_dtype
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.001
}
}
}
key = (optimizer_extension, model_dtype, grad_accum_dtype)
# Enumerate supported configurations
is_supported = {}
# ZeRO 1 Wrapper
is_supported[('zero1', 'fp16', None)] = True
is_supported[('zero1', 'fp16', 'fp16')] = True
is_supported[('zero1', 'bf16', None)] = True
is_supported[('zero1', 'bf16', 'bf16')] = True
is_supported[('zero1', 'bf16', 'fp32')] = True
is_supported[('zero1', 'fp32', None)] = True
is_supported[('zero1', 'fp32', 'fp32')] = True
# ZeRO 2 Wrapper
is_supported[('zero2', 'fp16', None)] = True
is_supported[('zero2', 'fp16', 'fp16')] = True
is_supported[('zero2', 'bf16', None)] = True
is_supported[('zero2', 'bf16', 'bf16')] = True
is_supported[('zero2', 'fp32', None)] = True
is_supported[('zero2', 'fp32', 'fp32')] = True
# Amp Wrapper
is_supported[('amp', 'fp32', None)] = True
is_supported[('amp', 'fp32', 'fp32')] = True
# FP16 Wrapper
is_supported[(None, 'fp16', None)] = True
is_supported[(None, 'fp16', 'fp16')] = True
# BF16 Wrapper
is_supported[(None, 'bf16', 'fp32')] = True
is_supported[(None, 'bf16', None)] = True
# No Wrapper
is_supported[(None, 'fp32', None)] = True
is_supported[(None, 'fp32', 'fp32')] = True
hidden_dim = 10
model = SimpleModel(hidden_dim)
model_parameters = list(model.parameters())
if key in is_supported:
_, ds_optimizer, _, _ = deepspeed.initialize(config=ds_config,
model=model,
model_parameters=model_parameters)
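            # Supported configuration: deepspeed.initialize should succeed without raising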
assert True
else:
with pytest.raises(NotImplementedError):
_, ds_optimizer, _, _ = deepspeed.initialize(config=ds_config,
model=model,
model_parameters=model_parameters)
@pytest.mark.parametrize("scheduler_type", [None, _LRScheduler, Callable])
@pytest.mark.parametrize("optimizer_type", [None, Optimizer, Callable])
class TestClientLrScheduler(DistributedTest):
world_size = 1
def test(self, scheduler_type, optimizer_type):
def _my_lambda(epoch):
return epoch // 10
def _optimizer_callable(params) -> Optimizer:
return torch.optim.AdamW(params=params)
def _lr_scheduler_callable(optimizer) -> _LRScheduler:
return LambdaLR(optimizer, _my_lambda)
hidden_dim = 10
model = SimpleModel(hidden_dim)
config_dict = {'train_batch_size': 1}
client_optimizer = None
client_scheduler = None
if optimizer_type is None:
config_dict['optimizer'] = {'type': ADAM_OPTIMIZER}
elif optimizer_type is Optimizer:
client_optimizer = torch.optim.Adam(model.parameters())
else:
client_optimizer = _optimizer_callable
if scheduler_type is None:
config_dict['scheduler'] = {'type': WARMUP_LR, 'params': {}}
elif scheduler_type == _LRScheduler:
if isinstance(client_optimizer, Optimizer):
client_scheduler = LambdaLR(client_optimizer, _my_lambda)
else:
# Verify invalid combination is correctly handled
client_scheduler = LambdaLR(torch.optim.Adam(model.parameters()), _my_lambda)
else:
client_scheduler = _lr_scheduler_callable
if isinstance(client_scheduler, _LRScheduler) and not isinstance(client_optimizer, Optimizer):
with pytest.raises(AssertionError):
_, _, _, _ = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=list(model.parameters()),
optimizer=client_optimizer,
lr_scheduler=client_scheduler)
else:
_, _, _, ds_lr_scheduler = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=list(model.parameters()),
optimizer=client_optimizer,
lr_scheduler=client_scheduler)
if client_scheduler is None:
assert isinstance(ds_lr_scheduler, WarmupLR)
elif isinstance(client_scheduler, _LRScheduler):
assert ds_lr_scheduler == client_scheduler
else:
assert isinstance(ds_lr_scheduler, LambdaLR)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import numpy as np
import deepspeed
import pytest
from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop
from unit.common import DistributedTest
from unit.simple_model import SimpleModel, PLD_SimpleModel, random_dataloader
@pytest.mark.parametrize('theta', [0, 0.1, 0.9, 1.0])
def test_pld_schedule(tmpdir, theta):
gamma = 0.001
pld_scheduler = ProgressiveLayerDrop(theta, gamma)
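    # The PLD keep probability decays from 1 toward theta: theta_t = (1 - theta) * exp(-gamma * t) + theta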
for i in range(10):
pld_scheduler.update_state(i)
expected_theta = (1. - theta) * np.exp(-gamma * i) + theta
actual_theta = pld_scheduler.get_theta()
assert expected_theta == actual_theta
@pytest.mark.parametrize('theta', [0, 0.1, 0.9, 1.0])
class TestPLDModel(DistributedTest):
world_size = 1
def test_pld_model(self, theta):
gamma = 0.001
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": 'Adam',
"params": {
"lr": 0.0001
}
},
"fp16": {
"enabled": True
},
"progressive_layer_drop": {
"enabled": True,
"theta": theta,
"gamma": gamma
}
}
hidden_dim = 10
model = PLD_SimpleModel(hidden_dim, empty_grad=False)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for i, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
expected_theta = (1. - theta) * np.exp(-gamma * i) + theta
actual_theta = model.get_pld_theta()
assert expected_theta == actual_theta
class TestNonPLDModel(DistributedTest):
world_size = 1
def test_non_pld_model(self):
gamma = 0.001
theta = 0.5
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": 'Adam',
"params": {
"lr": 0.0001
}
},
"fp16": {
"enabled": True
},
"progressive_layer_drop": {
"enabled": True,
"theta": theta,
"gamma": gamma
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=1, hidden_dim=hidden_dim, device=model.device)
for i, batch in enumerate(data_loader):
with pytest.raises(TypeError):
loss = model(batch[0], batch[1])
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.utils import RepeatingLoader
import torch
import pytest
import deepspeed
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
from unit.simple_model import SimpleModel, random_dataset
def test_repeating_loader():
loader = [1, 2, 3]
loader = RepeatingLoader(loader)
for idx in range(50):
assert next(loader) == 1
assert next(loader) == 2
assert next(loader) == 3
@pytest.mark.parametrize('train_batch_size, drop_last', [(1, True), (4, True), (1, False), (4, False)])
class TestDataLoaderDropLast(DistributedTest):
world_size = 1
def test(self, train_batch_size, drop_last):
config_dict = {"train_batch_size": train_batch_size, "dataloader_drop_last": drop_last, "steps_per_print": 1}
hidden_dim = 10
model = SimpleModel(hidden_dim)
optimizer = torch.optim.AdamW(params=model.parameters())
        # TODO: there is no way to set DeepSpeedEngine.deepspeed_io params, so we need to use
        # pin_memory=False for the CUDA device
train_dataset = random_dataset(total_samples=50,
hidden_dim=hidden_dim,
device=torch.device('cpu'),
dtype=torch.float32)
model, _, training_dataloader, _ = deepspeed.initialize(config=config_dict,
model=model,
training_data=train_dataset,
optimizer=optimizer)
for n, batch in enumerate(training_dataloader):
x = batch[0].to(get_accelerator().current_device_name())
y = batch[1].to(get_accelerator().current_device_name())
loss = model(x, y)
model.backward(loss)
model.step()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import os
import deepspeed
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
from unit.simple_model import Curriculum_SimpleModel, SimpleModel, random_dataloader, random_dataset
class MPU():
def __init__(self, tp_world_size):
self.rank = deepspeed.comm.get_rank()
self.world_size = deepspeed.comm.get_world_size()
self.tp_world_size = tp_world_size
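        # Consecutive ranks [i, i + tp_world_size) form a tensor-parallel group;
        # ranks spaced tp_world_size apart form a data-parallel group.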
for i in range(0, self.world_size, tp_world_size):
ranks = range(i, i + tp_world_size)
group = deepspeed.comm.new_group(ranks)
if self.rank in ranks:
self.tp_group = group
for i in range(0, tp_world_size):
ranks = range(i, self.world_size, tp_world_size)
group = deepspeed.comm.new_group(ranks)
if self.rank in ranks:
self.dp_group = group
def get_model_parallel_rank(self):
return self.rank % self.tp_world_size
def get_model_parallel_world_size(self):
return self.tp_world_size
def get_data_parallel_rank(self):
return self.rank // self.tp_world_size
def get_data_parallel_world_size(self):
return self.world_size // self.tp_world_size
def get_data_parallel_group(self):
return self.dp_group
def get_model_parallel_group(self):
return self.tp_group
class TestDataEfficiency(DistributedTest):
world_size = 2
def test_curriculum_learning(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015,
"weight_decay": 0.01
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
"data_efficiency": {
"enabled": True,
"seed": 1234,
"data_sampling": {
"enabled": True,
"num_workers": 0,
"curriculum_learning": {
"enabled": True,
"data_cluster_path": "/tmp",
"curriculum_metrics": {
"dummy_metric": {
"index_to_sample_path": "dummy",
"index_to_metric_path": "dummy",
"difficulty_type": "value",
"clustering_type": "single_cluster",
"min_difficulty": 2,
"max_difficulty": 10,
"schedule_type": "fixed_root",
"schedule_config": {
"total_curriculum_step": 8,
"difficulty_step": 2,
"root_degree": 1
}
}
}
}
}
}
}
def data_post_process(data, data_sampler_state_dict):
assert 'dummy_metric' in data_sampler_state_dict['current_difficulties']
return data
hidden_dim = 10
model = SimpleModel(hidden_dim)
dataset = random_dataset(20, hidden_dim, torch.device('cpu'), dtype=torch.half)
model, _, data_loader, _ = deepspeed.initialize(config=config_dict,
model=model,
training_data=dataset,
model_parameters=model.parameters(),
mpu=MPU(1))
if model.mpu.get_data_parallel_rank() == 0 and not os.path.exists('/tmp'):
os.makedirs('/tmp')
model.set_data_post_process_func(data_post_process)
for n, batch in enumerate(data_loader):
x = batch[0].to(get_accelerator().current_device_name())
y = batch[1].to(get_accelerator().current_device_name())
loss = model(x, y)
model.backward(loss)
model.step()
if n >= 10:
break
class TestLegacyCurriculumScheduler(DistributedTest):
world_size = 2
def test_fixed_discrete(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015,
"weight_decay": 0.01
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
"curriculum_learning": {
"enabled": True,
"curriculum_type": "seqlen",
"min_difficulty": 1,
"max_difficulty": 5,
"schedule_type": "fixed_discrete",
"schedule_config": {
"difficulty": [1, 2, 3, 4, 5],
"max_step": [2, 4, 6, 8]
}
}
}
hidden_dim = 10
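        # fixed_discrete schedule: seqlen steps through the "difficulty" list, advancing at each "max_step" boundary
        # (steps 1-2 -> 1, 3-4 -> 2, 5-6 -> 3, 7-8 -> 4, afterwards 5).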
ground_truths = {1: 1, 2: 1, 3: 2, 4: 2, 5: 3, 6: 3, 7: 4, 8: 4}
model = Curriculum_SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=20, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss, seqlen = model(batch[0], batch[1])
model.backward(loss)
model.step()
true_seqlen = 5
if n + 1 in ground_truths:
true_seqlen = ground_truths[n + 1]
            assert seqlen == true_seqlen, f"Incorrect curriculum schedule: expected seqlen {true_seqlen} at step {n + 1}, got {seqlen}"
def test_fixed_linear(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015,
"weight_decay": 0.01
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
"curriculum_learning": {
"enabled": True,
"curriculum_type": "seqlen",
"min_difficulty": 2,
"max_difficulty": 10,
"schedule_type": "fixed_linear",
"schedule_config": {
"total_curriculum_step": 8,
"difficulty_step": 2
}
}
}
hidden_dim = 10
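        # fixed_linear schedule: seqlen grows linearly from min_difficulty (2) to max_difficulty (10)
        # over total_curriculum_step (8) steps, rounded down to multiples of difficulty_step (2).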
ground_truths = {1: 2, 2: 4, 3: 4, 4: 6, 5: 6, 6: 8, 7: 8, 8: 10, 9: 10, 10: 10}
model = Curriculum_SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=20, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss, seqlen = model(batch[0], batch[1])
model.backward(loss)
model.step()
if n + 1 in ground_truths:
true_seqlen = ground_truths[n + 1]
                assert seqlen == true_seqlen, f"Incorrect curriculum schedule: expected seqlen {true_seqlen} at step {n + 1}, got {seqlen}"
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# A test on its own
import os
import pytest
import json
import hjson
import argparse
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest, get_test_path
from unit.simple_model import SimpleModel, create_config_from_dict, random_dataloader
import deepspeed.comm as dist
# A test on its own
import deepspeed
from deepspeed.runtime.config import DeepSpeedConfig, get_bfloat16_enabled
class TestBasicConfig(DistributedTest):
world_size = 1
def test_accelerator(self):
assert (get_accelerator().is_available())
def test_check_version(self):
assert hasattr(deepspeed, "__git_hash__")
assert hasattr(deepspeed, "__git_branch__")
assert hasattr(deepspeed, "__version__")
assert hasattr(deepspeed, "__version_major__")
assert hasattr(deepspeed, "__version_minor__")
assert hasattr(deepspeed, "__version_patch__")
@pytest.fixture
def base_config():
config_dict = {
"train_batch_size": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True
}
}
return config_dict
def _run_batch_config(ds_config, train_batch=None, micro_batch=None, gas=None):
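    """ Apply the given batch size parameters and return False if DeepSpeed rejects the combination. """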
ds_config.train_batch_size = train_batch
ds_config.train_micro_batch_size_per_gpu = micro_batch
ds_config.gradient_accumulation_steps = gas
success = True
try:
ds_config._configure_train_batch_size()
except AssertionError:
success = False
return success
def _batch_assert(status, ds_config, batch, micro_batch, gas, success):
if not success:
assert not status
print("Failed but All is well")
return
assert ds_config.train_batch_size == batch
assert ds_config.train_micro_batch_size_per_gpu == micro_batch
assert ds_config.gradient_accumulation_steps == gas
print("All is well")
# Tests different batch configurations provided in the deepspeed json file
@pytest.mark.parametrize('num_ranks,batch,micro_batch,gas,success',
[(2,32,16,1,True),
(2,32,8,2,True),
(2,33,17,2,False),
(2,32,18,1,False)]) # yapf: disable
class TestBatchConfig(DistributedTest):
world_size = 2
def test(self, num_ranks, batch, micro_batch, gas, success):
assert dist.get_world_size() == num_ranks, \
            f'The test assumes a world size of {num_ranks}'
ds_batch_config = get_test_path('ds_batch_config.json')
ds_config = DeepSpeedConfig(ds_batch_config)
#test cases when all parameters are provided
status = _run_batch_config(ds_config, train_batch=batch, micro_batch=micro_batch, gas=gas)
_batch_assert(status, ds_config, batch, micro_batch, gas, success)
#test cases when two out of three parameters are provided
status = _run_batch_config(ds_config, train_batch=batch, micro_batch=micro_batch)
_batch_assert(status, ds_config, batch, micro_batch, gas, success)
if success:
#when gas is provided with one more parameter
status = _run_batch_config(ds_config, train_batch=batch, gas=gas)
_batch_assert(status, ds_config, batch, micro_batch, gas, success)
status = _run_batch_config(ds_config, micro_batch=micro_batch, gas=gas)
_batch_assert(status, ds_config, batch, micro_batch, gas, success)
#test the case when only micro_batch or train_batch is provided
if gas == 1:
status = _run_batch_config(ds_config, micro_batch=micro_batch)
_batch_assert(status, ds_config, batch, micro_batch, gas, success)
status = _run_batch_config(ds_config, train_batch=batch)
_batch_assert(status, ds_config, batch, micro_batch, gas, success)
else:
#when only gas is provided
status = _run_batch_config(ds_config, gas=gas)
_batch_assert(status, ds_config, batch, micro_batch, gas, success)
#when gas is provided with something else and gas does not divide batch
if gas != 1:
status = _run_batch_config(ds_config, train_batch=batch, gas=gas)
_batch_assert(status, ds_config, batch, micro_batch, gas, success)
def test_temp_config_json(tmpdir):
config_dict = {
"train_batch_size": 1,
}
config_path = create_config_from_dict(tmpdir, config_dict)
    with open(config_path, 'r') as fp:
        config_json = json.load(fp)
    assert 'train_batch_size' in config_json
@pytest.mark.parametrize("gather_weights_key",
["stage3_gather_16bit_weights_on_model_save", "stage3_gather_fp16_weights_on_model_save"])
def test_gather_16bit_params_on_model_save(gather_weights_key):
config_dict = {
gather_weights_key: True,
}
config = DeepSpeedZeroConfig(**config_dict)
assert config.gather_16bit_weights_on_model_save == True
@pytest.mark.parametrize("bf16_key", ["bf16", "bfloat16"])
def test_get_bfloat16_enabled(bf16_key):
cfg = {
bf16_key: {
"enabled": True,
},
}
assert get_bfloat16_enabled(cfg) == True
class TestConfigLoad(DistributedTest):
world_size = 1
def test_dict(self, base_config):
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=base_config, model=model, model_parameters=model.parameters())
def test_json(self, base_config, tmpdir):
config_path = os.path.join(tmpdir, "config.json")
with open(config_path, 'w') as fp:
json.dump(base_config, fp)
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_path, model=model, model_parameters=model.parameters())
def test_hjson(self, base_config, tmpdir):
config_path = os.path.join(tmpdir, "config.json")
with open(config_path, 'w') as fp:
hjson.dump(base_config, fp)
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_path, model=model, model_parameters=model.parameters())
class TestDeprecatedDeepScaleConfig(DistributedTest):
world_size = 1
def test(self, base_config, tmpdir):
config_path = create_config_from_dict(tmpdir, base_config)
parser = argparse.ArgumentParser()
args = parser.parse_args(args='')
args.deepscale_config = config_path
args.local_rank = 0
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(args=args, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestDistInit(DistributedTest):
world_size = 1
def test(self, base_config):
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=base_config,
model=model,
model_parameters=model.parameters(),
dist_init_required=True)
data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestInitNoOptimizer(DistributedTest):
world_size = 1
def test(self, base_config):
del base_config["optimizer"]
hidden_dim = 10
model = SimpleModel(hidden_dim=hidden_dim)
model, _, _, _ = deepspeed.initialize(config=base_config, model=model)
data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
with pytest.raises(AssertionError):
model.backward(loss)
with pytest.raises(AssertionError):
model.step()
class TestArgs(DistributedTest):
world_size = 1
def test_none_args(self, base_config):
model = SimpleModel(hidden_dim=10)
model, _, _, _ = deepspeed.initialize(args=None, model=model, config=base_config)
data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
def test_no_args(self, base_config):
model = SimpleModel(hidden_dim=10)
model, _, _, _ = deepspeed.initialize(model=model, config=base_config)
data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
class TestNoModel(DistributedTest):
world_size = 1
def test(self, base_config):
model = SimpleModel(hidden_dim=10)
with pytest.raises(AssertionError):
model, _, _, _ = deepspeed.initialize(model=None, config=base_config)
with pytest.raises(AssertionError):
model, _, _, _ = deepspeed.initialize(model, config=base_config)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
from unit.common import DistributedTest
from unit.util import skip_on_arch
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.EmbeddingBag(10, 3, mode="sum", sparse=True)
self.linear = torch.nn.Linear(3, 1)
def forward(self, x, offsets):
return self.linear(self.emb(x, offsets))
class Adam(torch.optim.Optimizer):
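    """Composite optimizer: dense params are handled by torch.optim.Adam, sparse embedding params by torch.optim.SparseAdam."""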
def __init__(self, dense_params, sparse_params):
super().__init__(dense_params + sparse_params, defaults={})
self.adam = torch.optim.Adam(dense_params)
self.adam_sparse = torch.optim.SparseAdam(sparse_params)
@torch.no_grad()
def step(self, closure=None):
loss_1 = self.adam.step(closure)
loss_2 = self.adam_sparse.step(closure)
if loss_1 is not None and loss_2 is not None:
return loss_1 + loss_2
return loss_1 or loss_2
def get_model_optimizer():
torch.manual_seed(0)
model = Model()
optimizer = Adam(list(model.linear.parameters()), list(model.emb.parameters()))
return model, optimizer
def get_data(device):
x = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long, device=device)
offsets = torch.tensor([0, 4], dtype=torch.long, device=device)
y = torch.tensor([[1.0], [0.0]], device=device)
return x, offsets, y
class TestSparseAdam(DistributedTest):
world_size = 2
def test(self):
skip_on_arch(min_arch=7)
config_dict = {"train_batch_size": 2, "steps_per_print": 1, "sparse_gradients": True}
model, optimizer = get_model_optimizer()
loss = torch.nn.BCEWithLogitsLoss()
engine, _, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config=config_dict)
x, offsets, y = get_data(engine.device)
engine.gradient_average = True
res = engine(x, offsets)
engine.backward(loss(res, y))
averaged_grads = {}
for k, v in engine.named_parameters():
grad = v.grad.to_dense() if v.grad.is_sparse else v.grad
averaged_grads[k] = grad
v.grad = None
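        # Second pass without gradient averaging: summed gradients should equal the averaged gradients times world_size.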
engine.gradient_average = False
res = engine(x, offsets)
engine.backward(loss(res, y))
for k, v in engine.named_parameters():
grad = v.grad.to_dense() if v.grad.is_sparse else v.grad
assert torch.allclose(grad, averaged_grads[k] * engine.world_size)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
from unit.common import DistributedTest
import deepspeed.utils.groups as groups
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.EmbeddingBag(10, 3, mode="sum", sparse=True)
self.linear = torch.nn.Linear(3, 1)
def forward(self, x, offsets):
return self.linear(self.emb(x, offsets))
class Adam(torch.optim.Optimizer):
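    """Composite optimizer: dense params are handled by torch.optim.Adam, sparse embedding params by torch.optim.SparseAdam."""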
def __init__(self, dense_params, sparse_params):
super().__init__(dense_params + sparse_params, defaults={})
self.adam = torch.optim.Adam(dense_params)
self.adam_sparse = torch.optim.SparseAdam(sparse_params)
@torch.no_grad()
def step(self, closure=None):
loss_1 = self.adam.step(closure)
loss_2 = self.adam_sparse.step(closure)
if loss_1 is not None and loss_2 is not None:
return loss_1 + loss_2
return loss_1 or loss_2
class TestSparseAdam(DistributedTest):
world_size = 2
def test(self):
config_dict = {"train_batch_size": 2, "steps_per_print": 1, "sparse_gradients": True}
model = Model()
optimizer = Adam(list(model.linear.parameters()), list(model.emb.parameters()))
engine, _, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config=config_dict)
loss = torch.nn.BCEWithLogitsLoss()
x = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long, device=engine.device)
offsets = torch.tensor([0, 4], dtype=torch.long, device=engine.device)
y = torch.tensor([[1.0], [0.0]], device=engine.device)
res = engine(x, offsets)
engine.backward(loss(res, y))
engine.step()
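        # After the step, the sparse embedding parameters must be identical across the data-parallel group.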
results = [engine.all_gather_scalar(i, groups._get_data_parallel_group()) for i in model.emb.parameters()]
for res in results:
assert torch.allclose(res[0], res[1])
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import random
from deepspeed.runtime.sparse_tensor import SparseTensor
def test_csr_addition_self():
row_count = 10
random.seed(1234)
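    # Build a matrix whose rows are randomly all-ones or all-zeros, then verify sparse addition matches the dense result.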
x = torch.ones(1, 5)
for i in range(row_count - 1):
if random.random() > 0.75:
x = torch.cat([x, torch.ones(1, 5)])
else:
x = torch.cat([x, torch.zeros(1, 5)])
dense_x = x.clone()
cx = SparseTensor(x)
assert torch.all(dense_x == cx.to_dense())
cx.add(cx)
assert torch.all(dense_x + dense_x == cx.to_dense())
def test_csr_addition_different():
row_count = 10
random.seed(1234)
x = torch.ones(1, 5)
for i in range(row_count - 1):
if random.random() > 0.75:
x = torch.cat([x, torch.ones(1, 5)])
else:
x = torch.cat([x, torch.zeros(1, 5)])
dense_x = x.clone()
cx = SparseTensor(x)
y = torch.ones(1, 5)
for i in range(row_count - 1):
if random.random() > 0.75:
y = torch.cat([y, torch.ones(1, 5)])
else:
y = torch.cat([y, torch.zeros(1, 5)])
dense_y = y.clone()
cy = SparseTensor(y)
dense_sum = dense_x + dense_y
cx.add(cy)
assert torch.all(dense_sum == cx.to_dense())
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import deepspeed.runtime.pipe.schedule as schedule
def _count_type(cmds, classtype):
return len(list(filter(lambda c: type(c) == classtype, cmds)))
def test_pipe_inference_schedule_singlestage():
sched = schedule.InferenceSchedule(micro_batches=4, stages=1, stage_id=0)
assert sched.num_micro_batches == 4
full = list(iter(sched))
for idx, cmds in enumerate(full):
assert len(cmds) == 2
assert type(cmds[0]) == schedule.LoadMicroBatch
assert type(cmds[1]) == schedule.ForwardPass
assert cmds[0].buffer_id == cmds[1].buffer_id
assert len(full) == sched.num_micro_batches
def test_pipe_train_schedule_singlestage():
sched = schedule.TrainSchedule(micro_batches=4, stages=1, stage_id=0)
assert sched.num_micro_batches == 4
full = list(iter(sched))
for idx, cmds in enumerate(full):
if (idx % 2) != 0:
assert (len(cmds) == 1) or (len(cmds) == 4)
assert type(cmds[0]) == schedule.BackwardPass
else:
assert len(cmds) == 2
assert type(cmds[0]) == schedule.LoadMicroBatch
assert type(cmds[1]) == schedule.ForwardPass
assert cmds[0].buffer_id == cmds[1].buffer_id
assert len(full) == sched.num_micro_batches * 2
@pytest.mark.parametrize('micro_batches', [1, 3, 8, 10])
def test_pipe_inference_schedule_firststage(micro_batches, stages=3):
sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=stages, stage_id=0)
assert sched.num_micro_batches == micro_batches
full = list(iter(sched))
for idx, cmds in enumerate(full):
        # Ensure we don't send an activation on the first step
if idx == 0:
assert len(cmds) == 2
assert type(cmds[0]) == schedule.LoadMicroBatch
assert type(cmds[1]) == schedule.ForwardPass
assert cmds[0].buffer_id == cmds[1].buffer_id
continue
# the last active step is only a send
if idx == sched.num_micro_batches:
assert len(cmds) == 1
assert type(cmds[0]) == schedule.SendActivation
continue
# no work later on
if idx > sched.num_micro_batches:
assert len(cmds) == 0
continue
# Normally we need to load/forward/send
assert len(cmds) == 3
assert _count_type(cmds, schedule.LoadMicroBatch) == 1
assert _count_type(cmds, schedule.ForwardPass) == 1
assert _count_type(cmds, schedule.SendActivation) == 1
assert len(full) == micro_batches + stages - 1
@pytest.mark.parametrize('micro_batches', [1, 3, 8, 10])
def test_pipe_inference_schedule_midstage(micro_batches, stages=3):
sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=stages, stage_id=1)
full = list(iter(sched))
for idx, cmds in enumerate(full):
if idx < sched.stage:
assert len(cmds) == 0
continue
if idx == sched.stage + sched.num_micro_batches:
assert len(cmds) == 1
assert type(cmds[0]) == schedule.SendActivation
continue
if idx > sched.stage + sched.num_micro_batches:
assert len(cmds) == 0
continue
assert _count_type(cmds, schedule.LoadMicroBatch) == 0
assert _count_type(cmds, schedule.ForwardPass) == 1
assert _count_type(cmds, schedule.RecvActivation) == 1
if idx > sched.stage:
assert _count_type(cmds, schedule.SendActivation) == 1
assert len(full) == micro_batches + stages - 1
@pytest.mark.parametrize('micro_batches', [1, 3, 8, 10])
def test_pipe_inference_schedule_laststage(micro_batches, stages=3):
sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=stages, stage_id=2)
full = list(iter(sched))
for idx, cmds in enumerate(full):
if idx < sched.stage or idx > sched.stage + sched.num_micro_batches:
assert len(cmds) == 0
continue
assert _count_type(cmds, schedule.LoadMicroBatch) == 1
assert _count_type(cmds, schedule.ForwardPass) == 1
assert _count_type(cmds, schedule.RecvActivation) == 1
assert _count_type(cmds, schedule.SendActivation) == 0
assert len(full) == micro_batches + stages - 1
def test_pipe_schedule_firststage():
sched = schedule.TrainSchedule(micro_batches=8, stages=3, stage_id=0)
for cmds in sched:
assert all(instr.__class__ != schedule.SendGrad for instr in cmds)
assert all(instr.__class__ != schedule.RecvActivation for instr in cmds)
for instr in cmds:
if isinstance(instr, schedule.BufferOpInstruction):
assert 0 <= instr.buffer_id < sched.num_pipe_buffers()
def test_pipe_schedule_laststage():
sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=2)
assert len(list(iter(sched))) == 2 * (sched.micro_batches + sched.stages - 1)
for cmds in sched:
assert all(instr.__class__ != schedule.SendActivation for instr in cmds)
assert all(instr.__class__ != schedule.RecvGrad for instr in cmds)
def test_pipe_stagequery():
sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=0)
assert sched.is_first_stage
assert not sched.is_last_stage
sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=1)
assert not sched.is_first_stage
assert not sched.is_last_stage
sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=2)
assert not sched.is_first_stage
assert sched.is_last_stage
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import copy
import torch.nn as nn
import pytest
import deepspeed.comm as dist
from deepspeed.runtime.pipe.topology import PipeDataParallelTopology
from deepspeed.runtime.pipe.module import PipelineModule
from unit.alexnet_model import AlexNetPipe, train_cifar
from unit.common import DistributedTest
from unit.util import skip_on_arch
PipeTopo = PipeDataParallelTopology
def rel_diff(A, B):
return abs(A - B) / abs(A)
@pytest.mark.parametrize('topo_config', [
{
"num_pp": 1,
"num_dp": 4
},
{
"num_pp": 2,
"num_dp": 2
},
{
"num_pp": 4,
"num_dp": 1
},
])
class TestPipeCifar10(DistributedTest):
world_size = 4
def test(self, topo_config):
skip_on_arch(min_arch=7)
config_dict = {
"train_batch_size": 16,
"train_micro_batch_size_per_gpu": 4,
"steps_per_print": 20,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.001,
"betas": [0.9, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"zero_optimization": {
"stage": 0
},
"fp16": {
"enabled": False
},
"pipeline": {
"seed_layers": True,
"activation_checkpoint_interval": 1
}
}
topo = PipeTopo(**topo_config)
steps = 500 # must be >=100
# Allocate model for consistent initial weights.
init_net = AlexNetPipe()
base_net = copy.deepcopy(init_net)
base_model = PipelineModule(layers=base_net.to_layers(), num_stages=1, loss_fn=nn.CrossEntropyLoss())
# Train with just data parallelism
base_losses = train_cifar(base_model, config=config_dict, num_steps=steps, fp16=config_dict['fp16']['enabled'])
test_net = copy.deepcopy(init_net)
test_model = PipelineModule(layers=test_net.to_layers(), topology=topo, loss_fn=nn.CrossEntropyLoss())
test_losses = train_cifar(test_model, config=config_dict, num_steps=steps, fp16=config_dict['fp16']['enabled'])
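        # The data-parallel baseline and the pipeline-parallel run start from identical weights, so their loss curves should track each other.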
abs_diffs = [l0 - l1 for l0, l1 in zip(base_losses, test_losses)]
rel_diffs = [rel_diff(l0, l1) for l0, l1 in zip(base_losses, test_losses)]
if dist.get_rank() == 0:
print(f'abs min={min(abs_diffs)} max={max(abs_diffs)} avg={sum(abs_diffs)/len(abs_diffs)}')
print(f'rel min={min(rel_diffs)} max={max(rel_diffs)} avg={sum(rel_diffs)/len(rel_diffs)}')
print(f'first: base={base_losses[0]} test={test_losses[0]} abs={abs_diffs[0]} rel={rel_diffs[0]}')
for lastX in [1, 10, 100]:
base_avg = sum(base_losses[-lastX:]) / lastX
test_avg = sum(test_losses[-lastX:]) / lastX
print(
f'last-{lastX}: base={base_avg} test={test_avg} abs={base_avg - test_avg} rel={rel_diff(base_avg, test_avg)}'
)
lastX = 100
base = base_losses[-lastX:]
base_avg = sum(base) / len(base)
test = test_losses[-lastX:]
test_avg = sum(test) / len(test)
assert rel_diff(base_avg, test_avg) < 0.05 # Originally 0.03, but seeing instability with AMD results
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed.comm as dist
from deepspeed.runtime.pipe.topology import PipelineParallelGrid as Grid
from deepspeed.runtime.pipe.topology import ProcessTopology as Topo
from deepspeed.runtime.pipe.topology import _prime_factors
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
def test_topology_2d():
topo = Topo(axes=['row', 'col'], dims=[2, 2])
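    # Ranks are assigned in row-major order over the axes: rank = row * dims[1] + col.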
assert topo.world_size() == 4
assert topo.get_rank(row=0, col=0) == 0
assert topo.get_rank(row=0, col=1) == 1
assert topo.get_rank(row=1, col=0) == 2
assert topo.get_rank(row=1, col=1) == 3
assert topo.get_axis_list(axis='row', idx=0) == [0, 1]
assert topo.get_axis_list(axis='row', idx=1) == [2, 3]
assert topo.get_axis_list(axis='col', idx=0) == [0, 2]
assert topo.get_axis_list(axis='col', idx=1) == [1, 3]
def test_topology_dims():
topo = Topo(axes=['a', 'b', 'c'], dims=[2, 3, 4])
assert topo.world_size() == 24
assert topo.get_dim('a') == 2
assert topo.get_dim('b') == 3
assert topo.get_dim('c') == 4
def test_topology_match():
topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2])
print(topo.filter_match(pipe=0, data=1))
assert topo.filter_match(pipe=0, data=1) == [2, 3]
print([topo.get_coord(r) for r in topo.filter_match(pipe=0, data=1)])
def test_topology_rank_repr():
topo = Topo(axes=['a', 'b'], dims=[2, 2])
assert topo.get_rank_repr(rank=0) == 'a_00-b_00'
assert topo.get_rank_repr(rank=1) == 'a_00-b_01'
assert topo.get_rank_repr(rank=2) == 'a_01-b_00'
assert topo.get_rank_repr(rank=3) == 'a_01-b_01'
assert topo.get_rank_repr(rank=3, inner_sep='+') == 'a+01-b+01'
assert topo.get_rank_repr(rank=3, inner_sep='🤗', outer_sep='_JEFF_') == 'a🤗01_JEFF_b🤗01'
topo = Topo(axes=['pipe', 'data'], dims=[2, 2])
assert topo.get_rank_repr(rank=0) == ''
assert topo.get_rank_repr(rank=1) == ''
assert topo.get_rank_repr(rank=2) == ''
assert topo.get_rank_repr(rank=3) == ''
assert topo.get_rank_repr(rank=0, omit_axes=['pipe']) == 'data_00'
assert topo.get_rank_repr(rank=1, omit_axes=['pipe']) == 'data_01'
assert topo.get_rank_repr(rank=2, omit_axes=['pipe']) == 'data_00'
assert topo.get_rank_repr(rank=3, omit_axes=['pipe']) == 'data_01'
assert topo.get_rank_repr(rank=0, omit_axes=[]) == 'pipe_00-data_00'
assert topo.get_rank_repr(rank=1, omit_axes=[]) == 'pipe_00-data_01'
assert topo.get_rank_repr(rank=2, omit_axes=[]) == 'pipe_01-data_00'
assert topo.get_rank_repr(rank=3, omit_axes=[]) == 'pipe_01-data_01'
topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2])
assert topo.get_rank_repr(rank=0) == 'model_00'
assert topo.get_rank_repr(rank=1) == 'model_01'
assert topo.get_rank_repr(rank=2) == 'model_00'
assert topo.get_rank_repr(rank=3) == 'model_01'
assert topo.get_rank_repr(rank=4) == 'model_00'
assert topo.get_rank_repr(rank=5) == 'model_01'
assert topo.get_rank_repr(rank=6) == 'model_00'
assert topo.get_rank_repr(rank=7) == 'model_01'
def test_topology_3d():
topo = Topo(axes=['a', 'b', 'c'], dims=[2, 2, 2])
assert topo.get_rank(a=0, b=0, c=0) == 0
assert topo.get_rank(a=0, b=0, c=1) == 1
assert topo.get_rank(a=0, b=1, c=0) == 2
assert topo.get_rank(a=0, b=1, c=1) == 3
assert topo.get_rank(a=1, b=0, c=0) == 4
assert topo.get_rank(a=1, b=0, c=1) == 5
assert topo.get_rank(a=1, b=1, c=0) == 6
assert topo.get_rank(a=1, b=1, c=1) == 7
assert topo.get_axis_list('a', 0) == [0, 1, 2, 3]
assert topo.get_axis_list('a', 1) == [4, 5, 6, 7]
assert topo.get_axis_list('b', 0) == [0, 1, 4, 5]
assert topo.get_axis_list('b', 1) == [2, 3, 6, 7]
assert topo.get_axis_list('c', 0) == [0, 2, 4, 6]
assert topo.get_axis_list('c', 1) == [1, 3, 5, 7]
assert topo.get_coord(0) == topo.ProcessCoord(0, 0, 0)
assert topo.get_coord(1) == topo.ProcessCoord(0, 0, 1)
assert topo.get_coord(2) == topo.ProcessCoord(0, 1, 0)
assert topo.get_coord(3) == topo.ProcessCoord(0, 1, 1)
assert topo.get_coord(4) == topo.ProcessCoord(1, 0, 0)
assert topo.get_coord(5) == topo.ProcessCoord(1, 0, 1)
assert topo.get_coord(6) == topo.ProcessCoord(1, 1, 0)
assert topo.get_coord(7) == topo.ProcessCoord(1, 1, 1)
assert topo.filter_match(a=0) == [0, 1, 2, 3]
assert topo.filter_match(b=1, c=1) == [3, 7]
assert topo.filter_match(a=1, b=1, c=1) == [7]
# Easy access method
assert topo.get_coord(0).a == 0
def test_topology_comm_list():
topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2])
assert topo.get_rank(pipe=0, data=0, model=0) == 0
assert topo.get_rank(pipe=0, data=0, model=1) == 1
assert topo.get_rank(pipe=0, data=1, model=0) == 2
assert topo.get_rank(pipe=0, data=1, model=1) == 3
assert topo.get_rank(pipe=1, data=0, model=0) == 4
assert topo.get_rank(pipe=1, data=0, model=1) == 5
assert topo.get_rank(pipe=1, data=1, model=0) == 6
assert topo.get_rank(pipe=1, data=1, model=1) == 7
pipe_list = [
[0, 4], # data=0, model=0
[1, 5], # data=0, model=1
[2, 6], # data=1, model=0
[3, 7], # data=1, model=1
]
assert topo.get_axis_comm_lists('pipe') == pipe_list
data_list = [
[0, 2], # pipe=0, model=0
[1, 3], # pipe=0, model=1
[4, 6], # pipe=1, model=0
[5, 7], # pipe=1, model=1
]
assert topo.get_axis_comm_lists('data') == data_list
model_list = [
[0, 1], # pipe=0, data=0
[2, 3], # pipe=0, data=1
[4, 5], # pipe=1, data=0
[6, 7], # pipe=1, data=1
]
assert topo.get_axis_comm_lists('model') == model_list
    # Handle a nonexistent axis gracefully. Returning an empty list instead of raising a RuntimeError
    # lets callers write more generalized code for data/model/pipe parallelism.
assert topo.get_axis_comm_lists('jeff') == []
class TestDistributedTopology(DistributedTest):
world_size = 4
def test_grid_pipe_data(self):
topo = Topo(axes=['pipe', 'data'], dims=[2, 2])
grid = Grid(topology=topo)
assert grid._is_grid_valid()
rank = dist.get_rank()
assert grid.is_first_stage == (grid.get_stage_id() == 0)
assert grid.is_last_stage == (grid.get_stage_id() == grid.get_pipe_parallel_world_size() - 1)
# Test collectives along the pipeline parallel process groups
rank_tensor = torch.LongTensor(data=[rank]).to(get_accelerator().device_name())
dist.all_reduce(rank_tensor, group=grid.get_pipe_parallel_group())
pipe_group = grid.pp_group
assert torch.all(rank_tensor == sum(pipe_group))
# Test collectives along the data parallel process groups
rank_tensor = torch.LongTensor(data=[rank]).to(get_accelerator().device_name())
dist.all_reduce(rank_tensor, group=grid.get_data_parallel_group())
data_group = grid.dp_group
assert torch.all(rank_tensor == sum(data_group))
def test_stage_to_global(self):
topo = Topo(axes=['pipe', 'data'], dims=[2, 2])
grid = Grid(topology=topo)
assert grid._is_grid_valid()
assert grid.stage_to_global(stage_id=0, data=0) == 0
assert grid.stage_to_global(stage_id=0, data=1) == 1
assert grid.stage_to_global(stage_id=1, data=0) == 2
assert grid.stage_to_global(stage_id=1, data=1) == 3
me = topo.get_coord(rank=dist.get_rank())
if me.data == 0:
assert grid.stage_to_global(stage_id=0) == 0
assert grid.stage_to_global(stage_id=1) == 2
else:
assert grid.stage_to_global(stage_id=0) == 1
assert grid.stage_to_global(stage_id=1) == 3
def test_primes():
""" Test prime factorizations. """
def _product(ps):
p = 1
for num in ps:
p *= num
return p
with pytest.raises(ValueError):
_prime_factors(0)
for x in range(1, 30):
primes = _prime_factors(x)
assert _product(primes) == x
for p in primes:
assert _prime_factors(p) == [p]
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
unit tests for coalesced collectives
"""
import torch
import deepspeed.comm as dist
from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
class TestReduceScatterCoalesced(DistributedTest):
world_size = 2
def test_single_input(self):
input = torch.full((6, ), dist.get_rank(), dtype=torch.half, device=get_accelerator().current_device_name())
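        # Ranks contribute all-0s and all-1s; reduce_scatter_coalesced averages across the 2 ranks, so each 3-element shard is 0.5.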
(output, ) = reduce_scatter_coalesced([input], dist.get_world_group())
assert output.shape == (3, )
assert torch.allclose(output, torch.full_like(output, 0.5))
def test_two_inputs(self):
tensor_kwargs = {"device": get_accelerator().current_device_name(), "dtype": torch.half}
inputs = [
dist.get_rank() * torch.arange(0, 6, **tensor_kwargs),
dist.get_rank() * torch.arange(6, 9, **tensor_kwargs),
]
output1, output2 = reduce_scatter_coalesced(inputs, dist.get_world_group())
if dist.get_rank() == 0:
assert output1.shape == (3, )
assert torch.allclose(output1, torch.arange(0, 3, **tensor_kwargs) / 2)
assert output2.shape == (2, )
assert torch.allclose(output2, torch.arange(6, 8, **tensor_kwargs) / 2)
elif dist.get_rank() == 1:
assert output1.shape == (3, )
assert torch.allclose(output1, torch.arange(3, 6, **tensor_kwargs) / 2)
assert output2.shape == (1, )
assert torch.allclose(output2, torch.arange(8, 9, **tensor_kwargs) / 2)
class TestReduceScatterCoalescedTensorSmallerThanWorldSize(DistributedTest):
world_size = 2
def test(self):
input = torch.zeros((1, ), dtype=torch.half, device=get_accelerator().current_device_name())
(output, ) = reduce_scatter_coalesced([input], dist.get_world_group())
if dist.get_rank() == 0:
assert output.shape == (1, )
assert torch.allclose(output, torch.zeros_like(output))
elif dist.get_rank() == 1:
assert output.shape == (0, )
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed.comm as dist
from deepspeed.runtime.utils import partition_uniform
from deepspeed.runtime.utils import partition_balanced
from deepspeed.runtime.utils import prefix_sum_inc
from deepspeed.runtime.utils import PartitionedTensor
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
class TestPartitionedTensor(DistributedTest):
world_size = 4
def test(self):
world = dist.get_world_size()
rank = dist.get_rank()
group = dist.new_group(ranks=list(range(world)))
rows = world * 4
cols = 3
full = torch.rand(rows, cols).to(get_accelerator().device_name())
dist.broadcast(full, src=0, group=group)
part = PartitionedTensor(full, group=group)
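        # Each rank holds a flat shard of size numel / world_size; full() reassembles the original tensor.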
assert len(part.local_size()) == 1
assert part.local_size()[0] * world == full.numel()
reconstructed = part.full()
assert torch.equal(full, reconstructed)
class TestPartitionedTensorMeta(DistributedTest):
world_size = 4
def test(self):
world = dist.get_world_size()
rank = dist.get_rank()
group = dist.new_group(ranks=list(range(world)))
rows = world * 7
cols = 3
full = torch.rand(rows, cols).to(get_accelerator().device_name())
dist.broadcast(full, src=0, group=group)
part = PartitionedTensor(full, group=group)
my_meta = PartitionedTensor.from_meta(part.to_meta(), part.local_data, group)
assert torch.equal(full, my_meta.full())
def assert_valid_partition(weights, parts, P):
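    """ Check that parts is a non-decreasing partition of range(len(weights)) into P contiguous pieces covering [0, N]. """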
N = len(weights)
assert len(parts) == P + 1
assert parts[0] == 0
assert parts[P] == N
for idx in range(P):
assert parts[idx] <= parts[idx + 1]
def get_partition_weights(weights, parts):
""" Return the amount of weight in each partition. """
costs = [0] * (len(parts) - 1)
P = len(parts) - 1
for p in range(P):
start = parts[p]
stop = parts[p + 1]
costs[p] = sum(weights[start:stop])
return costs
def test_prefix_sum():
x = [3, 4, 5]
psum = prefix_sum_inc(x)
assert psum == [3, 7, 12]
def test_valid_partition():
N = 10
P = 1
weights = [1] * N
parts = partition_balanced(weights, P)
assert_valid_partition(weights, parts, P)
def test_short_partition_uniform():
N = 2
P = 4
weights = [1] * N
parts = partition_uniform(len(weights), P)
assert_valid_partition(weights, parts, P)
def test_short_partition():
N = 2
P = 4
weights = [1] * N
parts = partition_balanced(weights, P)
assert_valid_partition(weights, parts, P)
def test_easy_balance_uniform():
weights = [1] * 8
P = 4
parts = partition_uniform(len(weights), P)
assert_valid_partition(weights, parts, P)
costs = get_partition_weights(weights, parts)
assert all(c == 2 for c in costs)
def test_easy_balance_balanced():
weights = [1] * 8
P = 4
parts = partition_balanced(weights, P)
assert_valid_partition(weights, parts, P)
costs = get_partition_weights(weights, parts)
assert all(c == 2 for c in costs), costs
def test_int_balanced():
weights = [0, 1, 2, 3, 3, 3]
P = 4
parts = partition_balanced(weights, P)
assert parts == [0, 3, 4, 5, 6]
assert_valid_partition(weights, parts, P)
costs = get_partition_weights(weights, parts)
assert all(c == 3 for c in costs)
def test_float_balanced():
weights = [0., 1.1, 1.9, 3., 3., 3.]
P = 4
parts = partition_balanced(weights, P)
assert_valid_partition(weights, parts, P)
assert parts == [0, 3, 4, 5, 6]
@pytest.mark.skip(reason="Variance-minimizing partitioning returns different result.")
def test_float_lastheavy():
weights = [0., 1.1, 1.9, 3., 30.]
P = 2
parts = partition_balanced(weights, P)
assert_valid_partition(weights, parts, P)
assert parts == [0, 4, 5]
def test_float_midheavy():
weights = [0., 1.1, 30, 3.]
P = 3
parts = partition_balanced(weights, P)
assert_valid_partition(weights, parts, P)
assert parts == [0, 2, 3, 4]
def test_balance_bert():
    # Parameters per layer for a transformer model with 24 transformer layers and hidden dim 1024
weights = [
52559872, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224,
12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224, 12596224,
12596224, 12596224, 12596224, 0, 52559872
]
P = 8
parts = partition_balanced(weights, P)
assert_valid_partition(weights, parts, P)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig, DeepSpeedZeroOffloadParamConfig, DeepSpeedZeroOffloadOptimizerConfig
def test_zero_config_deprecatedfields():
config = DeepSpeedZeroConfig(**{"cpu_offload_param": True})
assert isinstance(config.offload_param, DeepSpeedZeroOffloadParamConfig)
config = DeepSpeedZeroConfig(**{"cpu_offload": True})
assert isinstance(config.offload_optimizer, DeepSpeedZeroOffloadOptimizerConfig)
config = DeepSpeedZeroConfig(**{"stage3_gather_fp16_weights_on_model_save": True})
assert config.gather_16bit_weights_on_model_save == True
def test_zero_config_aliasfields():
config = DeepSpeedZeroConfig(**{"stage3_prefetch_bucket_size": 12345})
assert config.prefetch_bucket_size == 12345
config = DeepSpeedZeroConfig(**{"stage3_param_persistence_threshold": 12345})
assert config.param_persistence_threshold == 12345
config = DeepSpeedZeroConfig(**{"stage3_max_reuse_distance": 12345})
assert config.max_reuse_distance == 12345
config = DeepSpeedZeroConfig(**{"stage3_gather_16bit_weights_on_model_save": True})
assert config.gather_16bit_weights_on_model_save == True
def test_zero_config_overlapcomm():
for stage in [0, 1, 2]:
config = DeepSpeedZeroConfig(**{"stage": stage})
assert config.overlap_comm == False
config = DeepSpeedZeroConfig(**{"stage": 3})
assert config.overlap_comm == True
def test_zero_config_offload_configs():
config = DeepSpeedZeroConfig()
assert config.offload_param == None
assert config.offload_optimizer == None
config = DeepSpeedZeroConfig(**{"offload_param": None, "offload_optimizer": None})
assert config.offload_param == None
assert config.offload_optimizer == None
config = DeepSpeedZeroConfig(**{"offload_param": {}, "offload_optimizer": {}})
assert isinstance(config.offload_param, DeepSpeedZeroOffloadParamConfig)
assert isinstance(config.offload_optimizer, DeepSpeedZeroOffloadOptimizerConfig)
def test_zero_offload_optimizer_config_pipeline():
config = DeepSpeedZeroOffloadOptimizerConfig()
assert config.pipeline == False
config = DeepSpeedZeroOffloadOptimizerConfig(**{"pipeline_read": True, "pipeline_write": False})
assert config.pipeline == True
config = DeepSpeedZeroOffloadOptimizerConfig(**{"pipeline_read": False, "pipeline_write": True})
assert config.pipeline == True
config = DeepSpeedZeroOffloadOptimizerConfig(**{"pipeline_read": True, "pipeline_write": True})
assert config.pipeline == True
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import copy
import torch
from deepspeed.runtime.zero.tiling import TiledLinear, TiledLinearReturnBias
import pytest
@pytest.mark.parametrize('in_splits,out_splits', [(1, 1), (2, 2), (5, 5), (32, 32)])
def test_tiled_init(in_splits, out_splits):
in_f = 32
out_f = 40
base = torch.nn.Linear(in_f, out_f, bias=True)
l = TiledLinear(in_f,
out_f,
bias=True,
init_linear=copy.deepcopy(base),
out_splits=out_splits,
in_splits=in_splits)
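    # Each (out_id, in_id) tile must equal the corresponding row/column slice of the base weight; only the last input split keeps a bias.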
for out_id in range(out_splits):
for in_id in range(in_splits):
local_l = l.linears[out_id][in_id]
assert isinstance(local_l, torch.nn.Linear)
rstart = l.out_parts[out_id]
rstop = l.out_parts[out_id + 1]
cstart = l.in_parts[in_id]
cstop = l.in_parts[in_id + 1]
local_out = rstop - rstart
local_in = cstop - cstart
assert local_l.weight.size()[1] == local_in, f'local[{out_id}][{in_id}].size {local_l.weight.size()}'
assert local_l.weight.size()[0] == local_out
test = base.weight[rstart:rstop, cstart:cstop]
assert local_l.weight.size() == test.size()
assert torch.equal(local_l.weight.data, test.data)
if in_id == in_splits - 1:
assert local_l.bias is not None
assert local_l.bias.size()[0] == local_out
else:
assert local_l.bias is None
@pytest.mark.parametrize('in_splits,out_splits', [(0, 0), (33, 33)])
def test_tiled_baddim(in_splits, out_splits):
dim = 32
with pytest.raises(RuntimeError):
l = TiledLinear(dim, dim, out_splits=out_splits, in_splits=in_splits)
@pytest.mark.skip(reason="seeing nondeterministic failures, skipping for now")
@pytest.mark.parametrize('bias', [False, True])
@pytest.mark.parametrize('in_splits,out_splits', [(1, 1), (2, 2)])
@pytest.mark.parametrize('in_f,out_f', [(32, 32), (23, 29), (29, 23)])
def test_tiled_forward(in_splits, out_splits, bias, in_f, out_f):
base = torch.nn.Linear(in_f, out_f, bias=bias)
test = TiledLinear(in_f,
out_f,
bias=bias,
init_linear=copy.deepcopy(base),
out_splits=out_splits,
in_splits=in_splits)
inp = torch.rand(in_f)
base_out = base(copy.deepcopy(inp))
test_out = test(copy.deepcopy(inp))
assert torch.allclose(base_out, test_out, rtol=1e-4)
@pytest.mark.skip(reason="seeing nondeterministic failures, skipping for now")
@pytest.mark.parametrize('bias', [False, True])
@pytest.mark.parametrize('in_splits,out_splits', [(1, 1), (2, 2)])
@pytest.mark.parametrize('in_f,out_f', [(32, 32), (23, 29), (29, 23)])
def test_tiled_backward(in_splits, out_splits, bias, in_f, out_f):
base = torch.nn.Linear(in_f, out_f, bias=bias)
test = TiledLinear(in_f,
out_f,
bias=bias,
init_linear=copy.deepcopy(base),
out_splits=out_splits,
in_splits=in_splits)
inp = torch.rand(in_f)
base_out = base(copy.deepcopy(inp))
test_out = test(copy.deepcopy(inp))
assert torch.allclose(base_out, test_out, rtol=1e-4)
base_out.sum().backward()
test_out.sum().backward()
# compare grads
for row in range(out_splits):
rstart = test.out_parts[row]
rstop = test.out_parts[row + 1]
for col in range(in_splits):
cstart = test.in_parts[col]
cstop = test.in_parts[col + 1]
local = test.linears[row][col]
base_grad = base.weight.grad[rstart:rstop, cstart:cstop]
assert torch.allclose(base_grad, local.weight.grad, rtol=1e-4)
if local.bias is not None:
base_grad = base.bias.grad[rstart:rstop]
assert torch.allclose(base_grad, local.bias.grad, rtol=1e-4)
class LinearWrapper(torch.nn.Linear):
"""Returns its own bias to simulate Megatron-LM's behavior.
    Megatron-LM optionally delays the bias addition so it can be fused with a subsequent kernel.
"""
def forward(self, input):
out = super().forward(input)
return out, self.bias
@pytest.mark.skip(reason="seeing nondeterministic failures, skipping for now")
@pytest.mark.parametrize('bias', [False, True])
@pytest.mark.parametrize('in_splits,out_splits', [(1, 1), (2, 2)])
@pytest.mark.parametrize('in_f,out_f', [(32, 32), (23, 29), (29, 23)])
def test_tiled_returnbias_backward(in_splits, out_splits, bias, in_f, out_f):
base = LinearWrapper(in_f, out_f, bias=bias)
test = TiledLinearReturnBias(in_f,
out_f,
bias=bias,
linear_cls=LinearWrapper,
init_linear=copy.deepcopy(base),
out_splits=out_splits,
in_splits=in_splits)
inp = torch.rand(in_f)
base_out_t, base_out_b = base(copy.deepcopy(inp))
test_out_t, test_out_b = test(copy.deepcopy(inp))
assert torch.allclose(base_out_t, test_out_t, rtol=1e-4)
if base_out_b is None:
assert test_out_b is None
base_out_b = torch.zeros_like(base_out_t)
test_out_b = torch.zeros_like(test_out_t)
else:
assert test_out_b is not None
assert torch.allclose(base_out_b, test_out_b, rtol=1e-4)
(base_out_t + base_out_b).sum().backward()
(test_out_t + test_out_b).sum().backward()
# compare grads
for row in range(out_splits):
rstart = test.out_parts[row]
rstop = test.out_parts[row + 1]
for col in range(in_splits):
cstart = test.in_parts[col]
cstop = test.in_parts[col + 1]
local = test.linears[row][col]
base_grad = base.weight.grad[rstart:rstop, cstart:cstop]
assert torch.allclose(base_grad, local.weight.grad, rtol=1e-4)
if local.bias is not None:
base_grad = base.bias.grad[rstart:rstop]
assert torch.allclose(base_grad, local.bias.grad, rtol=1e-4)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
from collections import namedtuple
from typing import Dict, List, NamedTuple, Set, Tuple
import pytest
import deepspeed.comm as dist
import torch
from torch import Tensor
from torch.nn import Linear, Module
from torch.nn.modules.container import ModuleList
from torch.nn.modules.loss import L1Loss
from torch.nn.parameter import Parameter
from unit.common import DistributedTest
from unit.simple_model import SimpleModel, random_dataloader
import deepspeed
from deepspeed.runtime.engine import DeepSpeedEngine
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
from deepspeed.runtime.zero.utils import ZeRORuntimeException
from deepspeed.accelerator import get_accelerator
def run_unbalanced_gradients(model, data_loader):
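    # Alternate which half of the parameters has requires_grad each iteration so gradients flow to different subsets (the "unbalanced" case).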
def drop_some_gradients(model, iter):
odd_iteration = iter % 2
for i, p in enumerate(model.parameters()):
p.requires_grad = (i % 2) == odd_iteration
def enable_grads(model):
for p in model.parameters():
p.requires_grad = True
for i, batch in enumerate(data_loader):
drop_some_gradients(model, i + 1)
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
enable_grads(model)
def dump_state_dict(model):
if dist.get_rank() == 0:
print("state_dict:")
for name, param in model.named_parameters():
print(f"{name} {param.data}")
@pytest.mark.parametrize('zero_stage', [1, 2, 3])
class TestZeroUnbalancedGradients(DistributedTest):
world_size = 1
def test(self, zero_stage):
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 2,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
hidden_dim = 4
model = SimpleModel(hidden_dim=hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device)
run_unbalanced_gradients(model, data_loader)
# testing the fix https://github.com/microsoft/DeepSpeed/pull/1227
class TestZero3RepeatForwardLoop(DistributedTest):
world_size = 1
def test(self, zero_stage=3):
# force all params to be partitioned by forcing threshold=0
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 2,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage,
"stage3_param_persistence_threshold": 0
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
hidden_dim = 4
class AlbertLikeModel(torch.nn.Module):
def __init__(self, hidden_dim):
super().__init__()
self.linear = torch.nn.Linear(hidden_dim, hidden_dim)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def forward(self, x, y):
# run the same layer multiple times in a loop - to test a stack of forwards, followed by a stack of backwards
hidden = x
for i in range(3):
hidden = hidden + self.linear(hidden)
return self.cross_entropy_loss(hidden, y)
model = AlbertLikeModel(hidden_dim=hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device)
for i, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
# testing the fix https://github.com/microsoft/DeepSpeed/pull/1227
# also reproduces the https://github.com/microsoft/DeepSpeed/pull/1372
@pytest.mark.parametrize('zero_stage', [2, 3])
@pytest.mark.parametrize('freeze_params', [True, False])
class TestZeroToFP32(DistributedTest):
world_size = 2
def test_1_param_group(self, tmpdir, zero_stage, freeze_params):
# XXX: ideally refactor with the 2_param_group test as 75% is the same
# force all params to be partitioned by setting the persistence threshold to 0
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 2,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage,
"stage3_param_persistence_threshold": 0
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
class MyModel(torch.nn.Module):
def __init__(self, hidden_dim, n_layers, freeze_params):
super().__init__()
# to reproduce https://github.com/microsoft/DeepSpeed/pull/1372 it is important that
# the number of total elements is uneven:
# (1) 4 layers of 3*(3+1)=12 elements each, 48 in total
self.ll = torch.nn.ModuleList(torch.nn.Linear(hidden_dim, hidden_dim) for i in range(n_layers))
# (2) the following adds 4+1=5 elements
self.classifier = torch.nn.Linear(4, 1)
# total 48+5=53 (uneven as desired) elements
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
if freeze_params:
self.ll[0].weight.requires_grad = False
self.ll[0].bias.requires_grad = False
def forward(self, x, y):
hidden = x
for l in self.ll:
hidden = l(hidden)
return self.cross_entropy_loss(hidden, y)
hidden_dim = 3 # do not change
world_size = dist.get_world_size()
# we want at least twice as many layers as GPUs to trigger the round_robin_fp16_groups reshuffle in ZeRO stage 2
n_layers = world_size * 2
model = MyModel(hidden_dim=hidden_dim, n_layers=n_layers, freeze_params=freeze_params)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
# Flush zero stage 3 cache
model.empty_partition_cache()
data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device)
for i, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
model.empty_partition_cache()
model.save_checkpoint(tmpdir)
# make sure all ranks saved the checkpoint
dist.barrier()
orig_state_dict = {}
for name, param in model.module.named_parameters():
if zero_stage == 3:
with deepspeed.zero.GatheredParameters(param, modifier_rank=None):
orig_state_dict[name] = param.detach().cpu()
else:
orig_state_dict[name] = param.detach().cpu()
if zero_stage == 3:
with deepspeed.zero.GatheredParameters(model.parameters(), modifier_rank=None):
fp32_model = load_state_dict_from_zero_checkpoint(model.module, tmpdir)
fp32_state_dict = fp32_model.state_dict()
else:
fp32_model = load_state_dict_from_zero_checkpoint(model.module, tmpdir)
fp32_state_dict = fp32_model.state_dict()
#dump_state_dict(fp32_model)
if dist.get_rank() == 0:
for name in orig_state_dict.keys():
# float() workaround for torch<1.6
assert torch.allclose(orig_state_dict[name].float(), fp32_state_dict[name].float())
def test_2_param_groups(self, tmpdir, zero_stage, freeze_params):
# TODO:
# - need to test with multiple param groups
# force all params to be partitioned by setting the persistence threshold to 0
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 2,
"steps_per_print": 1,
"zero_allow_untested_optimizer": 1,
"zero_optimization": {
"stage": zero_stage,
"stage3_param_persistence_threshold": 0
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
class MyModel(torch.nn.Module):
def __init__(self, hidden_dim, n_layers, freeze_params):
super().__init__()
self.ll = torch.nn.ModuleList(torch.nn.Linear(hidden_dim, hidden_dim) for i in range(n_layers))
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
if freeze_params:
self.ll[0].weight.requires_grad = False
self.ll[0].bias.requires_grad = False
def forward(self, x, y):
hidden = x
for l in self.ll:
hidden = l(hidden)
return self.cross_entropy_loss(hidden, y)
hidden_dim = 3
world_size = dist.get_world_size()
n_layers = world_size * 2
model = MyModel(hidden_dim=hidden_dim, n_layers=n_layers, freeze_params=freeze_params)
optim_groups = [
{
"params": [l.weight for l in model.ll],
"weight_decay": 0.01,
},
{
"params": [l.bias for l in model.ll],
"weight_decay": 0.0
},
]
optim = torch.optim.SGD(optim_groups, lr=0.1)
model, _, _, _ = deepspeed.initialize(model=model,
model_parameters=model.parameters(),
optimizer=optim,
config=config_dict)
model.empty_partition_cache()
data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device)
for i, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
model.empty_partition_cache()
model.save_checkpoint(tmpdir)
# make sure all ranks saved the checkpoint
dist.barrier()
#dump_state_dict(model)
orig_state_dict = {}
for name, param in model.module.named_parameters():
if zero_stage == 3:
with deepspeed.zero.GatheredParameters(param, modifier_rank=None):
orig_state_dict[name] = param.detach().cpu()
else:
orig_state_dict[name] = param.detach().cpu()
if zero_stage == 3:
with deepspeed.zero.GatheredParameters(model.parameters(), modifier_rank=None):
fp32_model = load_state_dict_from_zero_checkpoint(model.module, tmpdir)
fp32_state_dict = fp32_model.state_dict()
else:
fp32_model = load_state_dict_from_zero_checkpoint(model.module, tmpdir)
fp32_state_dict = fp32_model.state_dict()
#dump_state_dict(fp32_model)
if dist.get_rank() == 0:
for name in orig_state_dict.keys():
# float() workaround for torch<1.6
assert torch.allclose(orig_state_dict[name].float(), fp32_state_dict[name].float())
@pytest.mark.parametrize("allgather_bucket_size", [1000, 1001])
class TestIncorrectAllgatherBucketSize(DistributedTest):
world_size = 1
def test(self, allgather_bucket_size, zero_stage=2):
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 2,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage,
"allgather_bucket_size": allgather_bucket_size
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
hidden_dim = 4
model = SimpleModel(hidden_dim=hidden_dim)
if allgather_bucket_size % 2 == 0:
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
else:
with pytest.raises(AssertionError) as assertinfo:
model, _, _, _ = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters())
assert "allgather_bucket_size must be a multiple of nccl_start_alignment_factor" in str(assertinfo)
class TestPartitionNcclAlignment(DistributedTest):
world_size = 4
def test(self, zero_stage=2):
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 2,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
hidden_dim = 4
model = SimpleModel(hidden_dim=hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
# get nccl all-gather send buffers alignment factor
nccl_start_alignment_factor = model.optimizer.nccl_start_alignment_factor
parallel_partitioned_bit16_groups = model.optimizer.parallel_partitioned_bit16_groups if zero_stage == 2 else model.optimizer.parallel_partitioned_fp16_groups
for data_parallel_partitions in parallel_partitioned_bit16_groups:
for partition_id, partitioned_data in enumerate(data_parallel_partitions):
# verify that each partition's start address is aligned to the NCCL all-gather alignment factor (fp16 elements, 2 bytes each)
assert (partitioned_data.data_ptr() % (2 * nccl_start_alignment_factor) == 0)
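# Helpers and toy modules for the ZeRO-3 parameter partitioning tests below.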
def _ds_initialize_for_param_partitioning_testing(model: Module, cfg: dict) -> DeepSpeedEngine:
ds_engine, _, _, _ = deepspeed.initialize(config=cfg, model=model, model_parameters=model.parameters())
return ds_engine
def _assert_partition_status(model: Module, valid_statuses: Set[ZeroParamStatus]) -> None:
for _, param in model.named_parameters():
assert param.ds_status in valid_statuses, param.ds_summary()
def _assert_fully_available(model: Module) -> None:
for _, param in model.named_parameters():
assert param.ds_status == ZeroParamStatus.AVAILABLE
class EltwiseMultiplicationModule(Module):
def __init__(self, weight: Parameter) -> None:
super().__init__()
self.weight = weight
def forward(self, x: Tensor) -> Tensor:
_assert_fully_available(self)
result = self.weight * x
return result
class EltwiseMultiplicationTestNetwork_Dict(Module):
"""used for testing purposes"""
def __init__(
self,
weight1: Parameter,
weight2: Parameter,
weight3: Parameter,
) -> None:
super().__init__()
self.__layer1 = EltwiseMultiplicationModule(weight1)
self.__layer2 = EltwiseMultiplicationModule(weight2)
self.__layer3 = EltwiseMultiplicationModule(weight3)
self.loss = L1Loss(reduction="none")
def forward(self, x: Tensor, y: Tensor, use_module_trace: bool, param_prefetching: bool) -> Dict[str, Tensor]:
_assert_partition_status(self,
{ZeroParamStatus.NOT_AVAILABLE, ZeroParamStatus.INFLIGHT, ZeroParamStatus.AVAILABLE}
if use_module_trace else {ZeroParamStatus.NOT_AVAILABLE})
pre_layer_expected_states = {
ZeroParamStatus.INFLIGHT if param_prefetching else ZeroParamStatus.NOT_AVAILABLE,
ZeroParamStatus.AVAILABLE,
}
post_layer_expected_states = {
ZeroParamStatus.AVAILABLE if param_prefetching else ZeroParamStatus.NOT_AVAILABLE,
}
_assert_partition_status(self.__layer1, pre_layer_expected_states)
hidden1 = self.__layer1(x)
_assert_partition_status(self.__layer1, post_layer_expected_states)
_assert_partition_status(self.__layer2, pre_layer_expected_states)
hidden2 = self.__layer2(hidden1)
_assert_partition_status(self.__layer2, post_layer_expected_states)
_assert_partition_status(self.__layer3, pre_layer_expected_states)
y_hat = self.__layer3(hidden2)
_assert_partition_status(self.__layer3, post_layer_expected_states)
loss = self.loss(y_hat, y)
_assert_partition_status(self,
{ZeroParamStatus.NOT_AVAILABLE, ZeroParamStatus.INFLIGHT, ZeroParamStatus.AVAILABLE}
if use_module_trace else {ZeroParamStatus.NOT_AVAILABLE})
return {
"hidden1": hidden1,
"hidden2": hidden2,
"y_hat": y_hat,
"loss": loss,
}
@staticmethod
def to_dict(outputs: Dict[str, Tensor]) -> Dict[str, Tensor]:
return outputs
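# The following variants return the same outputs as a typing.NamedTuple, a collections.namedtuple,
# a plain tuple, and a list, to exercise ZeRO-3 handling of different forward() return types.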
class EltwiseMultiplicationNamedTuple(NamedTuple):
hidden1: Tensor
hidden2: Tensor
y_hat: Tensor
loss: Tensor
class EltwiseMultiplicationTestNetwork_NamedTuple(EltwiseMultiplicationTestNetwork_Dict):
def forward(self, *args, **kwargs) -> EltwiseMultiplicationNamedTuple:
outputs_dicts = super().forward(*args, **kwargs)
return EltwiseMultiplicationNamedTuple(hidden1=outputs_dicts['hidden1'],
hidden2=outputs_dicts['hidden2'],
y_hat=outputs_dicts['y_hat'],
loss=outputs_dicts['loss'])
@staticmethod
def to_dict(outputs: EltwiseMultiplicationNamedTuple) -> Dict[str, Tensor]:
return {
"hidden1": outputs.hidden1,
"hidden2": outputs.hidden2,
"y_hat": outputs.y_hat,
"loss": outputs.loss,
}
EltwiseMultiplication_namedtuple = namedtuple('EltwiseMultiplication_namedtuple',
['hidden1', 'hidden2', 'y_hat', 'loss'])
class EltwiseMultiplicationTestNetwork_namedtuple(EltwiseMultiplicationTestNetwork_Dict):
def forward(self, *args, **kwargs) -> EltwiseMultiplication_namedtuple:
outputs_dicts = super().forward(*args, **kwargs)
return EltwiseMultiplication_namedtuple(hidden1=outputs_dicts['hidden1'],
hidden2=outputs_dicts['hidden2'],
y_hat=outputs_dicts['y_hat'],
loss=outputs_dicts['loss'])
@staticmethod
def to_dict(outputs: EltwiseMultiplicationNamedTuple) -> Dict[str, Tensor]:
return {
"hidden1": outputs.hidden1,
"hidden2": outputs.hidden2,
"y_hat": outputs.y_hat,
"loss": outputs.loss,
}
class EltwiseMultiplicationTestNetwork_Tuple(EltwiseMultiplicationTestNetwork_Dict):
def forward(self, *args, **kwargs) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
outputs_dicts = super().forward(*args, **kwargs)
return (outputs_dicts['hidden1'], outputs_dicts['hidden2'], outputs_dicts['y_hat'], outputs_dicts['loss'])
@staticmethod
def to_dict(outputs: Tuple[Tensor, Tensor, Tensor, Tensor]) -> Dict[str, Tensor]:
return {
"hidden1": outputs[0],
"hidden2": outputs[1],
"y_hat": outputs[2],
"loss": outputs[3],
}
class EltwiseMultiplicationTestNetwork_List(EltwiseMultiplicationTestNetwork_Dict):
def forward(self, *args, **kwargs) -> List[Tensor]:
outputs_dicts = super().forward(*args, **kwargs)
return [outputs_dicts['hidden1'], outputs_dicts['hidden2'], outputs_dicts['y_hat'], outputs_dicts['loss']]
@staticmethod
def to_dict(outputs: List[Tensor]) -> Dict[str, Tensor]:
return {
"hidden1": outputs[0],
"hidden2": outputs[1],
"y_hat": outputs[2],
"loss": outputs[3],
}
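# End-to-end ZeRO-3 partitioning test: checks forward activations, per-rank fp32 gradient partitions,
# and that parameters are re-partitioned after the optimizer step.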
@pytest.mark.parametrize("param_persistence_threshold", [0, 10])
@pytest.mark.parametrize("fp16_enabled", [True, False])
@pytest.mark.parametrize("contiguous_gradients", [True, False])
@pytest.mark.parametrize("offload_optimizer", [True, False])
@pytest.mark.parametrize("zero_grad", [True, False])
@pytest.mark.parametrize("prefetching", [True, False])
@pytest.mark.parametrize("model_class", [
EltwiseMultiplicationTestNetwork_Dict, EltwiseMultiplicationTestNetwork_NamedTuple,
EltwiseMultiplicationTestNetwork_namedtuple, EltwiseMultiplicationTestNetwork_Tuple,
EltwiseMultiplicationTestNetwork_List
])
class TestZero3ParamPartitioningBase(DistributedTest):
world_size = 2
def test(
self,
param_persistence_threshold: int,
fp16_enabled: bool,
contiguous_gradients: bool,
offload_optimizer: bool,
zero_grad: bool,
prefetching: bool,
model_class: EltwiseMultiplicationTestNetwork_Dict,
) -> None:
if offload_optimizer and not contiguous_gradients:
return
m = 3
n = 5
weights = [Parameter(torch.zeros((m, n), dtype=torch.float32)) for _ in range(3)]
model = model_class(*weights)
prefetch_bucket_size = sum([p.numel() for p in model.parameters(recurse=True)])
cfg = {
"train_micro_batch_size_per_gpu": 1,
"zero_optimization": {
"stage": 3,
"stage3_max_reuse_distance": 0,
"stage3_param_persistence_threshold": param_persistence_threshold,
"contiguous_gradients": contiguous_gradients,
"stage3_prefetch_bucket_size": prefetch_bucket_size if prefetching else 0
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1.
}
},
"fp16": {
"enabled": fp16_enabled,
"loss_scale": 1.,
}
}
if offload_optimizer:
cfg["zero_optimization"]["offload_optimizer"] = {
"device": "cpu",
"pin_memory": True,
}
ds_engine = _ds_initialize_for_param_partitioning_testing(model, cfg)
for i, weight in enumerate(weights):
weight.ds_tensor.data = torch.full_like(weight.ds_tensor.data, (i + 1) * (1 + dist.get_rank()))
def create_tensor(vals, dtype: torch.dtype = None) -> Tensor:
return torch.as_tensor(vals,
dtype=dtype or (torch.float16 if fp16_enabled else torch.float32),
device=ds_engine.device)
expected_hidden1 = create_tensor([
[1, 1, 1, 1, 1],
[1, 1, 1, 2, 2],
[2, 2, 2, 2, 2],
])
expected_hidden2 = create_tensor([
[2, 2, 2, 2, 2],
[2, 2, 2, 8, 8],
[8, 8, 8, 8, 8],
])
expected_yhat = create_tensor([[6, 6, 6, 6, 6], [6, 6, 6, 48, 48], [48, 48, 48, 48, 48]])
expected_loss = create_tensor([
[5, 5, 5, 5, 5],
[5, 5, 5, 47, 47],
[47, 47, 47, 47, 47],
])
for train_iter in range(3):
activations = ds_engine(
x=torch.ones((m, n), dtype=torch.float16 if fp16_enabled else torch.float32, device=ds_engine.device),
y=torch.ones((m, n), dtype=torch.float16 if fp16_enabled else torch.float32, device=ds_engine.device),
use_module_trace=train_iter > 0,
param_prefetching=prefetching and train_iter > 0,
)
# for ease of testing, convert outputs to a dict.
activations = model_class.to_dict(activations)
assert torch.allclose(activations["hidden1"], expected_hidden1)
assert torch.allclose(activations["hidden2"], expected_hidden2)
assert torch.allclose(activations["y_hat"], expected_yhat)
assert torch.allclose(activations["loss"], expected_loss)
ds_engine.backward(activations["loss"].sum())
# check the gradients
grad_partitions = ds_engine.optimizer.get_fp32_grad_partitions()
assert set(grad_partitions.keys()) == {0}, f"should have one parameter group but got {len(grad_partitions)}"
assert set(grad_partitions[0].keys()) == {0, 1, 2}
dloss_wrt_layer1 = grad_partitions[0][0]
dloss_wrt_layer2 = grad_partitions[0][1]
dloss_wrt_layer3 = grad_partitions[0][2]
assert dloss_wrt_layer1.dtype == torch.float
assert dloss_wrt_layer2.dtype == torch.float
assert dloss_wrt_layer3.dtype == torch.float
# layer1 = [..., 1, 2, ...]
# layer2 = [..., 2, 4, ...]
# layer3 = [..., 3, 6, ...]
# dloss_wrt_layer3 = hidden2
# dloss_wrt_layer2 = layer3 * hidden1
# dloss_wrt_layer1 = layer3 * layer2 * x
grad_multiplier = 1 if zero_grad else (train_iter + 1)
if dist.get_rank() == 0:
assert torch.allclose(dloss_wrt_layer3.to(get_accelerator().device_name()),
grad_multiplier * create_tensor([2] * 8, torch.float))
assert torch.allclose(dloss_wrt_layer2.to(get_accelerator().device_name()),
grad_multiplier * create_tensor([3 * 1] * 8, torch.float))
assert torch.allclose(dloss_wrt_layer1.to(get_accelerator().device_name()),
grad_multiplier * create_tensor([3 * 2 * 1] * 8, torch.float))
elif dist.get_rank() == 1:
# parameters don't split evenly across ranks, so rank 1 has a zero-padded
# partition
assert torch.allclose(dloss_wrt_layer3.to(get_accelerator().device_name()),
grad_multiplier * create_tensor(([8] * 7) + [0], torch.float))
assert torch.allclose(dloss_wrt_layer2.to(get_accelerator().device_name()),
grad_multiplier * create_tensor(([6 * 2] * 7) + [0], torch.float))
assert torch.allclose(dloss_wrt_layer1.to(get_accelerator().device_name()),
grad_multiplier * create_tensor(([6 * 4 * 1] * 7) + [0], torch.float))
else:
raise RuntimeError("test has world size of two")
if zero_grad:
ds_engine.optimizer.zero_grad()
# TODO. add testing for this - for now we just call it to make sure it
# doesn't throw
ds_engine.optimizer.step()
# taking an optimizer step invalidates all parameters, make sure everything
# has been partitioned afterwards
_assert_partition_status(ds_engine, {ZeroParamStatus.NOT_AVAILABLE})
assert not math.isclose(ds_engine.optimizer._global_grad_norm, 0.0)
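# ZeRO-3 with one large parameter split across 4 ranks; activations and averaged gradients are checked each iteration.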
@pytest.mark.parametrize("init_context_manager", [True, False])
class TestZero3ParamPartitioningLargeParam(DistributedTest):
world_size = 4
def test(self, init_context_manager: bool, param_sz: int = 8100) -> None:
class LargeParamModel(Module):
def __init__(self):
super().__init__()
self.param = Parameter(torch.zeros((param_sz, ), dtype=torch.float32))
# only do weight initialization on root rank to
# make sure we are broadcasting correctly from rank 0
if dist.get_rank() == 0:
partition_sz = math.ceil(self.param.numel() / dist.get_world_size())
offset = 0
for rank in range(dist.get_world_size()):
with torch.no_grad():
self.param[offset:offset + partition_sz].fill_(rank)
offset += partition_sz
def forward(self, x: Tensor) -> Tensor:
return x * self.param
ds_config = {
"train_micro_batch_size_per_gpu": 1,
"zero_optimization": {
"stage": 3,
"stage3_max_reuse_distance": 0,
"contiguous_gradients": True,
"overlap_comm": True,
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1.
}
},
"fp16": {
"enabled": True,
"loss_scale": 1.,
}
}
with deepspeed.zero.Init(mem_efficient_linear=False, enabled=init_context_manager):
model = LargeParamModel()
ds_engine = _ds_initialize_for_param_partitioning_testing(model, ds_config)
for train_iter in range(3): # test multiple iterations to cover prefetching
activation: Tensor = ds_engine(torch.ones(param_sz, dtype=torch.float16, device=ds_engine.device))
partition_sz = math.ceil(param_sz / self.world_size)
for rank_idx, start_idx in enumerate(range(0, param_sz, partition_sz)):
activation_from_partition = activation[start_idx:start_idx + partition_sz]
assert torch.allclose(activation_from_partition, torch.full_like(activation_from_partition, rank_idx))
ds_engine.backward(activation.sum())
ds_engine.allreduce_gradients()
avgd_gradients = ds_engine.optimizer.averaged_gradients
assert set(avgd_gradients.keys()) == {0}, "should only have one parameter group"
weight_gradient, = avgd_gradients[0]
expected_weight_gradient = (train_iter + 1) * torch.full_like(weight_gradient, 1)
assert torch.allclose(weight_gradient, expected_weight_gradient)
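# ZeRO-3 with many small parameters (one per layer), each initialized per-rank under GatheredParameters.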
@pytest.mark.parametrize("param_sz", [100, 1_000, 10_000])
@pytest.mark.parametrize("n_layers", [100, 1_000])
@pytest.mark.parametrize("init_context_manager", [True, False])
class TestZero3ParamPartitioningManyParams(DistributedTest):
world_size = 4
def test(self, param_sz: int, n_layers: int, init_context_manager: bool) -> None:
class ManyParamModel(Module):
def __init__(self) -> None:
super().__init__()
self.modulelist = ModuleList(
EltwiseMultiplicationModule(weight=Parameter(torch.empty((param_sz, ), dtype=torch.float32)))
for _ in range(n_layers))
for layer_num, module in enumerate(self.modulelist):
with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
param: Parameter = module.weight
partition_sz = math.ceil(param.numel() / dist.get_world_size())
offset = 0
for rank in range(dist.get_world_size()):
with torch.no_grad():
param[offset:offset + partition_sz].fill_(2 * layer_num * rank)
offset += partition_sz
def forward(self, x: Tensor) -> Tensor:
activations = []
for module in self.modulelist:
x = module(x)
activations.append(x)
return activations
ds_cfg = {
"train_micro_batch_size_per_gpu": 1,
"zero_optimization": {
"stage": 3,
"stage3_max_reuse_distance": 0,
"contiguous_gradients": True,
"overlap_comm": True,
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1.
}
},
"fp16": {
"enabled": True,
"loss_scale": 1.,
}
}
with deepspeed.zero.Init(config=ds_cfg, mem_efficient_linear=False, enabled=init_context_manager):
model = ManyParamModel()
ds_engine = _ds_initialize_for_param_partitioning_testing(model, ds_cfg)
for _ in range(3): # test multiple iterations to cover prefetching
activations: List[Tensor] = ds_engine(
torch.ones((param_sz, ), dtype=torch.float16, device=ds_engine.device))
assert len(activations) == n_layers
partition_sz = math.ceil(param_sz / self.world_size)
expected_activations = torch.empty(param_sz, dtype=torch.float16, device=ds_engine.device)
for start_idx in range(0, param_sz, partition_sz):
expected_activations[start_idx:start_idx + partition_sz] = dist.get_rank()
for layer_num, activation in enumerate(activations):
expected_activations *= 2 * layer_num
assert torch.allclose(activation, expected_activations)
# TODO. finish writing this test
ds_engine.backward(activations[-1].sum())
avgd_gradients = ds_engine.optimizer.averaged_gradients
assert set(avgd_gradients.keys()) == {0}, "should only have one parameter group"
weight_gradients: List[Tensor] = avgd_gradients[0]
for layer_num, activation in enumerate(weight_gradients):
pass
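# Ensure zero.Init handles a parent module that initializes its child's weights via apply().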
class TestZero3InitForParentWeightInitialization(DistributedTest):
world_size = 4
def test(self):
class ModelWhereParentInitializesChildWeights(Module):
def __init__(self) -> None:
super().__init__()
self.linear = Linear(12, 1)
self.apply(self.__init_weights)
def __init_weights(self, module):
if isinstance(module, Linear):
with torch.no_grad():
module.weight.fill_(1 + dist.get_rank())
ds_cfg = {
"train_micro_batch_size_per_gpu": 1,
"zero_optimization": {
"stage": 3,
"stage3_max_reuse_distance": 0,
"contiguous_gradients": True,
"overlap_comm": True,
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1.
}
},
"fp16": {
"enabled": True,
"loss_scale": 1.,
}
}
with deepspeed.zero.Init(config=ds_cfg, mem_efficient_linear=False, enabled=True):
model = ModelWhereParentInitializesChildWeights()
assert model.linear.weight.ds_tensor.numel() == math.ceil(12 / self.world_size)
assert torch.allclose(model.linear.weight.ds_tensor, torch.full_like(model.linear.weight.ds_tensor, 1))
@pytest.mark.skip("not working")
@pytest.mark.parametrize("param_persistence_threshold", [0, 10])
@pytest.mark.parametrize("contiguous_gradients", [True, False])
@pytest.mark.parametrize("offload_optimizer", [True, False])
@pytest.mark.parametrize("zero_grad", [True, False])
@pytest.mark.parametrize("prefetching", [True, False])
@pytest.mark.parametrize("model_class", [
EltwiseMultiplicationTestNetwork_Dict, EltwiseMultiplicationTestNetwork_NamedTuple,
EltwiseMultiplicationTestNetwork_namedtuple, EltwiseMultiplicationTestNetwork_Tuple,
EltwiseMultiplicationTestNetwork_List
])
class TestZero3ParamPartitioningBaseBF16(DistributedTest):
world_size = 2
def test(self, param_persistence_threshold: int, contiguous_gradients: bool, offload_optimizer: bool,
zero_grad: bool, prefetching: bool, model_class: EltwiseMultiplicationTestNetwork_Dict) -> None:
if offload_optimizer and not contiguous_gradients:
return
m = 3
n = 5
weights = [Parameter(torch.zeros((m, n), dtype=torch.float32)) for _ in range(3)]
model = model_class(*weights)
prefetch_bucket_size = sum([p.numel() for p in model.parameters(recurse=True)])
cfg = {
"train_micro_batch_size_per_gpu": 1,
"zero_optimization": {
"stage": 3,
"stage3_max_reuse_distance": 0,
"stage3_param_persistence_threshold": param_persistence_threshold,
"contiguous_gradients": contiguous_gradients,
"stage3_prefetch_bucket_size": prefetch_bucket_size if prefetching else 0
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1.
}
},
"bf16": {
"enabled": True,
"loss_scale": 1.,
}
}
if offload_optimizer:
cfg["zero_optimization"]["offload_optimizer"] = {
"device": "cpu",
"pin_memory": True,
}
ds_engine = _ds_initialize_for_param_partitioning_testing(model, cfg)
for i, weight in enumerate(weights):
weight.ds_tensor.data = torch.full_like(weight.ds_tensor.data, (i + 1) * (1 + dist.get_rank()))
def create_tensor(vals):
return torch.as_tensor(vals, dtype=torch.bfloat16, device=ds_engine.device)
expected_hidden1 = create_tensor([
[1, 1, 1, 1, 1],
[1, 1, 1, 2, 2],
[2, 2, 2, 2, 2],
])
expected_hidden2 = create_tensor([
[2, 2, 2, 2, 2],
[2, 2, 2, 8, 8],
[8, 8, 8, 8, 8],
])
expected_yhat = create_tensor([[6, 6, 6, 6, 6], [6, 6, 6, 48, 48], [48, 48, 48, 48, 48]])
expected_loss = create_tensor([
[5, 5, 5, 5, 5],
[5, 5, 5, 47, 47],
[47, 47, 47, 47, 47],
])
for train_iter in range(3):
_assert_partition_status(ds_engine, {ZeroParamStatus.NOT_AVAILABLE})
activations = ds_engine(
x=torch.ones((m, n), dtype=torch.bfloat16, device=ds_engine.device),
y=torch.ones((m, n), dtype=torch.bfloat16, device=ds_engine.device),
use_module_trace=train_iter > 0,
param_prefetching=prefetching and train_iter > 0,
)
# for ease of testing, convert outputs to a dict.
activations = model_class.to_dict(activations)
assert torch.allclose(activations["hidden1"], expected_hidden1)
assert torch.allclose(activations["hidden2"], expected_hidden2)
assert torch.allclose(activations["y_hat"], expected_yhat)
assert torch.allclose(activations["loss"], expected_loss)
ds_engine.backward(activations["loss"].sum())
_assert_partition_status(ds_engine, {ZeroParamStatus.NOT_AVAILABLE})
# check the gradients
grad_partitions = ds_engine.optimizer.get_fp32_grad_partitions()
assert set(grad_partitions.keys()) == {0}, f"should have one parameter group but got {len(grad_partitions)}"
assert set(grad_partitions[0].keys()) == {0, 1, 2}
dloss_wrt_layer1 = grad_partitions[0][0]
dloss_wrt_layer2 = grad_partitions[0][1]
dloss_wrt_layer3 = grad_partitions[0][2]
# layer1 = [..., 1, 2, ...]
# layer2 = [..., 2, 4, ...]
# layer3 = [..., 3, 6, ...]
# dloss_wrt_layer3 = hidden2
# dloss_wrt_layer2 = layer3 * hidden1
# dloss_wrt_layer1 = layer3 * layer2 * x
expected_grad_dtype = torch.float32 if offload_optimizer else torch.bfloat16
grad_multiplier = 1 if zero_grad else (train_iter + 1)
if dist.get_rank() == 0:
assert torch.allclose(dloss_wrt_layer3.to(get_accelerator().device_name()),
grad_multiplier * create_tensor([2] * 8).to(expected_grad_dtype))
assert torch.allclose(dloss_wrt_layer2.to(get_accelerator().device_name()),
grad_multiplier * create_tensor([3 * 1] * 8).to(expected_grad_dtype))
assert torch.allclose(dloss_wrt_layer1.to(get_accelerator().device_name()),
grad_multiplier * create_tensor([3 * 2 * 1] * 8).to(expected_grad_dtype))
elif dist.get_rank() == 1:
# parameters don't split evenly across ranks, so rank 1 has a zero-padded
# partition
assert torch.allclose(dloss_wrt_layer3.to(get_accelerator().device_name()),
grad_multiplier * create_tensor(([8] * 7) + [0]).to(expected_grad_dtype))
assert torch.allclose(dloss_wrt_layer2.to(get_accelerator().device_name()),
grad_multiplier * create_tensor(([6 * 2] * 7) + [0]).to(expected_grad_dtype))
assert torch.allclose(dloss_wrt_layer1.to(get_accelerator().device_name()),
grad_multiplier * create_tensor(([6 * 4 * 1] * 7) + [0]).to(expected_grad_dtype))
else:
raise RuntimeError("test has world size of two")
if zero_grad:
ds_engine.optimizer.zero_grad()
# TODO. add testing for this - for now we just call it to make sure it
# doesn't throw
ds_engine.optimizer.step()
_assert_partition_status(ds_engine, {ZeroParamStatus.NOT_AVAILABLE})
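# ZeRO stage 1 with the optimizer offloaded to CPU should train without error.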
class TestZeroOffloadStage1(DistributedTest):
world_size = 2
def test(self):
config_dict = {
"train_batch_size": 4,
"gradient_accumulation_steps": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-4
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": 1,
"offload_optimizer": {
"device": "cpu"
}
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
dist.barrier()
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
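# ZeRO-3 forward() may return a tuple, list, or dict; training should work for each return type.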
@pytest.mark.parametrize('return_type', [tuple, list, dict])
class TestZero3DictFwd(DistributedTest):
world_size = 1
def test(self, return_type):
config_dict = {
"train_batch_size": 4,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-4
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": 3
}
}
hidden_dim = 10
class MyModel(torch.nn.Module):
def __init__(self, hidden_dim):
super(MyModel, self).__init__()
self.l1 = torch.nn.Linear(hidden_dim, hidden_dim)
self.cel = torch.nn.CrossEntropyLoss()
def forward(self, x, y):
x = self.l1(x)
loss = self.cel(x, y)
if return_type == dict:
val = {'a': x, 'loss': loss, 'b': 1, 'c': None}
elif return_type == list:
val = [x, loss]
elif return_type == tuple:
val = (x, loss)
else:
raise NotImplementedError
return val
with deepspeed.zero.Init():
model = MyModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
dist.barrier()
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
if return_type == dict:
loss = loss['loss']
else:
loss = loss[1]
model.backward(loss)
model.step()
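# All parameters / sub-groups should report the same Adam step count after training.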
@pytest.mark.parametrize('zero_stage', [1, 2, 3])
class TestZeroAdamOptimizerStepCount(DistributedTest):
world_size = 1
def test(self, zero_stage):
# force all params to be partitioned by setting the persistence threshold to 0
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 2,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage,
"stage3_param_persistence_threshold": 0,
"sub_group_size": 4,
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
hidden_dim = 4
model = SimpleModel(hidden_dim=hidden_dim, nlayers=12)
model, optimizer, _, _ = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device)
for i, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
step_counts = []
if zero_stage == 3:
for sub_group_id, _ in enumerate(optimizer.fp16_groups):
fp32_param = optimizer.fp32_partitioned_groups_flat[sub_group_id]
state = optimizer.optimizer.state[fp32_param]
step_counts.append(state['step'])
assert all(step == step_counts[0] for step in step_counts)
elif zero_stage == 1 or zero_stage == 2:
for param_group in optimizer.optimizer.param_groups:
for param in param_group['params']:
state = optimizer.optimizer.state[param]
step_counts.append(state['step'])
assert all(step == step_counts[0] for step in step_counts)
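# ZeRO-3 training with some frozen (requires_grad=False) weights.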
class TestZeroFrozenWeights(DistributedTest):
world_size = 1
def test(self):
config_dict = {
"train_batch_size": 4,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-4
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": 3
}
}
hidden_dim = 10
class MyModel(torch.nn.Module):
def __init__(self, hidden_dim):
super(MyModel, self).__init__()
self.l1 = torch.nn.Linear(hidden_dim, hidden_dim)
self.l2 = torch.nn.Linear(hidden_dim, hidden_dim)
self.act = torch.nn.ReLU()
self.cel = torch.nn.CrossEntropyLoss()
# freeze one fc
self.l2.weight.requires_grad = False
self.l2.bias.requires_grad = False
def forward(self, x, y):
x = self.l1(x)
x = self.act(x)
x = self.l2(x)
loss = self.cel(x, y)
val = (x, loss)
return val
with deepspeed.zero.Init(config_dict_or_path=config_dict):
model = MyModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
dist.barrier()
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
loss = loss[1]
model.backward(loss)
model.step()
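# zero_force_ds_cpu_optimizer=True should reject a non-DeepSpeed optimizer when offloading to CPU.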
@pytest.mark.parametrize('force_ds_optim', [True, False])
class TestZeroOffloadOptim(DistributedTest):
world_size = 1
def test(self, force_ds_optim):
config_dict = {
"train_batch_size": 4,
"gradient_accumulation_steps": 2,
"steps_per_print": 1,
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": 1,
"offload_optimizer": {
"device": "cpu"
}
},
"zero_force_ds_cpu_optimizer": force_ds_optim,
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
optimizer = torch.optim.Adam(model.parameters())
if force_ds_optim:
with pytest.raises(ZeRORuntimeException):
model, _, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config=config_dict)
else:
model, _, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config=config_dict)
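# empty_partition_cache() should release all persistent parameters after training or inference.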
@pytest.mark.parametrize('training', [True, False])
class TestZeroPartitionCache(DistributedTest):
world_size = 1
def test_training_partition_cache(self, training):
hidden_dim = 10
config_dict = {
"train_batch_size": 2,
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"zero_optimization": {
"stage": 3,
"stage3_param_persistence_threshold": hidden_dim
}
}
if training:
config_dict["optimizer"] = {"type": "Adam"}
with deepspeed.zero.Init(config_dict_or_path=config_dict):
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, _ = deepspeed.initialize(model=model, config=config_dict)
dtype = torch.half
data_loader = random_dataloader(model=model,
total_samples=6,
hidden_dim=hidden_dim,
device=model.device,
dtype=dtype)
for _, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
if training:
model.backward(loss)
model.step()
persist_param_size = sum([p.numel() for p in model.parameters() if p.ds_persist])
assert persist_param_size >= sum([p.numel() for p in model.parameters()])
model.empty_partition_cache()
assert sum([p.numel() for p in model.parameters()]) == 0
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
from unit.common import DistributedTest
from unit.simple_model import UnusedParametersModel, random_dataloader
from deepspeed.ops.op_builder import CPUAdamBuilder
import deepspeed
@pytest.mark.parametrize('ignore_unused_parameters', [False, True])
class TestStage2IgnoreUnusedParameters(DistributedTest):
world_size = 1
def test(self, ignore_unused_parameters):
use_cpu_offload = True
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 2,
"steps_per_print": 1,
"zero_optimization": {
"stage": 2,
"cpu_offload": use_cpu_offload,
"ignore_unused_parameters": ignore_unused_parameters
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
hidden_dim = 4
model = UnusedParametersModel(hidden_dim=hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=10, hidden_dim=hidden_dim, device=model.device)
def _loop():
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
if ignore_unused_parameters:
_loop()
else:
with pytest.raises(AssertionError) as e:
_loop()
assert e.value.args and 'ignore_unused_parameters' in e.value.args[0]
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import deepspeed.comm as dist
import torch
from unit.common import DistributedTest
from unit.simple_model import random_dataloader
from unit.util import bf16_required_version_check
import deepspeed
from deepspeed.utils import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.ops.aio import AsyncIOBuilder
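# For trainable params the full fp32 weight, gradient, and optimizer states must be retrievable;
# for frozen params they should be None.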
def validate_full_tensors(model):
for _, lp in model.named_parameters():
hp = safe_get_full_fp32_param(lp)
exp_avg = safe_get_full_optimizer_state(lp, 'exp_avg')
exp_avg_sq = safe_get_full_optimizer_state(lp, 'exp_avg_sq')
hp_grad = safe_get_full_grad(lp)
param_list = [hp, hp_grad, exp_avg, exp_avg_sq]
if lp.requires_grad:
assert all([p is not None for p in param_list])
else:
assert all([p is None for p in param_list])
class MyModel(torch.nn.Module):
def __init__(self, hidden_dim, frozen_weights):
super(MyModel, self).__init__()
self.act = torch.nn.ReLU()
self.cel = torch.nn.CrossEntropyLoss()
self.linears = torch.nn.ModuleList(
[torch.nn.Linear(hidden_dim, 1),
torch.nn.Linear(1, 1),
torch.nn.Linear(1, hidden_dim)])
if frozen_weights:
self.linears[0].weight.requires_grad = False
self.linears[0].bias.requires_grad = False
def forward(self, x, y):
for l in self.linears:
x = l(x)
x = self.act(x)
loss = self.cel(x, y)
val = (x, loss)
return val
def run_fragmented_model(model, config_dict, hidden_dim, dtype):
model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
data_loader = random_dataloader(model=model,
total_samples=10,
hidden_dim=hidden_dim,
device=model.device,
dtype=dtype)
dist.barrier()
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
loss = loss[1]
model.backward(loss)
validate_full_tensors(model)
model.step()
@pytest.mark.parametrize('frozen_weights', [True, False])
class TestTensorFragment(DistributedTest):
# Need multiple gpus to test possible hanging
world_size = 2
@pytest.mark.parametrize('zero_stage', [1, 2, 3])
@pytest.mark.parametrize('offload_device', [OffloadDeviceEnum.none, OffloadDeviceEnum.cpu, OffloadDeviceEnum.nvme])
def test_zero_fragments(self, tmpdir, zero_stage, offload_device, frozen_weights):
if offload_device == OffloadDeviceEnum.nvme:
if zero_stage != 3:
pytest.skip(f"Nvme offload not supported for zero stage {zero_stage}")
if not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]:
pytest.skip('Skip tests since async-io is not compatible')
config_dict = {
"train_micro_batch_size_per_gpu": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-6
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 2
},
"zero_optimization": {
"stage": zero_stage,
}
}
if offload_device == OffloadDeviceEnum.cpu:
config_dict["zero_optimization"]["offload_optimizer"] = {"device": offload_device}
elif offload_device == OffloadDeviceEnum.nvme:
config_dict["zero_optimization"]["offload_optimizer"] = {
"device": offload_device,
"nvme_path": str(tmpdir)
}
hidden_dim = 128
if zero_stage == 3:
with deepspeed.zero.Init(config_dict_or_path=config_dict):
model = MyModel(hidden_dim, frozen_weights)
else:
model = MyModel(hidden_dim, frozen_weights)
run_fragmented_model(model, config_dict, hidden_dim, torch.float16)
def test_bf16_fragments(self, frozen_weights):
if frozen_weights:
pytest.skip("TODO: Frozen weights not currently supported by BF16 Optimizer")
if not bf16_required_version_check(accelerator_check=False):
pytest.skip(
" DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
)
config_dict = {
"train_micro_batch_size_per_gpu": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-6
}
},
"bf16": {
"enabled": True
},
"zero_optimization": {
"stage": 0,
}
}
hidden_dim = 128
model = MyModel(hidden_dim, frozen_weights)
run_fragmented_model(model, config_dict, hidden_dim, torch.bfloat16)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from types import SimpleNamespace
import torch
import deepspeed
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus, partitioned_param_data_shape
import deepspeed.comm as dist
from unit.common import DistributedTest
from unit.simple_model import SimpleModel
from utils import setup_serial_env
# Test that no sub-class or super-class is missed
class ConvX(torch.nn.Conv1d):
def __init__(self, *args):
super().__init__(*args)
# This would not be partitioned before bugfix 5ca8167
self.param_in = torch.nn.Parameter(torch.FloatTensor(5).uniform_())
def forward(self, x):
return x
class ConvNet(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = ConvX(1, 3, 4)
self.param = torch.nn.Parameter(torch.FloatTensor(5).uniform_())
def forward(self, x):
return x
config = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True,
"loss_scale": 138.
},
"zero_optimization": {
"stage": 3,
"stage3_param_persistence_threshold": 1,
}
}
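# GatheredParameters should materialize full params inside the context and free them again on exit.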
class TestZeroGatheredParametersFree(DistributedTest):
world_size = 1
def test(self):
config_dict = {"train_batch_size": 1, "zero_optimization": {"stage": 3}}
hidden_dim = 10
class MyModel(torch.nn.Module):
def __init__(self, hidden_dim):
super(MyModel, self).__init__()
self.l1 = torch.nn.Linear(hidden_dim, hidden_dim)
with deepspeed.zero.Init(config_dict_or_path=config_dict):
model = MyModel(hidden_dim)
with deepspeed.zero.GatheredParameters(list(model.parameters())):
assert model.l1.weight.numel() != 0, "GatheredParameters should give a non-0-sized tensor"
# on exit from `GatheredParameters` the gathered params should be freed and not leak memory
assert model.l1.weight.numel() == 0, "outside of GatheredParameters the param should go back to be 0-sized"
class TestSerialContext(DistributedTest):
world_size = 1
init_distributed = False
set_dist_env = False
def test_subclass_param(self):
setup_serial_env()
with deepspeed.zero.Init(config=config):
model = ConvNet()
assert model.param.ds_status == ZeroParamStatus.NOT_AVAILABLE
assert model.conv1.param_in.ds_status == ZeroParamStatus.NOT_AVAILABLE
def test_scattered_init_dist(self):
setup_serial_env()
assert not dist.is_initialized()
with deepspeed.zero.Init():
assert dist.is_initialized()
def test_scatter_halftype(self):
setup_serial_env()
with deepspeed.zero.Init():
l = torch.nn.Linear(10, 10)
assert l.weight.ds_tensor.dtype == torch.float16
y = torch.LongTensor([3, 3])
assert y.dtype == torch.long
def test_throughput_calculation(self):
setup_serial_env()
train_micro_batch_size_per_gpu = 7
gradient_accumulation_steps = 6
config_dict = {
"train_micro_batch_size_per_gpu": train_micro_batch_size_per_gpu,
"gradient_accumulation_steps": gradient_accumulation_steps,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.001,
}
},
"zero_optimization": {
"stage": 0
},
}
args = SimpleNamespace(local_rank=0)
net = SimpleModel(hidden_dim=4)
engine, _, _, _ = deepspeed.initialize(args=args,
config=config_dict,
model=net,
model_parameters=net.parameters())
assert engine.tput_timer.batch_size == train_micro_batch_size_per_gpu * gradient_accumulation_steps
assert not engine.tput_timer.initialized
assert not engine.tput_timer.started
assert engine.tput_timer.start_step == 2
assert engine.tput_timer.start_time == 0
assert engine.tput_timer.micro_step_count == 0
assert engine.tput_timer.global_step_count == 0
assert engine.tput_timer.total_elapsed_time == 0
# calling stop() while uninitialized - has no effect
engine.tput_timer.stop()
assert not engine.tput_timer.initialized
assert not engine.tput_timer.started
assert engine.tput_timer.start_time == 0
assert engine.tput_timer.micro_step_count == 0
assert engine.tput_timer.global_step_count == 0
assert engine.tput_timer.total_elapsed_time == 0
# any call to start() (from dataloader or not) initializes the timer
engine.tput_timer.start()
assert engine.tput_timer.initialized
assert engine.tput_timer.started
assert engine.tput_timer.start_time == 0
assert engine.tput_timer.micro_step_count == 0
assert engine.tput_timer.global_step_count == 0
assert engine.tput_timer.total_elapsed_time == 0
# calling stop() after initialized - increments the local micro step counter
engine.tput_timer.stop()
assert engine.tput_timer.initialized
assert not engine.tput_timer.started
assert engine.tput_timer.start_time == 0
assert engine.tput_timer.micro_step_count == 1
assert engine.tput_timer.global_step_count == 0
assert engine.tput_timer.total_elapsed_time == 0
# calling start()/stop() to increment the step counter until start_step
while engine.tput_timer.micro_step_count < (gradient_accumulation_steps * engine.tput_timer.start_step):
engine.tput_timer.start()
global_step = (engine.tput_timer.micro_step_count + 1) % gradient_accumulation_steps == 0
engine.tput_timer.stop(global_step=global_step)
assert engine.tput_timer.global_step_count == engine.tput_timer.start_step
assert engine.tput_timer.total_elapsed_time == 0
# calling start()/stop() accumulates duration during gradient accumulation
while engine.tput_timer.global_step_count == engine.tput_timer.start_step:
engine.tput_timer.start()
current_duration = engine.tput_timer.step_elapsed_time
total_duration = engine.tput_timer.total_elapsed_time
global_step = (engine.tput_timer.micro_step_count + 1) % gradient_accumulation_steps == 0
engine.tput_timer.stop(global_step=global_step)
duration = engine.tput_timer.end_time - engine.tput_timer.start_time
# step elapsed time is reset after gradient accumulation steps
assert engine.tput_timer.step_elapsed_time == (
0 if engine.tput_timer.global_step_count != engine.tput_timer.start_step else current_duration +
duration)
assert engine.tput_timer.total_elapsed_time == total_duration + duration
def test_ext_param_getattr(self):
setup_serial_env()
class ExtLinear(torch.nn.Module):
def __init__(self, dim=16):
super().__init__()
self.dim = dim
self.linear1 = torch.nn.Linear(dim, dim)
self.linear2 = torch.nn.Linear(dim, dim)
def forward(self, input):
A = self.linear1(input)
B = self.linear2(A)
# external use of self.linear1.weight
C = torch.nn.functional.linear(B, self.linear1.weight)
return C.sum()
net = ExtLinear()
args = SimpleNamespace(local_rank=0)
engine, optim, _, _ = deepspeed.initialize(args=args,
model=net,
model_parameters=net.parameters(),
config=config)
with deepspeed.zero.GatheredParameters(net.linear1.weight):
assert net.linear1.weight.numel() == net.dim**2
input = torch.rand(net.dim).to(engine.device).half()
loss = engine(input)
engine.backward(loss)
engine.step()
class TestScatterGather(DistributedTest):
world_size = 2
def test(self):
with deepspeed.zero.Init():
l = torch.nn.Linear(6, 3)
assert l.weight.ds_status == ZeroParamStatus.NOT_AVAILABLE
assert l.weight.shape == torch.Size(partitioned_param_data_shape)
# Ensure there is no impact outside the context
l2 = torch.nn.Linear(6, 3)
assert not hasattr(l2.weight, 'ds_status')
assert l2.weight.numel() == l2.in_features * l2.out_features
with deepspeed.zero.GatheredParameters(l.weight):
assert l.weight.ds_status == ZeroParamStatus.AVAILABLE
assert l.weight.numel() == l.in_features * l.out_features
class TestGatherUpdate(DistributedTest):
world_size = 2
def test(self):
with deepspeed.zero.Init():
l = torch.nn.Linear(4, 2)
assert l.weight.ds_status == ZeroParamStatus.NOT_AVAILABLE
# Gather and make a change
with deepspeed.zero.GatheredParameters(l.weight, modifier_rank=1):
assert l.weight.ds_status == ZeroParamStatus.AVAILABLE
if dist.get_rank() == 1:
with torch.no_grad():
l.weight.zero_()
# should now be scattered again
# Now gather again and ensure the change is global
with deepspeed.zero.GatheredParameters(l.weight):
# all ranks compare
assert torch.equal(l.weight, torch.zeros_like(l.weight))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from unit.common import get_master_port
def setup_serial_env():
# Setup for a serial run
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = get_master_port()
os.environ['LOCAL_RANK'] = '0'
os.environ['RANK'] = '0'
os.environ['WORLD_SIZE'] = '1'
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from unit.common import DistributedTest
import deepspeed
class TestNewClassDeclaredInsideInit(DistributedTest):
world_size = 1
def test_new_class_declared_inside_init(self):
ds_config = dict(train_batch_size=1, zero_optimization=dict(stage=3))
with deepspeed.zero.Init(config_dict_or_path=ds_config):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc = torch.nn.Linear(4, 4)
with deepspeed.zero.Init(config_dict_or_path=ds_config):
model = MyModel()
deepspeed_engine, *_ = deepspeed.initialize(model=model, config_params=ds_config)
# ensure that zero3 processed the parameter
assert hasattr(deepspeed_engine.fc.weight, "ds_id")
class TestNewClassDeclaredInsideInitFailure(DistributedTest):
world_size = 1
def test_new_class_declared_inside_init_failure(self):
ds_config = dict(train_batch_size=1, zero_optimization=dict(stage=3))
try:
with deepspeed.zero.Init(config_dict_or_path=ds_config):
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc = torch.nn.Linear(1, 1)
model = MyModel()
assert False, "Should have failed. A subclass of torch.nn.Module must be defined before zero.Init() where an instance of the class is created."
except RuntimeError:
pass
except:
assert False, "Should have failed. Runtime error is expected."
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from unit.common import DistributedTest
import deepspeed
class TestNestingInit(DistributedTest):
world_size = 1
def test_nesting_init(self):
ds_config = dict(train_batch_size=1, zero_optimization=dict(stage=3))
with deepspeed.zero.Init(config_dict_or_path=ds_config):
with deepspeed.zero.Init(config_dict_or_path=ds_config):
model = torch.nn.Linear(4, 4)
deepspeed_engine, *_ = deepspeed.initialize(model=model, config_params=ds_config)
# ensure that zero3 processed the parameter
assert hasattr(deepspeed_engine.weight, "ds_id")
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from types import SimpleNamespace
import torch
import pytest
import deepspeed
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from utils import setup_serial_env
from unit.common import DistributedTest
class DanglingBias(torch.nn.Linear):
def forward(self, *inputs):
out = super().forward(*inputs)
# return the bias to trigger a dangling external param
return out, self.bias
class DataClass:
"""Just wraps data in an object. """
def __init__(self, out=None, bias=None):
self.out = out
self.bias = bias
class DanglingBiasClass(DanglingBias):
def forward(self, *inputs):
out, bias = super().forward(*inputs)
return DataClass(out=out, bias=bias)
class DanglingAttention(torch.nn.Linear):
def __init__(self, dim=16, return_obj=False):
super().__init__(dim, dim)
self.dim = dim
self.return_obj = return_obj
if return_obj:
self.d_linear = DanglingBiasClass(dim, dim)
else:
self.d_linear = DanglingBias(dim, dim)
def forward(self, input):
out = super().forward(input)
if self.return_obj:
out_obj = self.d_linear(out)
assert out_obj.bias.ds_status == ZeroParamStatus.AVAILABLE
# forward the external param
return out_obj.out, out_obj.bias
else:
out, bias = self.d_linear(out)
assert hasattr(bias, 'ds_status') or hasattr(bias, 'ds_param_alias')
z3_bias = bias if hasattr(bias, 'ds_status') else bias.ds_param_alias
assert z3_bias.ds_status == ZeroParamStatus.AVAILABLE
return out, bias
class ModelContainer(torch.nn.Module):
def __init__(self, dim=16, return_obj=False):
super().__init__()
self.dim = dim
self.linear1 = torch.nn.Linear(dim, dim)
self.dangler = DanglingAttention(dim, return_obj=return_obj)
def forward(self, input):
act1 = self.linear1(input)
# bias is actually dangler.d_linear.bias
act2, bias = self.dangler(act1)
return (act2 + bias).sum()
class DanglingExt(torch.nn.Module):
def __init__(self, dim=16):
super().__init__()
self.dim = dim
self.container = ModelContainer(dim)
def forward(self, input):
out = self.container(input)
# Make sure it's at the right level of the stack
assert len(self._external_params) == 0
assert len(self.container._external_params) == 1
assert len(self.container.dangler._external_params) == 0
return out
class ModelContainerVariableOutputType(ModelContainer):
def __init__(self, dim=16, output_type=dict):
super().__init__()
self.output_type = output_type
self.dim = dim
self.linear1 = torch.nn.Linear(dim, dim)
def forward(self, input):
act1 = self.linear1(input)
if self.output_type is dict:
return {'loss': act1.sum()}
if self.output_type is torch.tensor:
return act1.sum()
config = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True,
"loss_scale": 138.
},
"zero_optimization": {
"stage": 3,
"stage3_param_persistence_threshold": 1,
}
}
class TestReturnParam(DistributedTest):
world_size = 1
def test_ext_param_return(self):
setup_serial_env()
net = DanglingExt()
args = SimpleNamespace(local_rank=0)
engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config)
for _ in range(5):
input = torch.rand(net.dim).to(engine.device).half()
loss = engine(input)
engine.backward(loss)
engine.step()
@pytest.mark.skip('WIP')
def test_ext_param_returnobj(self):
setup_serial_env()
print()
net = ModelContainer(return_obj=True)
args = SimpleNamespace(local_rank=0)
engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config)
for _ in range(5):
input = torch.rand(net.dim).to(engine.device).half()
loss = engine(input)
assert len(net._external_params) == 1
assert len(net.dangler._external_params) == 0
engine.backward(loss)
engine.step()
@pytest.mark.parametrize('output_type', [torch.tensor, dict, None])
def test_stage_3_output_type(self, output_type):
setup_serial_env()
print()
net = ModelContainerVariableOutputType(output_type=output_type)
args = SimpleNamespace(local_rank=0)
engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config)
for _ in range(1):
input = torch.rand(net.dim).to(engine.device).half()
loss = engine(input)
if loss is not None:
if isinstance(loss, dict):
loss = loss['loss']
engine.backward(loss)
engine.step()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from deepspeed.accelerator import get_accelerator
from utils import setup_serial_env
from unit.common import DistributedTest
config = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True,
"loss_scale": 138.
},
"zero_optimization": {
"stage": 3,
"stage3_param_persistence_threshold": 1,
}
}
# test that sub-classes get params that aren't prematurely partitioned (which would otherwise require gathering)
# fixed by https://github.com/microsoft/DeepSpeed/pull/1202
class GrandPa(torch.nn.Module):
def __init__(self, *args):
super().__init__(*args)
self.param_grandpa = torch.nn.Parameter(torch.ones(5))
self.param_grandpa.data = (self.param_grandpa.data + 1).data # test param is not yet partitioned
class Pa(GrandPa):
def __init__(self, *args):
super().__init__(*args)
self.param_pa = torch.nn.Parameter(torch.ones(5))
self.param_pa.data = (self.param_pa.data + 1).data # test param is not yet partitioned
self.param_grandpa.data = (self.param_grandpa.data + 1).data # test param is not yet partitioned
class Son(Pa):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.ones(5))
self.param.data = (self.param.data + 1).data # test param is not yet partitioned
self.param_pa.data = (self.param_pa.data + 1).data # test param is not yet partitioned
self.param_grandpa.data = (self.param_grandpa.data + 1).data # test param is not yet partitioned
class TestSerialParamInit(DistributedTest):
world_size = 1
init_distributed = False
set_dist_env = False
def test_subclass_param_init(self):
setup_serial_env()
with deepspeed.zero.Init(config=config):
model = Son().cpu()
# test that all params have been partitioned
assert model.param_grandpa.ds_status == ZeroParamStatus.NOT_AVAILABLE
assert model.param_pa.ds_status == ZeroParamStatus.NOT_AVAILABLE
assert model.param.ds_status == ZeroParamStatus.NOT_AVAILABLE
        # test that the weight manipulation during each __init__ worked for all params without needing gathering
ones = torch.ones(5).half().to(get_accelerator().device_name())
with deepspeed.zero.GatheredParameters(list(model.parameters(recurse=False))):
assert torch.equal(model.param, ones + 1)
assert torch.equal(model.param_pa, ones + 2)
assert torch.equal(model.param_grandpa, ones + 3)
class TestDSInitWZinit(DistributedTest):
world_size = 2
def test(self):
ds_config = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
}
}
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(4, 4)
def magic(self):
return 42
with deepspeed.zero.Init():
model = Model()
engine, *_ = deepspeed.initialize(model=model, config=ds_config, model_parameters=model.parameters())
assert engine.magic() == 42
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# TODO: add tests with model parallelism for activation partitioning and other features.
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from copy import deepcopy
from unit.common import DistributedTest
ckpt = deepspeed.checkpointing.checkpoint
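# Run `module` forward, either directly or through the activation-checkpointing wrapper `ckpt`,
# backprop the sum of all tensor outputs that require grad, and collect the module's parameter
# gradients and the input gradients for later comparison.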
def _compute(module, *inputs, do_checkpoint=False):
if do_checkpoint:
outputs = ckpt(module, *inputs)
else:
outputs = module(*inputs)
if torch.is_tensor(outputs):
outputs = (outputs, )
sum(o.sum() for o in outputs if torch.is_tensor(o) and o.requires_grad).backward()
grads = [p.grad for p in module.parameters()]
input_grads = [inp.grad for inp in inputs if torch.is_tensor(inp)]
return {
'outputs': outputs,
'module_grads': grads,
'input_grads': input_grads,
}
def _prep_inputs(*inputs):
_inputs = []
for inp in inputs:
inp = deepcopy(inp)
if torch.is_tensor(inp):
inp = inp.to(get_accelerator().device_name())
_inputs.append(inp)
return tuple(_inputs)
def _match_outputs(ref, tgt):
assert type(ref) == type(tgt)
if type(ref) in [list, tuple]:
for x, y in zip(ref, tgt):
_match_outputs(x, y)
elif not torch.is_tensor(ref):
assert ref == tgt
elif ref.is_floating_point():
assert torch.allclose(ref, tgt)
else:
assert torch.equal(ref, tgt)
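# Compare a plain forward/backward pass against an activation-checkpointed one on fresh copies of
# the module and inputs; outputs, parameter gradients and input gradients must all match.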
def _test_activation_checkpoint(module, *inputs):
# Move to device
module.to(get_accelerator().device_name())
# Get rid of dropouts until we fork the RNG between tests.
module.eval()
module_ = deepcopy(module)
inputs_ = _prep_inputs(*inputs)
base = _compute(module_, *inputs_, do_checkpoint=False)
module_ = deepcopy(module)
inputs_ = _prep_inputs(*inputs)
test = _compute(module_, *inputs_, do_checkpoint=True)
for group in base.keys():
for b, t in zip(base[group], test[group]):
_match_outputs(b, t)
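# Run only the checkpointed pass and verify that the tensor/non-tensor ordering of its outputs
# matches `expected_ordering`.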
def _test_activation_checkpoint_ordering(module, expected_ordering, *inputs):
# Move to device
module.to(get_accelerator().device_name())
# Get rid of dropouts until we fork the RNG between tests.
module.eval()
module_ = deepcopy(module)
inputs_ = _prep_inputs(*inputs)
test = _compute(module_, *inputs_, do_checkpoint=True)
outputs = test['outputs']
test_ordering = []
for item in outputs:
if type(item) in [list, tuple]:
test_ordering += [torch.is_tensor(t) for t in item]
else:
test_ordering += [torch.is_tensor(item)]
assert expected_ordering == test_ordering
#
# Helpers
#
class MaskedLinear(torch.nn.Linear):
def forward(self, x, mask):
out = super().forward(x)
if mask.is_floating_point():
out = out * mask
else:
# must cast BoolTensor in older torch versions
out = out * mask.type_as(out)
return out
class MaskedLinearSeq(MaskedLinear):
"""Tests pipeline modules by also returning the mask."""
def forward(self, x, mask):
return super().forward(x, mask), mask
class MaskedLinearSeqDup(MaskedLinearSeq):
"""MaskedLinearSeq, but with more outputs than inputs and in a different order."""
def forward(self, x, mask):
dup = x.clone().detach() * 1.38 # just an arbitrary scaling
x, mask = super().forward(x, mask)
return dup, x, mask
class DropMaskLinear(torch.nn.Linear):
def forward(self, x, mask):
return super().forward(x)
class LinearNonTensorInput(torch.nn.Linear):
def forward(self, x, non_tensor_input):
return super().forward(x)
class LinearNonTensorOutput(torch.nn.Linear):
def __init__(self, non_tensor_output):
super().__init__(HIDDEN_DIM, HIDDEN_DIM)
self.non_tensor_output = non_tensor_output
def forward(self, x):
out = super().forward(x)
return out, self.non_tensor_output
HIDDEN_DIM = 20
def _mixed_mask(size=HIDDEN_DIM):
entries = torch.randn(size)
mask = torch.where(entries > 0, torch.ones(size), torch.zeros(size))
mask = mask.bool()
return mask
def _bool_to_float(btensor, dtype=torch.float32):
"""Converts a torch.BoolTensor to an equivalent dtype. """
ones = torch.ones(size=btensor.size(), dtype=dtype)
zeros = torch.zeros(size=btensor.size(), dtype=dtype)
return torch.where(btensor, ones, zeros)
#
# Tests
#
# both bool and float are important, as bool is not differentiable
@pytest.mark.parametrize('mask', [
_mixed_mask(),
_bool_to_float(_mixed_mask()),
])
class TestActivationCheckpoint(DistributedTest):
world_size = 1
def test_ckpt_inputs1_outputs1(self, mask):
module = torch.nn.Linear(HIDDEN_DIM, HIDDEN_DIM)
inputs = torch.rand(HIDDEN_DIM)
inputs.requires_grad = True
_test_activation_checkpoint(module, inputs)
def test_ckpt_inputs2_outputs1(self, mask):
module = MaskedLinear(HIDDEN_DIM, HIDDEN_DIM)
inputs = torch.rand(HIDDEN_DIM)
inputs.requires_grad = True
_test_activation_checkpoint(module, inputs, mask)
def test_ckpt_inputs2_outputs2(self, mask):
module = MaskedLinearSeq(HIDDEN_DIM, HIDDEN_DIM)
inputs = torch.rand(HIDDEN_DIM)
inputs.requires_grad = True
_test_activation_checkpoint(module, inputs, mask)
def test_ckpt_inputs2_outputs3(self, mask):
module = MaskedLinearSeqDup(HIDDEN_DIM, HIDDEN_DIM)
inputs = torch.rand(HIDDEN_DIM)
inputs.requires_grad = True
_test_activation_checkpoint(module, inputs, mask)
def test_ckpt_arg_none(self, mask):
module = DropMaskLinear(HIDDEN_DIM, HIDDEN_DIM)
inputs = (torch.rand(HIDDEN_DIM), None)
inputs[0].requires_grad = True
_test_activation_checkpoint(module, *inputs)
@pytest.mark.parametrize('non_tensor', [None, 2, True, (None, 2.5), (None, True, torch.randn(HIDDEN_DIM))])
class TestCheckpointNonTensor(DistributedTest):
world_size = 1
def test_ckpt_non_tensor_input(self, non_tensor):
module = LinearNonTensorInput(HIDDEN_DIM, HIDDEN_DIM)
inputs = torch.rand(HIDDEN_DIM)
inputs.requires_grad = True
_test_activation_checkpoint(module, inputs, non_tensor)
def test_ckpt_non_tensor_output(self, non_tensor):
module = LinearNonTensorOutput(non_tensor)
inputs = torch.rand(HIDDEN_DIM)
inputs.requires_grad = True
_test_activation_checkpoint(module, inputs)
@pytest.mark.parametrize('non_tensor_output', [
None, (torch.randn(HIDDEN_DIM), 2.5), (None, torch.randn(HIDDEN_DIM), True), (None, True, torch.randn(HIDDEN_DIM))
])
class TestCheckpointNonTensorOutputOrdering(DistributedTest):
world_size = 1
def test_ckpt_non_tensor_output_ordering(self, non_tensor_output):
module = LinearNonTensorOutput(non_tensor_output)
inputs = torch.rand(HIDDEN_DIM)
inputs.requires_grad = True
# First return is a tensor
ordering = [True]
if type(non_tensor_output) in [list, tuple]:
ordering += [torch.is_tensor(t) for t in non_tensor_output]
else:
ordering += [torch.is_tensor(non_tensor_output)]
_test_activation_checkpoint_ordering(module, ordering, inputs)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed.comm as dist
import deepspeed
import pytest
from deepspeed.ops.adam import FusedAdam
from unit.common import DistributedTest
from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader, SimpleMoEModel, sequence_dataloader
from unit.util import required_torch_version
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import CPUAdamBuilder
try:
from apex import amp # noqa: F401
_amp_available = True
except ImportError:
_amp_available = False
amp_available = pytest.mark.skipif(not _amp_available, reason="apex/amp is not installed")
class TestLambFP32GradClip(DistributedTest):
world_size = 2
def test(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.float)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestLambFP16(DistributedTest):
world_size = 2
    def test_basic(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
def test_empty_grad(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=True)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestAdamFP32EmptyGrad(DistributedTest):
world_size = 2
def test(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": False
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=True)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.float)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestAdamwFP16Basic(DistributedTest):
world_size = 1
def test(self):
config_dict = {"train_batch_size": 1, "steps_per_print": 1, "fp16": {"enabled": True}}
hidden_dim = 10
model = SimpleModel(hidden_dim)
optimizer = torch.optim.AdamW(params=model.parameters())
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer)
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestFP16OptimizerForMoE(DistributedTest):
world_size = 2
def test_unfused_gradnorm(self, monkeypatch):
if not required_torch_version():
pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly")
config_dict = {"train_batch_size": 2, "steps_per_print": 1, "fp16": {"enabled": True}}
hidden_dim = 10
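        # Monkeypatched in below for the optimizer's unscale_and_clip_grads: it all-gathers the
        # computed total norm and asserts every rank saw the same value, then returns 1.0 so no
        # real unscaling happens.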
def mock_unscale_and_clip_grads(total_norm, apply_scale=True):
torch_norm_tensor = get_accelerator().FloatTensor([total_norm])
all_gather_results = [torch.zeros_like(torch_norm_tensor) for _ in range(dist.get_world_size())]
dist.all_gather(all_gather_results, torch_norm_tensor)
assert len(set([x.item() for x in all_gather_results])) == 1
return 1.0
# initialize MoE
model = SimpleMoEModel(hidden_dim, ep_size=2)
optimizer = torch.optim.AdamW(params=model.parameters())
engine, optimizer, _, _ = deepspeed.initialize(config=config_dict,
model=model,
optimizer=optimizer,
dist_init_required=False)
monkeypatch.setattr(optimizer, 'unscale_and_clip_grads', mock_unscale_and_clip_grads)
data_loader = sequence_dataloader(model=engine, total_samples=50, hidden_dim=hidden_dim, device=engine.device)
for n, batch in enumerate(data_loader):
loss = engine(batch[0], batch[1])
engine.backward(loss)
engine.step()
def test_fused_gradnorm(self, monkeypatch):
if not required_torch_version():
pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly")
config_dict = {"train_batch_size": 2, "steps_per_print": 1, "fp16": {"enabled": True}}
hidden_dim = 10
def mock_unscale_and_clip_grads(grads_groups_flat, total_norm, apply_scale=True):
torch_norm_tensor = get_accelerator().FloatTensor([total_norm])
all_gather_results = [torch.zeros_like(torch_norm_tensor) for _ in range(dist.get_world_size())]
dist.all_gather(all_gather_results, torch_norm_tensor)
assert len(set([x.item() for x in all_gather_results])) == 1
return 1.0
# initialize MoE
model = SimpleMoEModel(hidden_dim, ep_size=2)
# optimizer = torch.optim.AdamW(params=model.parameters())
optimizer = FusedAdam(params=model.parameters())
engine, optimizer, _, _ = deepspeed.initialize(config=config_dict,
model=model,
optimizer=optimizer,
dist_init_required=False)
monkeypatch.setattr(optimizer, 'unscale_and_clip_grads', mock_unscale_and_clip_grads)
data_loader = sequence_dataloader(model=engine, total_samples=50, hidden_dim=hidden_dim, device=engine.device)
for n, batch in enumerate(data_loader):
loss = engine(batch[0], batch[1])
engine.backward(loss)
engine.step()
@pytest.mark.parametrize("fused_lamb_legacy", [(False), (True)])
def test_lamb_gradnorm(self, monkeypatch, fused_lamb_legacy: bool):
if not required_torch_version():
pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly")
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"fp16": {
"enabled": True
},
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
}
}
hidden_dim = 10
def mock_unscale_and_clip_grads(total_norm, apply_scale=True):
torch_norm_tensor = get_accelerator().FloatTensor([total_norm])
all_gather_results = [torch.zeros_like(torch_norm_tensor) for _ in range(dist.get_world_size())]
dist.all_gather(all_gather_results, torch_norm_tensor)
assert len(set([x.item() for x in all_gather_results])) == 1
return 1.0
# initialize MoE
model = SimpleMoEModel(hidden_dim, ep_size=2)
engine, optimizer, _, _ = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters(),
dist_init_required=False)
monkeypatch.setattr(optimizer, 'unscale_and_clip_grads', mock_unscale_and_clip_grads)
optimizer.fused_lamb_legacy = fused_lamb_legacy
data_loader = sequence_dataloader(model=engine, total_samples=50, hidden_dim=hidden_dim, device=engine.device)
for n, batch in enumerate(data_loader):
loss = engine(batch[0], batch[1])
engine.backward(loss)
engine.step()
class TestAdamwFP16EmptyGrad(DistributedTest):
world_size = 1
def test(self):
config_dict = {"train_batch_size": 1, "steps_per_print": 1, "fp16": {"enabled": True}}
hidden_dim = 10
model = SimpleModel(hidden_dim)
optimizer = torch.optim.AdamW(params=model.parameters())
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer)
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
@pytest.mark.parametrize("zero_stage", [1, 2, 3])
@pytest.mark.parametrize("use_cpu_offload", [True, False])
class TestAdamFP16ZeroOneCycleCompatibility(DistributedTest):
world_size = 1
def test(self, zero_stage, use_cpu_offload):
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"scheduler": {
"type": "OneCycle",
"params": {
"cycle_first_step_size": 16000,
"cycle_first_stair_count": 8000,
"decay_step_size": 16000,
"cycle_min_lr": 1e-06,
"cycle_max_lr": 3e-05,
"decay_lr_rate": 1e-07,
"cycle_min_mom": 0.85,
"cycle_max_mom": 0.99,
"decay_mom_rate": 0.0
}
},
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": zero_stage,
"cpu_offload": use_cpu_offload
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
@pytest.mark.parametrize("zero_stage", [1, 2, 3])
@pytest.mark.parametrize("use_cpu_offload", [True, False])
@pytest.mark.parametrize("hidden_dim", [9, 10])
class TestZeroStaticScale(DistributedTest):
world_size = 1
def test(self, zero_stage, use_cpu_offload, hidden_dim):
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
config_dict = {
"train_batch_size": 4,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True,
"loss_scale": 138.
},
"zero_optimization": {
"stage": zero_stage,
"cpu_offload": use_cpu_offload
}
}
model = SimpleModel(hidden_dim)
model, optim, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
# Ensure the static scaler is configured.
assert optim.dynamic_loss_scale == False
assert optim.loss_scaler.loss_scale == 138.
# Now make sure things work..
data_loader = random_dataloader(model=model, total_samples=10, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
@pytest.mark.parametrize("zero_stage", [1, 2, 3])
@pytest.mark.parametrize("use_cpu_offload", [True, False])
class TestZeroAllowUntestedOptimizer(DistributedTest):
world_size = 1
def test(self, zero_stage, use_cpu_offload):
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
config_dict = {
"train_batch_size": 4,
"steps_per_print": 1,
"fp16": {
"enabled": True,
},
"zero_optimization": {
"stage": zero_stage,
"cpu_offload": use_cpu_offload
},
"zero_allow_untested_optimizer": False,
"zero_force_ds_cpu_optimizer": False
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
optimizer = SimpleOptimizer(model.parameters())
with pytest.raises(AssertionError):
model, optim, _, _ = deepspeed.initialize(config=config_dict,
model=model,
optimizer=optimizer,
model_parameters=model.parameters())
@pytest.mark.parametrize("zero_stage", [1, 2, 3])
@pytest.mark.parametrize("use_cpu_offload", [True, False])
class TestZeroEmptyPartition(DistributedTest):
world_size = 3
def test(self, zero_stage, use_cpu_offload):
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
if zero_stage == 3:
pytest.skip("skip for now")
config_dict = {
"train_micro_batch_size_per_gpu": 1,
"gradient_accumulation_steps": 1,
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"zero_optimization": {
"stage": zero_stage,
"cpu_offload": use_cpu_offload,
"reduce_bucket_size": 100,
"allgather_bucket_size": 100
}
}
hidden_dim = 1
model = SimpleModel(hidden_dim)
# Ensure model has 2 parameters, to cause empty partition with DP=3
assert len(list(model.parameters())) == 2
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
# Now make sure things work..
data_loader = random_dataloader(model=model, total_samples=1, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
@amp_available
class TestAmp(DistributedTest):
world_size = 2
def test_adam_basic(self):
config_dict = {"train_batch_size": 2, "steps_per_print": 1, "amp": {"enabled": True}}
hidden_dim = 10
model = SimpleModel(hidden_dim)
optimizer = torch.optim.Adam(params=model.parameters())
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer)
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
def test_lamb_basic(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"amp": {
"enabled": True,
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
def test_adam_O2(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"amp": {
"enabled": True,
"opt_level": "O2"
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
def test_adam_O2_empty_grad(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"amp": {
"enabled": True,
"opt_level": "O2"
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
@pytest.mark.parametrize("zero_stage", [1, 2, 3])
@pytest.mark.parametrize("optimizer_constructor", [FusedAdam, torch.optim.Adam])
class TestZeroSupportedClientOptimizer(DistributedTest):
world_size = 1
def test(self, zero_stage, optimizer_constructor):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": zero_stage
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
client_optimizer = optimizer_constructor(params=model.parameters())
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer)
class TestZero2ReduceScatterOff(DistributedTest):
world_size = 2
def test(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"zero_optimization": {
"stage": 2,
"contiguous_gradients": True,
"allgather_bucket_size": 2000000000,
"reduce_bucket_size": 200000000,
"overlap_comm": False,
"reduce_scatter": False
},
"fp16": {
"enabled": True
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
@pytest.mark.parametrize("adam_type", ["Adam", "AdamW"])
@pytest.mark.parametrize("torch_impl", [True, False])
class TestFP16AdamTypes(DistributedTest):
world_size = 1
def test(self, adam_type, torch_impl):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"fp16": {
"enabled": True,
"initial_scale_power": 10
},
"optimizer": {
"type": adam_type,
"torch_adam": torch_impl,
"params": {
"lr": 0.00015
}
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=10, hidden_dim=hidden_dim, device=model.device)
for _, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestZero3LazyScatter(DistributedTest):
world_size = 1
def test(self):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"fp16": {
"enabled": True,
"initial_scale_power": 10
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": 0.00015
}
},
"zero_optimization": {
"stage": 3
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=10, hidden_dim=hidden_dim, device=model.device)
for _, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
@pytest.mark.parametrize('stage', [1, 2, 3])
class TestZeroEmptyGrad(DistributedTest):
world_size = 1
def test(self, stage):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": stage
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
optimizer = torch.optim.Adam(model.parameters())
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer)
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
import pytest
from deepspeed.ops.adam import FusedAdam
from unit.common import DistributedTest
from deepspeed.ops.op_builder import CPUAdamBuilder
from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader
from unit.util import bf16_required_version_check
from deepspeed import comm as dist
class TestAdamBF16ZeroOneCycleCompatibility(DistributedTest):
world_size = 1
def test(self, zero_stage=2, use_cpu_offload=False):
if not bf16_required_version_check():
pytest.skip(
" DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
)
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
config_dict = {
"train_micro_batch_size_per_gpu": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"scheduler": {
"type": "OneCycle",
"params": {
"cycle_first_step_size": 16000,
"cycle_first_stair_count": 8000,
"decay_step_size": 16000,
"cycle_min_lr": 1e-06,
"cycle_max_lr": 3e-05,
"decay_lr_rate": 1e-07,
"cycle_min_mom": 0.85,
"cycle_max_mom": 0.99,
"decay_mom_rate": 0.0
}
},
"fp16": {
"enabled": False
},
"bf16": {
"enabled": True
},
"zero_optimization": {
"stage": zero_stage,
"cpu_offload": use_cpu_offload
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.bfloat16)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestZeroAllowUntestedOptimizer(DistributedTest):
world_size = 1
def test(self, zero_stage=2, use_cpu_offload=False):
if not bf16_required_version_check():
pytest.skip(
" DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
)
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
config_dict = {
"train_micro_batch_size_per_gpu": 4,
"steps_per_print": 1,
"fp16": {
"enabled": False,
},
"bf16": {
"enabled": True
},
"zero_optimization": {
"stage": zero_stage,
"cpu_offload": use_cpu_offload
},
"zero_allow_untested_optimizer": False
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
optimizer = SimpleOptimizer(model.parameters())
with pytest.raises(AssertionError):
model, optim, _, _ = deepspeed.initialize(config=config_dict,
model=model,
optimizer=optimizer,
model_parameters=model.parameters())
class TestZeroEmptyPartition(DistributedTest):
world_size = 3
def test(self, zero_stage=2, use_cpu_offload=False):
if not bf16_required_version_check():
pytest.skip(
" DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
)
if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
if zero_stage == 3:
pytest.skip("skip for now")
config_dict = {
"train_micro_batch_size_per_gpu": 1,
"gradient_accumulation_steps": 1,
"fp16": {
"enabled": False
},
"bf16": {
"enabled": True
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"zero_optimization": {
"stage": zero_stage,
"cpu_offload": use_cpu_offload,
"reduce_bucket_size": 100,
"allgather_bucket_size": 100
}
}
hidden_dim = 1
model = SimpleModel(hidden_dim)
# Ensure model has 2 parameters, to cause empty partition with DP=3
assert len(list(model.parameters())) == 2
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
# Now make sure things work..
data_loader = random_dataloader(model=model,
total_samples=1,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.bfloat16)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
@pytest.mark.parametrize("optimizer_constructor", [torch.optim.Adam, FusedAdam])
class TestZeroSupportedClientOptimizer(DistributedTest):
world_size = 1
def test(self, optimizer_constructor, zero_stage=2):
if not bf16_required_version_check():
pytest.skip(
" DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
)
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"steps_per_print": 1,
"fp16": {
"enabled": False
},
"bf16": {
"enabled": True
},
"zero_optimization": {
"stage": zero_stage
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
client_optimizer = optimizer_constructor(params=model.parameters())
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer)
class TestZero2ReduceScatterOff(DistributedTest):
world_size = 2
def test(self):
if not bf16_required_version_check():
pytest.skip(
" DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
)
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"gradient_clipping": 1.0,
"zero_optimization": {
"stage": 2,
"contiguous_gradients": True,
"allgather_bucket_size": 2000000000,
"reduce_bucket_size": 200000000,
"overlap_comm": False,
"reduce_scatter": False
},
"fp16": {
"enabled": False
},
"bf16": {
"enabled": True
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.bfloat16)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestZeroEmptyGrad(DistributedTest):
world_size = 1
def test(self, stage=2):
if not bf16_required_version_check():
pytest.skip(
" DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
)
config_dict = {
"train_micro_batch_size_per_gpu": 1,
"steps_per_print": 1,
"fp16": {
"enabled": False
},
"bf16": {
"enabled": True
},
"zero_optimization": {
"stage": stage
}
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
optimizer = torch.optim.Adam(model.parameters())
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer)
data_loader = random_dataloader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.bfloat16)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
@pytest.mark.parametrize("comp_type", [torch.float16, torch.bfloat16, torch.float], ids=["fp16", "bfp16", "fp32"])
@pytest.mark.parametrize("comm_type", [torch.float16, torch.bfloat16, None], ids=["fp16", "bfp16", "default"])
class TestZeroDtypeCocktail(DistributedTest):
world_size = 2
def test(self, comp_type, comm_type):
if comp_type == torch.bfloat16 or comm_type == torch.bfloat16:
if not bf16_required_version_check():
pytest.skip(
" DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
)
type_str = {torch.float16: "fp16", torch.bfloat16: "bfp16"}
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"steps_per_print": 1,
"fp16": {
"enabled": comp_type == torch.float16
},
"bf16": {
"enabled": comp_type == torch.bfloat16
},
"zero_optimization": {
"stage": 2
},
}
if comm_type is not None:
config_dict["communication_data_type"] = type_str[comm_type]
else:
comm_type = comp_type
hidden_dim = 10
model = SimpleModel(hidden_dim)
optimizer = torch.optim.Adam(model.parameters())
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer)
data_loader = random_dataloader(model=model,
total_samples=2,
hidden_dim=hidden_dim,
device=model.device,
dtype=comp_type)
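        # Wrap dist.reduce so every gradient reduction is checked against the expected
        # communication dtype; the original reduce is restored after the loop.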
def custom_reduce(tensor, dst, op=dist.ReduceOp.SUM, group=None, async_op=False):
assert tensor.dtype == comm_type
return orig_torch_reduce(tensor, dst, op, group, async_op)
orig_torch_reduce = dist.reduce
dist.reduce = custom_reduce
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
dist.reduce = orig_torch_reduce
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
import numpy as np
from unit.common import DistributedTest
from unit.simple_model import SimpleModel
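# Fill every parameter's gradient with each value in `gradient_list` and take an optimizer step;
# used to drive the dynamic loss scaler with either normal or overflowing (inf/nan) gradients.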
def run_model_step(model, gradient_list):
for value in gradient_list:
for p in model.parameters():
p.grad = torch.empty_like(p, dtype=p.dtype)
p.grad.fill_(value)
model.step()
class TestFused(DistributedTest):
world_size = 1
def test_no_overflow(self):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 8,
"loss_scale_window": 2
}
}
hidden_dim = 1
model = SimpleModel(hidden_dim)
model, optim, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
expected_loss_scale = 2**8
expected_scale_window = 2
# Ensure the dynamic loss scaler is correctly configured.
assert optim.dynamic_loss_scale == True
assert optim.cur_scale == expected_loss_scale
assert optim.scale_window == expected_scale_window
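        # Without overflow the scaler doubles the loss scale every `loss_scale_window` steps.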
for i, value in enumerate(np.random.uniform(-0.1, 0.1, 10)):
run_model_step(model, [value])
assert optim.cur_scale == expected_loss_scale
assert optim.cur_iter == (i + 1)
if optim.cur_iter % expected_scale_window == 0:
expected_loss_scale *= 2
def test_all_overflow(self):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 4,
"loss_scale_window": 2
}
}
hidden_dim = 1
model = SimpleModel(hidden_dim)
model, optim, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
expected_loss_scale = 2**4
# Ensure the dynamic loss scaler is correctly configured.
assert optim.dynamic_loss_scale == True
assert optim.cur_scale == expected_loss_scale
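        # Every step overflows, so the scaler halves the scale each iteration (never below 1 here).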
overflow_gradients = [float('inf'), float('-inf')] + [float('nan')] * 6
for i, value in enumerate(overflow_gradients):
run_model_step(model, [value])
expected_loss_scale = max(expected_loss_scale / 2, 1)
assert optim.cur_scale == expected_loss_scale
assert optim.cur_iter == (i + 1)
def test_some_overflow(self):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 8,
"loss_scale_window": 2
}
}
hidden_dim = 1
model = SimpleModel(hidden_dim)
model, optim, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
expected_loss_scale = 2**8
expected_scale_window = 2
expected_iteration = 0
# Ensure the dynamic loss scaler is correctly configured.
assert optim.dynamic_loss_scale == True
assert optim.cur_scale == expected_loss_scale
assert optim.scale_window == expected_scale_window
# Run model with overflows to decrease scale
overflow_gradients = [float('inf'), float('nan')]
expected_iteration += len(overflow_gradients)
run_model_step(model, overflow_gradients)
expected_loss_scale /= (2**len(overflow_gradients))
assert optim.cur_scale == expected_loss_scale
assert optim.cur_iter == expected_iteration
# Run model scale_window + 1 times to increase scale once
normal_gradients = np.random.uniform(-0.1, 0.1, expected_scale_window + 1)
expected_iteration += len(normal_gradients)
run_model_step(model, normal_gradients)
expected_loss_scale *= 2
assert optim.cur_scale == expected_loss_scale
assert optim.cur_iter == expected_iteration
# Run model with overflows to decrease scale
overflow_gradients = [float('inf')]
expected_iteration += len(overflow_gradients)
run_model_step(model, overflow_gradients)
expected_loss_scale /= (2**len(overflow_gradients))
assert optim.cur_scale == expected_loss_scale
assert optim.cur_iter == expected_iteration
class TestUnfused(DistributedTest):
world_size = 1
def test_no_overflow(self):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 8,
"loss_scale_window": 2
}
}
hidden_dim = 1
model = SimpleModel(hidden_dim)
model, optim, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
expected_loss_scale = 2**8
expected_scale_window = 2
# Ensure the dynamic loss scaler is correctly configured.
assert optim.dynamic_loss_scale == True
assert optim.cur_scale == expected_loss_scale
assert optim.scale_window == expected_scale_window
for i, value in enumerate(np.random.uniform(-0.1, 0.1, 10)):
run_model_step(model, [value])
assert optim.cur_scale == expected_loss_scale
assert optim.cur_iter == (i + 1)
if optim.cur_iter % expected_scale_window == 0:
expected_loss_scale *= 2
def test_all_overflow(self):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 4,
"loss_scale_window": 2,
"min_loss_scale": 0.25
}
}
hidden_dim = 1
model = SimpleModel(hidden_dim)
model, optim, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
expected_loss_scale = 2**4
expected_min_loss_scale = 0.25
# Ensure the dynamic loss scaler is correctly configured.
assert optim.dynamic_loss_scale == True
assert optim.cur_scale == expected_loss_scale
assert optim.min_loss_scale == expected_min_loss_scale
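        # Every step overflows; the scale halves each iteration but is floored at min_loss_scale.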
overflow_gradients = [float('inf'), float('-inf')] + [float('nan')] * 6
for i, value in enumerate(overflow_gradients):
run_model_step(model, [value])
expected_loss_scale = max(expected_loss_scale / 2, expected_min_loss_scale)
assert optim.cur_scale == expected_loss_scale
assert optim.cur_iter == (i + 1)
def test_some_overflow(self):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 8,
"loss_scale_window": 2
}
}
hidden_dim = 1
model = SimpleModel(hidden_dim)
model, optim, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
expected_loss_scale = 2**8
expected_scale_window = 2
expected_iteration = 0
# Ensure the dynamic loss scaler is correctly configured.
assert optim.dynamic_loss_scale == True
assert optim.cur_scale == expected_loss_scale
assert optim.scale_window == expected_scale_window
# Run model with overflows to decrease scale
overflow_gradients = [float('inf'), float('nan')]
expected_iteration += len(overflow_gradients)
run_model_step(model, overflow_gradients)
expected_loss_scale /= (2**len(overflow_gradients))
assert optim.cur_scale == expected_loss_scale
assert optim.cur_iter == expected_iteration
# Run model scale_window + 1 times to increase scale once
normal_gradients = np.random.uniform(-0.1, 0.1, expected_scale_window + 1)
expected_iteration += len(normal_gradients)
run_model_step(model, normal_gradients)
expected_loss_scale *= 2
assert optim.cur_scale == expected_loss_scale
assert optim.cur_iter == expected_iteration
# Run model with overflows to decrease scale
overflow_gradients = [float('inf')]
expected_iteration += len(overflow_gradients)
run_model_step(model, overflow_gradients)
expected_loss_scale /= (2**len(overflow_gradients))
assert optim.cur_scale == expected_loss_scale
assert optim.cur_iter == expected_iteration
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import torch.nn as nn
import deepspeed.comm as dist
import deepspeed
import pytest
import copy
import os
import numpy as np
from deepspeed.runtime.pipe.topology import PipeDataParallelTopology
from deepspeed.ops.op_builder import OpBuilder
from deepspeed.runtime.pipe.module import PipelineModule
from unit.common import DistributedTest
from unit.simple_model import SimpleModel, random_dataloader
from unit.alexnet_model import AlexNetPipe, train_cifar
from unit.util import required_minimum_torch_version
from deepspeed.accelerator import get_accelerator
PipeTopo = PipeDataParallelTopology
if not required_minimum_torch_version(major_version=1, minor_version=8):
pytest.skip(
"NCCL-based 1-bit compression requires torch 1.8 or higher",
allow_module_level=True,
)
rocm_version = OpBuilder.installed_rocm_version()
if rocm_version[0] > 4:
pytest.skip("NCCL-based 1-bit compression is not yet supported w. ROCm 5 until cupy supports ROCm 5",
allow_module_level=True)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=["fp32", "fp16"])
class TestOneBitAdamBasic(DistributedTest):
world_size = 2
def test(self, dtype):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "OneBitAdam",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"freeze_step": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": (dtype == torch.float16),
"loss_scale": 0,
"initial_scale_power": 16,
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(
model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=dtype,
)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestOneBitAdamExpAvgMask(DistributedTest):
world_size = 2
def test(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "OneBitAdam",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"freeze_step": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
param_optimizer = list(model.named_parameters())
mask1 = torch.zeros_like(param_optimizer[0][1].data)
for col in range(mask1.size()[1]):
mask1[0][col] += 1
mask1 = torch.flatten(mask1)
optimizer_grouped_parameters = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01,
"exp_avg_mask": mask1,
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
model, optimizer, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters,
)
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
# Test whether the momentum mask works
for v in optimizer.state.values():
if v["exp_avg"].size() == mask1.size():
assert torch.allclose(
v["exp_avg"],
v["exp_avg"].mul_(mask1.to(device=v["exp_avg"].device)),
atol=1e-07,
), f"Momentum mask is not working properly"
class TestOneBitAdamCheckpointing(DistributedTest):
world_size = 2
def test(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "OneBitAdam",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"freeze_step": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
param_optimizer = list(model.named_parameters())
mask1 = torch.zeros_like(param_optimizer[0][1].data)
mask2 = torch.zeros_like(param_optimizer[0][1].data)
for col in range(mask1.size()[1]):
mask1[0][col] += 1
mask2[1][col] += 1
mask1 = torch.flatten(mask1)
mask2 = torch.flatten(mask2)
optimizer_grouped_parameters_1 = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01,
"exp_avg_mask": mask1,
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
optimizer_grouped_parameters_2 = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01,
"exp_avg_mask": mask2,
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
optimizer_grouped_parameters_3 = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
model_1, optimizer_1, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters_1,
)
data_loader = random_dataloader(
model=model_1,
total_samples=10,
hidden_dim=hidden_dim,
device=model_1.device,
)
for n, batch in enumerate(data_loader):
loss = model_1(batch[0], batch[1])
model_1.backward(loss)
model_1.step()
        # Test whether the momentum mask still exists after saving a checkpoint
assert optimizer_1.optimizer.adam_freeze_key is True
mask1 = mask1.to(device=optimizer_1.param_groups[0]["exp_avg_mask"].device)
assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1,
atol=1e-07), f"Incorrect momentum mask"
save_folder = os.path.join(tmpdir, "saved_checkpoint")
model_1.save_checkpoint(save_folder, tag=None)
assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1,
atol=1e-07), f"Momentum mask should not change after saving checkpoint"
model_2, optimizer_2, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters_2,
)
# Test whether momentum mask stays the same after loading checkpoint
mask2 = mask2.to(device=optimizer_2.param_groups[0]["exp_avg_mask"].device)
assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2,
atol=1e-07), f"Incorrect momentum mask"
model_2.load_checkpoint(
save_folder,
tag=None,
load_optimizer_states=True,
load_lr_scheduler_states=True,
)
assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2,
atol=1e-07), f"Momentum mask should not change after loading checkpoint"
        # Test whether worker & server errors are reset
for v in optimizer_2.state.values():
assert "worker_error" not in v, f"Incorrect worker error"
assert "server_error" not in v, f"Incorrect server error"
assert optimizer_2.optimizer.adam_freeze_key is True
model_3, optimizer_3, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters_3,
)
optimizer_3.optimizer.freeze_step = 20
data_loader = random_dataloader(
model=model_3,
total_samples=50,
hidden_dim=hidden_dim,
device=model_3.device,
)
for n, batch in enumerate(data_loader):
loss = model_3(batch[0], batch[1])
model_3.backward(loss)
model_3.step()
assert optimizer_3.optimizer.adam_freeze_key is True
# Test whether momentum mask stays the same after loading checkpoint
assert ("exp_avg_mask" not in optimizer_3.param_groups[0]), f"Incorrect momentum mask"
model_3.load_checkpoint(
save_folder,
tag=None,
load_optimizer_states=True,
load_lr_scheduler_states=True,
)
assert ("exp_avg_mask"
not in optimizer_3.param_groups[0]), f"Momentum mask should not change after loading checkpoint"
        # Test whether worker & server errors are reset
for v in optimizer_3.state.values():
assert "worker_error" not in v, f"Incorrect worker error"
assert "server_error" not in v, f"Incorrect server error"
assert optimizer_3.optimizer.adam_freeze_key is False
def test_overflow(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "OneBitAdam",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"freeze_step": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=100, hidden_dim=hidden_dim, device=model.device)
save_folder = os.path.join(tmpdir, "saved_checkpoint")
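        # Deliberately inflate the loss on rank 0 after step 10 to force fp16 gradient overflow,
        # then check that training and checkpoint saving still proceed.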
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
if dist.get_rank() == 0 and n >= 10:
loss = loss * 1000000.0
model.backward(loss)
dist.barrier()
model.step()
dist.barrier()
model.save_checkpoint(save_folder, tag=None)
@pytest.mark.parametrize(
"topo_config",
[
{
"num_pp": 1,
"num_dp": 4
},
{
"num_pp": 2,
"num_dp": 2
},
{
"num_pp": 4,
"num_dp": 1
},
],
)
class TestOneBitAdamFP16Pipeline(DistributedTest):
world_size = 4
def test(self, topo_config):
config_dict = {
"train_batch_size": 16,
"train_micro_batch_size_per_gpu": 4,
"steps_per_print": 20,
"optimizer": {
"type": "OneBitAdam",
"params": {
"lr": 0.00001,
"betas": [0.9, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7,
"freeze_step": 200,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
},
},
"gradient_clipping": 1.0,
"zero_optimization": {
"stage": 0
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
"pipeline": {
"seed_layers": True,
"activation_checkpoint_interval": 1
},
}
topo = PipeTopo(**topo_config)
steps = 500 # Must be >=100
# Allocate model for consistent initial weights.
init_net = AlexNetPipe()
test_net = copy.deepcopy(init_net)
test_model = PipelineModule(layers=test_net.to_layers(), topology=topo, loss_fn=nn.CrossEntropyLoss())
test_losses = train_cifar(
test_model,
config=config_dict,
num_steps=steps,
fp16=config_dict["fp16"]["enabled"],
)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=["fp32", "fp16"])
class TestZeroOneAdamBasic(DistributedTest):
world_size = 2
def test(self, dtype):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "ZeroOneAdam",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"var_freeze_step": 4,
"var_update_scaler": 1,
"local_step_scaler": 1,
"local_step_clipper": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": (dtype == torch.float16),
"loss_scale": 0,
"initial_scale_power": 16,
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(
model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=dtype,
)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestZeroOneAdamExpAvgMask(DistributedTest):
world_size = 2
def test(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "ZeroOneAdam",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"var_freeze_step": 4,
"var_update_scaler": 1,
"local_step_scaler": 1,
"local_step_clipper": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
param_optimizer = list(model.named_parameters())
mask1 = torch.zeros_like(param_optimizer[0][1].data)
for col in range(mask1.size()[1]):
mask1[0][col] += 1
mask1 = torch.flatten(mask1)
optimizer_grouped_parameters = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01,
"exp_avg_mask": mask1,
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
model, optimizer, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters,
)
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
# Test whether the momentum mask works
for v in optimizer.state.values():
if v["exp_avg"].size() == mask1.size():
assert torch.allclose(
v["exp_avg"],
v["exp_avg"].mul_(mask1.to(device=v["exp_avg"].device)),
atol=1e-07,
), f"Momentum mask is not working properly"
class TestZeroOneAdamCheckpointing(DistributedTest):
world_size = 2
def test(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "ZeroOneAdam",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"var_freeze_step": 4,
"var_update_scaler": 1,
"local_step_scaler": 1,
"local_step_clipper": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
param_optimizer = list(model.named_parameters())
mask1 = torch.zeros_like(param_optimizer[0][1].data)
mask2 = torch.zeros_like(param_optimizer[0][1].data)
for col in range(mask1.size()[1]):
mask1[0][col] += 1
mask2[1][col] += 1
mask1 = torch.flatten(mask1)
mask2 = torch.flatten(mask2)
optimizer_grouped_parameters_1 = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01,
"exp_avg_mask": mask1,
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
optimizer_grouped_parameters_2 = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01,
"exp_avg_mask": mask2,
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
optimizer_grouped_parameters_3 = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
model_1, optimizer_1, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters_1,
)
data_loader = random_dataloader(
model=model_1,
total_samples=10,
hidden_dim=hidden_dim,
device=model_1.device,
)
for n, batch in enumerate(data_loader):
loss = model_1(batch[0], batch[1])
model_1.backward(loss)
model_1.step()
        # Test whether the momentum mask still exists after saving a checkpoint
mask1 = mask1.to(device=optimizer_1.param_groups[0]["exp_avg_mask"].device)
assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1,
atol=1e-07), f"Incorrect momentum mask"
save_folder = os.path.join(tmpdir, "saved_checkpoint")
model_1.save_checkpoint(save_folder, tag=None)
assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1,
atol=1e-07), f"Momentum mask should not change after saving checkpoint"
model_2, optimizer_2, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters_2,
)
# Test whether momentum mask stays the same after loading checkpoint
mask2 = mask2.to(device=optimizer_2.param_groups[0]["exp_avg_mask"].device)
assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2,
atol=1e-07), f"Incorrect momentum mask"
model_2.load_checkpoint(
save_folder,
tag=None,
load_optimizer_states=True,
load_lr_scheduler_states=True,
)
assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2,
atol=1e-07), f"Momentum mask should not change after loading checkpoint"
        # Test whether the worker & server errors are reset
for v in optimizer_2.state.values():
assert "worker_error" not in v, f"Incorrect worker error"
assert "server_error" not in v, f"Incorrect server error"
model_3, optimizer_3, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters_3,
)
optimizer_3.optimizer.freeze_step = 20
data_loader = random_dataloader(
model=model_3,
total_samples=50,
hidden_dim=hidden_dim,
device=model_3.device,
)
for n, batch in enumerate(data_loader):
loss = model_3(batch[0], batch[1])
model_3.backward(loss)
model_3.step()
        # Test that the momentum mask stays absent after loading a checkpoint saved with a mask
assert ("exp_avg_mask" not in optimizer_3.param_groups[0]), f"Incorrect momentum mask"
model_3.load_checkpoint(
save_folder,
tag=None,
load_optimizer_states=True,
load_lr_scheduler_states=True,
)
assert ("exp_avg_mask"
not in optimizer_3.param_groups[0]), f"Momentum mask should not change after loading checkpoint"
        # Test whether the worker & server errors are reset
for v in optimizer_3.state.values():
assert "worker_error" not in v, f"Incorrect worker error"
assert "server_error" not in v, f"Incorrect server error"
def test_overflow(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "ZeroOneAdam",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"var_freeze_step": 4,
"var_update_scaler": 1,
"local_step_scaler": 1,
"local_step_clipper": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=100, hidden_dim=hidden_dim, device=model.device)
save_folder = os.path.join(tmpdir, "saved_checkpoint")
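        # Inflate the loss on rank 0 after a few steps to force fp16 gradient overflows
        # and exercise the optimizer's overflow handling before saving a checkpoint.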
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
if dist.get_rank() == 0 and n >= 10:
loss = loss * 1000000.0
model.backward(loss)
dist.barrier()
model.step()
dist.barrier()
model.save_checkpoint(save_folder, tag=None)
@pytest.mark.parametrize(
"topo_config",
[
{
"num_pp": 1,
"num_dp": 4
},
{
"num_pp": 2,
"num_dp": 2
},
{
"num_pp": 4,
"num_dp": 1
},
],
)
class TestZeroOneAdamFP16Pipeline(DistributedTest):
world_size = 4
def test(self, topo_config):
config_dict = {
"train_batch_size": 16,
"train_micro_batch_size_per_gpu": 4,
"steps_per_print": 20,
"optimizer": {
"type": "ZeroOneAdam",
"params": {
"lr": 0.00001,
"betas": [0.9, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7,
"var_freeze_step": 4,
"var_update_scaler": 1,
"local_step_scaler": 1,
"local_step_clipper": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
},
},
"gradient_clipping": 1.0,
"zero_optimization": {
"stage": 0
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
"pipeline": {
"seed_layers": True,
"activation_checkpoint_interval": 1
},
}
topo = PipeTopo(**topo_config)
steps = 500 # Must be >=100
# Allocate model for consistent initial weights.
init_net = AlexNetPipe()
test_net = copy.deepcopy(init_net)
test_model = PipelineModule(layers=test_net.to_layers(), topology=topo, loss_fn=nn.CrossEntropyLoss())
test_losses = train_cifar(
test_model,
config=config_dict,
num_steps=steps,
fp16=config_dict["fp16"]["enabled"],
)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=["fp32", "fp16"])
class TestOneBitLambBasic(DistributedTest):
world_size = 2
def test(self, dtype):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "OneBitLamb",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"max_coeff": 0.3,
"min_coeff": 0.01,
"freeze_step": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
"coeff_beta": 0.9,
"factor_max": 1.0,
"factor_min": 0.5,
"factor_threshold": 0.1,
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": (dtype == torch.float16),
"loss_scale": 0,
"initial_scale_power": 16,
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(
model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=dtype,
)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
class TestOneBitLambExpAvgMask(DistributedTest):
world_size = 2
def test(self):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "OneBitLamb",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"max_coeff": 0.3,
"min_coeff": 0.01,
"freeze_step": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
"coeff_beta": 0.9,
"factor_max": 1.0,
"factor_min": 0.5,
"factor_threshold": 0.1,
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
param_optimizer = list(model.named_parameters())
mask1 = torch.zeros_like(param_optimizer[0][1].data)
for col in range(mask1.size()[1]):
mask1[0][col] += 1
optimizer_grouped_parameters = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01,
"exp_avg_mask": mask1,
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
model, optimizer, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters,
)
data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
# Test whether the momentum mask works
for v in optimizer.state.values():
if v["exp_avg"].size() == mask1.size():
assert torch.allclose(
v["exp_avg"],
v["exp_avg"].mul_(mask1.to(device=v["exp_avg"].device)),
atol=1e-07,
), f"Momentum mask is not working properly"
class TestOneBitLambCheckpointing(DistributedTest):
world_size = 2
def test(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "OneBitLamb",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"max_coeff": 0.3,
"min_coeff": 0.01,
"freeze_step": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
"coeff_beta": 0.9,
"factor_max": 1.0,
"factor_min": 0.5,
"factor_threshold": 0.1,
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
param_optimizer = list(model.named_parameters())
mask1 = torch.zeros_like(param_optimizer[0][1].data)
mask2 = torch.zeros_like(param_optimizer[0][1].data)
for col in range(mask1.size()[1]):
mask1[0][col] += 1
mask2[1][col] += 1
optimizer_grouped_parameters_1 = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01,
"exp_avg_mask": mask1,
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
optimizer_grouped_parameters_2 = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01,
"exp_avg_mask": mask2,
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
optimizer_grouped_parameters_3 = [
{
"params": [param_optimizer[0][1]],
"weight_decay": 0.01
},
{
"params": [param_optimizer[1][1]],
"weight_decay": 0.01
},
]
model_1, optimizer_1, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters_1,
)
data_loader = random_dataloader(
model=model_1,
total_samples=10,
hidden_dim=hidden_dim,
device=model_1.device,
)
for n, batch in enumerate(data_loader):
loss = model_1(batch[0], batch[1])
model_1.backward(loss)
model_1.step()
        # Test whether the momentum mask still exists after saving a checkpoint
assert optimizer_1.optimizer.lamb_freeze_key is True
mask1 = mask1.to(device=optimizer_1.param_groups[0]["exp_avg_mask"].device)
assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1,
atol=1e-07), f"Incorrect momentum mask"
scaling_coeff_1 = []
for v in optimizer_1.state.values():
assert "scaling_coeff" in v, f"Incorrect scaling_coeff"
scaling_coeff_1.append(v["scaling_coeff"])
save_folder = os.path.join(tmpdir, "saved_checkpoint")
model_1.save_checkpoint(save_folder, tag=None)
assert torch.allclose(optimizer_1.param_groups[0]["exp_avg_mask"], mask1,
atol=1e-07), f"Momentum mask should not change after saving checkpoint"
model_2, optimizer_2, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters_2,
)
# Test whether momentum mask stays the same after loading checkpoint
mask2 = mask2.to(device=optimizer_2.param_groups[0]["exp_avg_mask"].device)
assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2,
atol=1e-07), f"Incorrect momentum mask"
model_2.load_checkpoint(
save_folder,
tag=None,
load_optimizer_states=True,
load_lr_scheduler_states=True,
)
assert torch.allclose(optimizer_2.param_groups[0]["exp_avg_mask"], mask2,
atol=1e-07), f"Momentum mask should not change after loading checkpoint"
        # Test whether the worker & server errors are reset
assert len(optimizer_2.optimizer.worker_errors) == 0, f"Incorrect worker error"
assert len(optimizer_2.optimizer.server_errors) == 0, f"Incorrect server error"
# Test whether scaling_coeffs is loaded correctly
scaling_coeff_2 = []
for v in optimizer_2.state.values():
assert "scaling_coeff" in v, f"Incorrect scaling_coeff"
scaling_coeff_2.append(v["scaling_coeff"])
assert list(sorted(scaling_coeff_2)) == list(sorted(scaling_coeff_1)), f"Incorrect scaling_coeffs"
assert optimizer_2.optimizer.lamb_freeze_key is True
model_3, optimizer_3, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=optimizer_grouped_parameters_3,
)
optimizer_3.optimizer.freeze_step = 20
data_loader = random_dataloader(
model=model_3,
total_samples=50,
hidden_dim=hidden_dim,
device=model_3.device,
)
for n, batch in enumerate(data_loader):
loss = model_3(batch[0], batch[1])
model_3.backward(loss)
model_3.step()
assert optimizer_3.optimizer.lamb_freeze_key is True
        # Test that the momentum mask stays absent after loading a checkpoint saved with a mask
assert ("exp_avg_mask" not in optimizer_3.param_groups[0]), f"Incorrect momentum mask"
model_3.load_checkpoint(
save_folder,
tag=None,
load_optimizer_states=True,
load_lr_scheduler_states=True,
)
assert ("exp_avg_mask"
not in optimizer_3.param_groups[0]), f"Momentum mask should not change after loading checkpoint"
        # Test whether the worker & server errors are reset
assert len(optimizer_3.optimizer.worker_errors) == 0, f"Incorrect worker error"
assert len(optimizer_3.optimizer.server_errors) == 0, f"Incorrect server error"
# Test whether scaling_coeffs, lamb_coeff_freeze, last_factor are reset
for v in optimizer_3.state.values():
assert v["lamb_coeff_freeze"] == 0.0, f"Incorrect lamb_coeff_freeze"
assert v["last_factor"] == 1.0, f"Incorrect last_factor"
assert "scaling_coeff" not in v, f"Incorrect scaling_coeff"
assert optimizer_3.optimizer.lamb_freeze_key is False
def test_overflow(self, tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "OneBitLamb",
"params": {
"lr": 0.00015,
"weight_decay": 0.01,
"max_coeff": 0.3,
"min_coeff": 0.01,
"freeze_step": 2,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
"coeff_beta": 0.9,
"factor_max": 1.0,
"factor_min": 0.5,
"factor_threshold": 0.1,
},
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model, total_samples=100, hidden_dim=hidden_dim, device=model.device)
save_folder = os.path.join(tmpdir, "saved_checkpoint")
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
if dist.get_rank() == 0 and n >= 10:
loss = loss * 1000000.0
model.backward(loss)
dist.barrier()
model.step()
dist.barrier()
model.save_checkpoint(save_folder, tag=None)
@pytest.mark.parametrize(
"topo_config",
[
{
"num_pp": 1,
"num_dp": 4
},
{
"num_pp": 2,
"num_dp": 2
},
{
"num_pp": 4,
"num_dp": 1
},
],
)
class TestOneBitLambFP16Pipeline(DistributedTest):
world_size = 4
def test(self, topo_config):
config_dict = {
"train_batch_size": 16,
"train_micro_batch_size_per_gpu": 4,
"steps_per_print": 20,
"optimizer": {
"type": "OneBitLamb",
"params": {
"lr": 0.00001,
"betas": [0.9, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7,
"freeze_step": 200,
"cuda_aware": False,
"comm_backend_name": get_accelerator().communication_backend_name(),
},
},
"gradient_clipping": 1.0,
"zero_optimization": {
"stage": 0
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 16
},
"pipeline": {
"seed_layers": True,
"activation_checkpoint_interval": 1
},
}
topo = PipeTopo(**topo_config)
steps = 500 # Must be >=100
# Allocate model for consistent initial weights.
init_net = AlexNetPipe()
test_net = copy.deepcopy(init_net)
test_model = PipelineModule(layers=test_net.to_layers(), topology=topo, loss_fn=nn.CrossEntropyLoss())
test_losses = train_cifar(
test_model,
config=config_dict,
num_steps=steps,
fp16=config_dict["fp16"]["enabled"],
)
@pytest.mark.sequential
class TestCompressedAllReduceBasic(DistributedTest):
world_size = 2
def test(self, tmpdir):
from deepspeed.runtime.comm.nccl import NcclBackend
size = dist.get_world_size()
rank = dist.get_rank()
backend = NcclBackend()
local_rank = dist.get_rank()
device = torch.device(get_accelerator().device_name(), dist.get_rank())
# A simulated compression function using deepspeed.comm
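        # It mirrors the two-stage 1-bit scheme: each worker sends sign(a) scaled by
        # a.norm() / sqrt(numel) and keeps the residual as worker_error; the averaged
        # result is re-compressed per server chunk, whose residual becomes server_error
        # (error feedback).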
def torch_sim(a):
a_sign = a.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)
scale = a.norm() / np.sqrt(a.numel())
a_compressed = scale * a_sign
a_sign = None
worker_error = a - a_compressed
dist.all_reduce(a_compressed)
a_compressed.mul_(1 / dist.get_world_size())
a_server_sign = (a_compressed.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
a_list = torch.chunk(a_compressed, chunks=dist.get_world_size())
server_scale = [chunk_a.norm() / np.sqrt(chunk_a.numel()) for chunk_a in a_list]
a_sign_list = torch.chunk(a_server_sign, dist.get_world_size())
a_server_compressed = torch.cat([server_scale[i] * a_sign_list[i] for i in range(dist.get_world_size())])
rank = dist.get_rank()
server_error = a_list[rank] - server_scale[rank] * a_sign_list[rank]
get_accelerator().synchronize()
dist.barrier()
return a_server_compressed, worker_error, server_error
tensor_size = 300 * 2**20
server_size = int(tensor_size / size)
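        # The buffer size is rounded up to a multiple of 8 * world_size, presumably so
        # that the sign bits pack into whole bytes once the tensor is chunked across ranks.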
if tensor_size % (8 * size) != 0:
right_tensor_size = tensor_size + (8 * size - (tensor_size % (8 * size)))
else:
right_tensor_size = tensor_size
right_server_size = right_tensor_size // size
        # Add a rank-dependent bias to the simulated gradient so that no element is too
        # close to zero, which would make the sign-based comparison below unstable
a = (torch.rand(tensor_size, device=device) - 0.5) + 0.01 * rank
worker_error = torch.zeros(right_tensor_size, device=device)
server_error = torch.zeros(right_server_size, device=device)
a_torch, worker_error_torch, server_error_torch = torch_sim(a)
get_accelerator().empty_cache()
a_after = backend.compressed_allreduce(a, worker_error, server_error, local_rank)
threshold = 1e-6
magnitude_threshold = 1e-6
diff_mask = (a_after - a_torch) > threshold
diff_server_mask = torch.chunk(diff_mask, size)[rank]
mpi_server = torch.chunk(a_after, size)[rank] + server_error
torch_server = torch.chunk(a_torch, size)[rank] + server_error_torch
        # If the number in the compensated_server_m is too small (e.g. 1e-8), then calling sign() might be problematic
# The test would skip those numbers that are too small in compensated_server_m
check_mag_mask = mpi_server[diff_server_mask] > magnitude_threshold
if torch.sum(check_mag_mask) != 0:
print("Fails at {} of positions".format(torch.sum(check_mag_mask)))
assert torch.sum(diff_server_mask) == 0 or torch.sum(check_mag_mask) == 0
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import deepspeed.comm as dist
import deepspeed
from unit.common import DistributedTest, DistributedFixture, get_master_port
from unit.simple_model import SimpleModel
from deepspeed.accelerator import get_accelerator
import pytest
class TestInit(DistributedTest):
world_size = 3
def test(self):
assert dist.is_initialized()
assert dist.get_world_size() == 3
assert dist.get_rank() < 3
# Demonstration of pytest's parameterization and fixtures
@pytest.fixture(params=["hello"])
def greeting(request):
return request.param
@pytest.mark.parametrize("number,color", [(1138, "purple")])
class TestDistArgs(DistributedTest):
world_size = 2
""" Classes that use DistributedTest class must define a test* method """
@pytest.mark.parametrize("shape", ["icosahedron"])
def test(self, number, color, shape, greeting):
"""Ensure that we can parse args to DistributedTest methods. """
assert dist.get_world_size() == 2
assert number == 1138
assert color == "purple"
assert shape == "icosahedron"
assert greeting == "hello"
# Demonstration of distributed tests grouped in single class
@pytest.mark.parametrize("number", [1138])
class TestGroupedDistTest(DistributedTest):
world_size = 2
def test_one(self, number):
assert dist.get_world_size() == 2
assert number == 1138
def test_two(self, number, color="purple"):
assert dist.get_world_size() == 2
assert number == 1138
assert color == "purple"
# Demonstration of world_size override
class TestWorldSizeOverrideDistTest(DistributedTest):
world_size = 2
def test_world_size_2(self):
assert dist.get_world_size() == 2
@pytest.mark.world_size(1)
def test_world_size_1(self):
assert dist.get_world_size() == 1
# Demonstration of the DistributedFixture class
@pytest.fixture(params=[2, 4])
def val1(request):
return request.param
@pytest.fixture(params=[16, 32])
def val2(request):
return request.param
class distributed_fixture(DistributedFixture):
world_size = 2
def run(self, class_tmpdir, val1, val2):
assert int(os.environ["WORLD_SIZE"]) == self.world_size
local_rank = os.environ["LOCAL_RANK"]
file_path = os.path.join(class_tmpdir, f"checkpoint-{local_rank}.pt")
with open(file_path, "w") as f:
f.write(f"{local_rank},{val1},{val2}")
class TestDistributedFixture(DistributedTest):
world_size = 1
def test(self, distributed_fixture, class_tmpdir, val1, val2):
for rank in range(2):
file_path = os.path.join(class_tmpdir, f"checkpoint-{rank}.pt")
with open(file_path, "r") as f:
chkpt = f.read()
assert chkpt == f"{rank},{val1},{val2}"
assert int(os.environ["WORLD_SIZE"]) == 1
class TestDistAllReduce(DistributedTest):
device_count = get_accelerator().device_count()
if device_count >= 4:
world_size = [1, 2, 4]
elif device_count >= 2:
world_size = [1, 2]
else:
world_size = [1]
def test(self):
x = torch.ones(1, 3).to(get_accelerator().device_name()) * (dist.get_rank() + 1)
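        # Each rank contributes (rank + 1), so the all-reduced value is the arithmetic
        # series 1 + 2 + ... + world_size = world_size * (world_size + 1) / 2.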
sum_of_ranks = (dist.get_world_size() * (dist.get_world_size() + 1)) // 2
result = torch.ones(1, 3).to(get_accelerator().device_name()) * sum_of_ranks
dist.all_reduce(x)
assert torch.all(x == result)
@pytest.mark.parametrize("dist_init_required", [True, False, None])
class TestDistInit(DistributedTest):
init_distributed = False
def test_already_init(self, dist_init_required):
torch.distributed.init_process_group(get_accelerator().communication_backend_name())
deepspeed.init_distributed(get_accelerator().communication_backend_name(),
dist_init_required=dist_init_required)
def test_no_init(self, dist_init_required):
if dist_init_required or dist_init_required is None:
deepspeed.init_distributed(get_accelerator().communication_backend_name(),
dist_init_required=dist_init_required)
else:
# torch.dist is not done and for some reason the user says they don't want it done
with pytest.raises(Exception):
deepspeed.init_distributed(get_accelerator().communication_backend_name(),
dist_init_required=dist_init_required)
class TestDistInitNoEnv(DistributedTest):
world_size = 1
init_distributed = False
set_dist_env = False
def test(self):
torch.distributed.init_process_group(backend=get_accelerator().communication_backend_name(),
init_method=f"tcp://127.0.0.1:{get_master_port()}",
world_size=1,
rank=0)
assert torch.distributed.is_initialized()
deepspeed.init_distributed(get_accelerator().communication_backend_name(), auto_mpi_discovery=True)
@pytest.mark.parametrize("dist_init_required", [True, False])
class TestDistInitWithModel(DistributedTest):
init_distributed = False
def test_already_init(self, dist_init_required):
torch.distributed.init_process_group(get_accelerator().communication_backend_name())
model = SimpleModel(4)
config_dict = {"train_micro_batch_size_per_gpu": 1, "optimizer": {"type": "Adam", "params": {}}}
engine, *_ = deepspeed.initialize(model=model,
config=config_dict,
model_parameters=model.parameters(),
dist_init_required=dist_init_required)
def test_no_init(self, dist_init_required):
model = SimpleModel(4)
config_dict = {"train_micro_batch_size_per_gpu": 1, "optimizer": {"type": "Adam", "params": {}}}
if dist_init_required:
engine, *_ = deepspeed.initialize(model=model,
config=config_dict,
model_parameters=model.parameters(),
dist_init_required=dist_init_required)
else:
# torch.dist is not done and for some reason the user says they don't want it done
with pytest.raises(Exception):
engine, *_ = deepspeed.initialize(model=model,
config=config_dict,
model_parameters=model.parameters(),
dist_init_required=dist_init_required)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import pytest
from deepspeed.utils.zero_to_fp32 import get_optim_files
@pytest.mark.parametrize('num_checkpoints', [1, 2, 12, 24])
def test_get_optim_files(tmpdir, num_checkpoints):
saved_files = []
for i in range(num_checkpoints):
file_name = "zero_" + str(i) + "_optim_states.pt"
path_name = os.path.join(tmpdir, file_name)
saved_files.append(path_name)
with open(path_name, "w") as f:
f.write(file_name)
loaded_files = get_optim_files(tmpdir)
for lf, sf in zip(loaded_files, saved_files):
assert lf == sf
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import pytest
from unit.simple_model import SimpleModel
from deepspeed import OnDevice
from packaging import version as pkg_version
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
@pytest.mark.parametrize('device', ['meta', get_accelerator().device_name(0)])
class TestOnDevice(DistributedTest):
world_size = 1
def test_on_device(self, device):
if device == "meta" and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"):
pytest.skip("meta tensors only became stable after torch 1.10")
with OnDevice(dtype=torch.half, device=device):
model = SimpleModel(4)
for p in model.parameters():
assert p.device == torch.device(device)
assert p.dtype == torch.half
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.utils.groups import _get_expert_parallel_ranks
def test_get_expert_parallel_ranks():
"""
Example - E + M + D parallel
world_size = 16
model_degree = 2
expert_degree = 4 # number of experts in same group
mp_group = [0, 1], [2,3], [4,5] ...
data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15]
expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15]
expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15]
"""
expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks(world_size=16,
model_parallel_size_=2,
expert_parallel_size_=4)
assert expert_parallel_groups == [
[0, 2, 4, 6],
[8, 10, 12, 14],
[1, 3, 5, 7],
[9, 11, 13, 15],
]
assert expert_data_parallel_groups == [
[0, 8],
[2, 10],
[4, 12],
[6, 14],
[1, 9],
[3, 11],
[5, 13],
[7, 15],
]
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import pytest
import deepspeed
from deepspeed.profiling.flops_profiler import get_model_profile
from unit.simple_model import SimpleModel, random_dataloader
from unit.common import DistributedTest
from unit.util import required_minimum_torch_version
pytestmark = pytest.mark.skipif(not required_minimum_torch_version(major_version=1, minor_version=3),
reason='requires Pytorch version 1.3 or above')
def within_range(val, target, tolerance):
return abs(val - target) / target < tolerance
TOLERANCE = 0.05
class LeNet5(torch.nn.Module):
def __init__(self, n_classes):
super(LeNet5, self).__init__()
self.feature_extractor = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1),
torch.nn.Tanh(),
torch.nn.AvgPool2d(kernel_size=2),
torch.nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1),
torch.nn.Tanh(),
torch.nn.AvgPool2d(kernel_size=2),
torch.nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1),
torch.nn.Tanh(),
)
self.classifier = torch.nn.Sequential(
torch.nn.Linear(in_features=120, out_features=84),
torch.nn.Tanh(),
torch.nn.Linear(in_features=84, out_features=n_classes),
)
def forward(self, x):
x = self.feature_extractor(x)
x = torch.flatten(x, 1)
logits = self.classifier(x)
probs = torch.nn.functional.softmax(logits, dim=1)
return logits, probs
class TestFlopsProfiler(DistributedTest):
world_size = 1
def test(self):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.001,
}
},
"zero_optimization": {
"stage": 0
},
"fp16": {
"enabled": True,
},
"flops_profiler": {
"enabled": True,
"step": 1,
"module_depth": -1,
"top_modules": 3,
},
}
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters())
data_loader = random_dataloader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device,
dtype=torch.half)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
if n == 3: break
assert within_range(model.flops_profiler.flops, 200, tolerance=TOLERANCE)
assert model.flops_profiler.params == 110
def test_flops_profiler_in_inference(self):
mod = LeNet5(10)
batch_size = 1024
input = torch.randn(batch_size, 1, 32, 32)
flops, macs, params = get_model_profile(
mod,
tuple(input.shape),
print_profile=True,
detailed=True,
module_depth=-1,
top_modules=3,
warm_up=1,
as_string=False,
ignore_modules=None,
)
print(flops, macs, params)
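        # FLOPs should be roughly 2x MACs (a multiply and an add per MAC); the small
        # remainder likely comes from the non-matmul ops (Tanh, AvgPool, softmax).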
assert within_range(flops, 866076672, TOLERANCE)
assert within_range(macs, 426516480, TOLERANCE)
assert params == 61706
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import pytest
import random
import numpy as np
from unit.megatron_model import get_gpt2_model
from deepspeed.compression.compress import init_compression
from unit.modeling import BertConfig
from unit.modelingpreln import BertEncoder as BertEncoderPreln
from deepspeed.compression.basic_layer import LinearLayer_Compress, ColumnParallelLinear_Compress, RowParallelLinear_Compress
from deepspeed.compression.helper import convert_conv1d_to_linear
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
from unit.util import required_minimum_torch_version, required_maximum_torch_version
pytestmark = pytest.mark.skipif(not required_minimum_torch_version(major_version=1, minor_version=5),
reason='Megatron-LM package requires Pytorch version 1.5 or above')
def reset_random(seed=1234):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
get_accelerator().manual_seed_all(seed)
def create_bert_model():
hidden_size = 384
num_layers = 2
heads = 12
dropout_ratio = 0.1
bert_config = BertConfig(vocab_size_or_config_json_file=119547,
hidden_size=hidden_size,
num_hidden_layers=num_layers,
num_attention_heads=heads,
intermediate_size=hidden_size * 4,
hidden_act="gelu",
hidden_dropout_prob=dropout_ratio,
attention_probs_dropout_prob=dropout_ratio,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.2)
weights = []
biases = []
for i in range(4):
weights.append(torch.nn.Parameter(torch.Tensor(hidden_size, hidden_size)))
weights.append(torch.nn.Parameter(torch.Tensor(hidden_size)))
weights.append(torch.nn.Parameter(torch.Tensor(hidden_size * 4, hidden_size)))
weights.append(torch.nn.Parameter(torch.Tensor(hidden_size, hidden_size * 4)))
weights.append(torch.nn.Parameter(torch.Tensor(hidden_size)))
biases.append(torch.nn.Parameter(torch.Tensor(hidden_size)))
for i in range(4):
biases.append(torch.nn.Parameter(torch.Tensor(hidden_size)))
biases.append(torch.nn.Parameter(torch.Tensor(hidden_size * 4)))
biases.append(torch.nn.Parameter(torch.Tensor(hidden_size)))
biases.append(torch.nn.Parameter(torch.Tensor(hidden_size)))
return BertEncoderPreln(bert_config, weights, biases)
class Conv1D(torch.nn.Module):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (`int`): The number of output features.
nx (`int`): The number of input features.
"""
def __init__(self, nf, nx):
super().__init__()
self.nf = nf
w = torch.empty(nx, nf)
self.weight = torch.nn.Parameter(w)
self.bias = torch.nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf, )
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(size_out)
return x
def create_conv1d_model():
nf = 128
nx = 128
return torch.nn.ModuleList([Conv1D(nf, nx) for i in range(4)])
class TestCompression(DistributedTest):
def setup_method(self, method):
reset_random()
def get_ds_config(self):
ds_config_dict = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
"fp16": {
"enabled": True
},
"compression_training": {
"weight_quantization": {
"shared_parameters": {
"enabled": True,
"quantizer_kernel": False,
"schedule_offset": 50,
"quantize_groups": 1,
"quantize_verbose": False,
"quantization_type": "asymmetric",
"rounding": "nearest",
"fp16_mixed_quantize": {
"enabled": False,
"quantize_change_ratio": 0.001
}
},
"different_groups": {
"wq1": {
"params": {
"start_bits": 12,
"target_bits": 8,
"quantization_period": 50
},
"modules": ["attention.self", "intermediate"]
},
"wq2": {
"params": {
"start_bits": 12,
"target_bits": 4,
"quantization_period": 50
},
"modules": ["attention.output"]
}
}
},
"activation_quantization": {
"shared_parameters": {
"enabled": True,
"quantization_type": "asymmetric",
"range_calibration": "dynamic",
"schedule_offset": 50
},
"different_groups": {
"aq1": {
"params": {
"bits": 8
},
"modules": ["attention.output"]
}
}
},
"sparse_pruning": {
"shared_parameters": {
"enabled": True,
"schedule_offset": 30,
"method": "l1"
},
"different_groups": {
"sp1": {
"params": {
"dense_ratio": 0.5
},
"modules": ["attention.self"]
}
}
},
"row_pruning": {
"shared_parameters": {
"enabled": True,
"schedule_offset": 20,
"method": "topk"
},
"different_groups": {
"rp1": {
"params": {
"dense_ratio": 0.5
},
"modules": ["intermediate.dense"],
"related_modules": [["layer.\\w+.output.dense"]]
}
}
},
"head_pruning": {
"shared_parameters": {
"enabled": True,
"schedule_offset": 10,
"method": "topk",
"num_heads": 12
},
"different_groups": {
"rp1": {
"params": {
"dense_ratio": 0.5
},
"modules": ["attention.output.dense"],
"related_modules": [["self.query", "self.key", "self.value"]]
}
}
}
}
}
return ds_config_dict
def test_linear_layer_compress(self, tmpdir):
model = create_bert_model()
compressed_model = init_compression(model, self.get_ds_config())
assert isinstance(compressed_model.layer[0].attention.self.query, LinearLayer_Compress)
assert isinstance(compressed_model.layer[0].attention.self.key, LinearLayer_Compress)
assert isinstance(compressed_model.layer[0].attention.self.value, LinearLayer_Compress)
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test_mpu_compress(self, tmpdir):
if not required_maximum_torch_version(major_version=1, minor_version=13):
pytest.skip("megatron not compatible with torch >1.13")
from megatron import mpu
args_defaults = {
'num_layers': 2,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
model = get_gpt2_model(args_defaults)
compressed_model = init_compression(model, self.get_ds_config(), mpu=mpu)
assert isinstance(compressed_model.module.language_model.transformer.layers[0].attention.query_key_value,
ColumnParallelLinear_Compress)
assert isinstance(compressed_model.module.language_model.transformer.layers[0].attention.dense,
RowParallelLinear_Compress)
assert isinstance(compressed_model.module.language_model.transformer.layers[0].mlp.dense_h_to_4h,
ColumnParallelLinear_Compress)
assert isinstance(compressed_model.module.language_model.transformer.layers[0].mlp.dense_4h_to_h,
RowParallelLinear_Compress)
    def test_conv1d_conversion(self, tmpdir):
model = create_conv1d_model()
compressed_model = convert_conv1d_to_linear(model, Conv1D)
assert isinstance(compressed_model[0], torch.nn.Linear)
assert isinstance(compressed_model[1], torch.nn.Linear)
assert isinstance(compressed_model[2], torch.nn.Linear)
assert isinstance(compressed_model[3], torch.nn.Linear)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import deepspeed
import pytest
import random
import numpy as np
import deepspeed.comm as dist
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest, DistributedFixture
from unit.megatron_model import get_gpt2_model, get_megatron_version
from unit.util import required_minimum_torch_version, required_maximum_torch_version
pytestmark = pytest.mark.skipif(not required_minimum_torch_version(major_version=1, minor_version=5),
reason='Megatron-LM package requires Pytorch version 1.5 or above')
pytestmark = pytest.mark.skipif(not required_maximum_torch_version(major_version=1, minor_version=13),
reason='Megatron-LM package requires Pytorch version 1.13 or below')
# TODO: integrated testing of TP and ZeRO 1/2/3
def get_deepspeed_model(model):
ds_config_dict = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
}
from megatron import mpu
model, _, _, _ = deepspeed.initialize(model=model,
mpu=mpu,
model_parameters=model.parameters(),
config=ds_config_dict)
return model
class ConfigurableMP(DistributedTest):
@pytest.fixture(autouse=True)
def reset_random(self, seed=1234):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
get_accelerator().manual_seed_all(seed)
@pytest.fixture
def inputs(self, bs=1, seq_len=20):
input_ids = torch.randint(low=0, high=1000, size=(bs, seq_len))
position_ids = torch.randint(low=0, high=2, size=(bs, seq_len))
attention_mask = torch.randint(low=0, high=2, size=(bs, seq_len), dtype=torch.bool)
return [input_ids, position_ids, attention_mask]
class TestConfigurableMP(ConfigurableMP):
@pytest.mark.world_size(1)
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test_gpt2_basic(self, tmpdir, inputs):
args_defaults = {
'num_layers': 2,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
model = get_gpt2_model(args_defaults)
model = get_deepspeed_model(model)
model.eval()
device_name = get_accelerator().device_name()
baseline = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
tag = 'mp_1'
state_dict = {}
state_dict['checkpoint_version'] = get_megatron_version()
model.save_checkpoint(tmpdir, tag=tag, client_state=state_dict)
dist.barrier()
model.load_checkpoint(tmpdir, tag=tag, load_optimizer_states=False, load_lr_scheduler_states=False)
        test = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
assert torch.allclose(baseline, test,
atol=1e-07), f"Baseline output {baseline} is not equal to save-then-load output {test}"
@pytest.mark.world_size(2)
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test_gpt2_mp2_no_resize(self, tmpdir, inputs):
args_defaults = {
'num_layers': 2,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
model = get_gpt2_model(args_defaults, mp_size=2)
model = get_deepspeed_model(model)
model.eval()
device_name = get_accelerator().device_name()
baseline = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
tag = 'mp_2'
state_dict = {}
state_dict['checkpoint_version'] = get_megatron_version()
model.save_checkpoint(tmpdir, tag=tag, client_state=state_dict)
dist.barrier()
model.load_checkpoint(tmpdir, tag=tag, load_optimizer_states=False, load_lr_scheduler_states=False)
device_name = get_accelerator().device_name()
test = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
assert torch.allclose(baseline, test, rtol=1.0,
atol=1e-07), f"Baseline output {baseline} is not equal to save-then-load output {test}"
# This fixture provides the baseline model with mp=2 to TestConfigurableMPResize
class baseline_mp2(DistributedFixture):
world_size = 2
def run(self, inputs, class_tmpdir):
args_defaults = {
'num_layers': 2,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
model = get_gpt2_model(args_defaults, mp_size=self.world_size)
model = get_deepspeed_model(model)
model.eval()
with torch.no_grad():
device_name = get_accelerator().device_name()
baseline = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
if dist.get_rank() == 0:
save_path = os.path.join(class_tmpdir, "output.pt")
torch.save(baseline.cpu(), save_path)
state_dict = {}
state_dict['checkpoint_version'] = get_megatron_version()
model.save_checkpoint(class_tmpdir, client_state=state_dict)
class TestConfigurableResizeMP(ConfigurableMP):
world_size = [1, 4]
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test(self, baseline_mp2, inputs, class_tmpdir):
args_defaults = {
'num_layers': 2,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
        world_size = int(os.environ["WORLD_SIZE"])
model = get_gpt2_model(args_defaults, mp_size=world_size)
model = get_deepspeed_model(model)
model.eval()
with torch.no_grad():
model.load_checkpoint(class_tmpdir, load_optimizer_states=False, load_lr_scheduler_states=False)
device_name = get_accelerator().device_name()
test = model(inputs[0].to(device_name), inputs[1].to(device_name), inputs[2].to(device_name))
if dist.get_rank() == 0:
load_path = os.path.join(class_tmpdir, "output.pt")
baseline = torch.load(load_path)
test = test.cpu()
assert torch.allclose(
baseline, test,
atol=1e-03), f"Baseline output {baseline} is not equal to save-then-load output {test}"
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import deepspeed
import pytest
import random
import numpy as np
import deepspeed.comm as dist
from unit.common import DistributedTest, DistributedFixture
from unit.megatron_model import get_megatron_version
from unit.megatron_model import MockGPT2ModelPipe as GPT2ModelPipe
from deepspeed.utils import RepeatingLoader
from deepspeed.accelerator import get_accelerator
from unit.util import required_minimum_torch_version, required_maximum_torch_version
pytestmark = pytest.mark.skipif(not required_minimum_torch_version(major_version=1, minor_version=5),
reason='Megatron-LM package requires Pytorch version 1.5 or above')
pytestmark = pytest.mark.skipif(not required_maximum_torch_version(major_version=1, minor_version=13),
reason='Megatron-LM package requires Pytorch version 1.13 or below')
def get_deepspeed_model(model):
ds_config_dict = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015
}
},
}
model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=ds_config_dict)
return model.to(get_accelerator().device_name())
def get_topology(mp, pp, world_size):
assert world_size % (pp * mp) == 0
dp = world_size // (pp * mp)
from deepspeed.runtime.pipe.topology import PipeModelDataParallelTopology
topo = PipeModelDataParallelTopology(num_pp=pp, num_mp=mp, num_dp=dp)
return topo
class ConfigurablePP(DistributedTest):
@pytest.fixture(autouse=True)
def reset_random(self, seed=1234):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
get_accelerator().manual_seed_all(seed)
@pytest.fixture
def inputs(self, bs=1, seq_len=1, hidden_size=128):
hidden_states = torch.randn(bs, seq_len, hidden_size)
attention_mask = torch.randint(low=0, high=2, size=(bs, seq_len), dtype=torch.bool)
return (hidden_states, attention_mask)
class TestConfigurablePP(ConfigurablePP):
mp_size = 2
pp_size = 2
world_size = 4 # mp_size * pp_size
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test_pp_basic(self, inputs, tmpdir):
# basic test case, mp_size=2, pp_size=2, verify ckpt saving/loading.
args_defaults = {
'num_layers': 8,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
mp_size = self.mp_size
pp_size = self.pp_size
world_size = self.world_size
topo = get_topology(mp_size, pp_size, world_size)
gpt2_pipe_model = GPT2ModelPipe(num_layers=8,
num_stages=pp_size,
mp_size=mp_size,
args_others=args_defaults,
topo=topo)
model = get_deepspeed_model(gpt2_pipe_model)
tag = 'pp_basic'
state_dict = {}
state_dict['checkpoint_version'] = get_megatron_version()
model.save_checkpoint(tmpdir, tag=tag, client_state=state_dict)
if model.is_first_stage() or model.is_last_stage():
loader = RepeatingLoader([(inputs[0], 0)])
data_iter = iter(loader)
else:
data_iter = None
baseline = model.eval_batch(data_iter=data_iter, compute_loss=False, reduce_output=None)
dist.barrier()
model.load_checkpoint(tmpdir, tag=tag, load_optimizer_states=False, load_lr_scheduler_states=False)
dist.barrier()
test = model.eval_batch(data_iter=data_iter, compute_loss=False, reduce_output=None)
if test is not None:
assert len(baseline) == len(test)
# Compare outputs of each microbatch
for mb in range(len(baseline)):
for b, t in zip(baseline[mb], test[mb]):
if b.is_floating_point(): # don't compare masks
assert torch.allclose(
b, t,
atol=1e-07), f"Baseline output {baseline} is not equal to save-then-load output {test}"
# Fixture for defining the checkpoint path since all tests in
# TestConfigurableResizePP will use the same tmpdir
@pytest.fixture
def checkpoint_tag(mp_size, pp_size, mp_resize, pp_resize):
return f"{mp_size}-{pp_size}-{mp_resize}-{pp_resize}"
# Base class for creating / saving model output for baseline models. This is
# not meant to be used directly as a fixture to any classes
class _baseline(DistributedFixture):
world_size = None
def run(self, inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size):
assert int(os.environ["WORLD_SIZE"]) == (pp_size *
mp_size), "world size does not match provided pp_size and mp_size"
args_defaults = {
'num_layers': 8,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
topo = get_topology(mp_size, pp_size, mp_size * pp_size)
gpt2_pipe_model = GPT2ModelPipe(num_layers=8,
num_stages=pp_size,
mp_size=mp_size,
args_others=args_defaults,
topo=topo)
model = get_deepspeed_model(gpt2_pipe_model)
with torch.no_grad():
inputs = [x.to(get_accelerator().device_name()) for x in inputs]
if model.is_first_stage() or model.is_last_stage():
loader = RepeatingLoader([(inputs[0], 0)])
data_iter = iter(loader)
else:
data_iter = None
baseline = model.eval_batch(data_iter=data_iter, compute_loss=False, reduce_output=None)
if baseline is not None:
                # baseline should be [[hidden, True]]
assert len(baseline) == 1
assert len(baseline[0]) == 1
assert torch.is_tensor(baseline[0][0])
save_path = os.path.join(class_tmpdir, f"output-{checkpoint_tag}.pt")
torch.save(baseline[0][0].cpu(), save_path)
state_dict = {}
state_dict['checkpoint_version'] = get_megatron_version()
model.save_checkpoint(class_tmpdir, tag=checkpoint_tag, client_state=state_dict)
# This may look odd, but there is a limitation with DistributedFixture that
# doesn't allow us to reuse a fixture with different worldsizes. This could be
# implemented in conftest.py::pytest_fixture_setup and common.py::DistributedFixture
class baseline_ws1(_baseline):
world_size = 1
class baseline_ws2(_baseline):
world_size = 2
class baseline_ws4(_baseline):
world_size = 4
class TestConfigurableResizePP(ConfigurablePP):
def _test(self, inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize):
args_defaults = {
'num_layers': 8,
'hidden_size': 128,
'num_attention_heads': 8,
'max_position_embeddings': 128,
}
topo = get_topology(mp_resize, pp_resize, mp_resize * pp_resize)
gpt2_pipe_model = GPT2ModelPipe(num_layers=8,
num_stages=pp_resize,
mp_size=mp_resize,
args_others=args_defaults,
topo=topo)
model = get_deepspeed_model(gpt2_pipe_model)
with torch.no_grad():
model.load_checkpoint(class_tmpdir,
tag=checkpoint_tag,
load_optimizer_states=False,
load_lr_scheduler_states=False)
inputs = [x.to(get_accelerator().device_name()) for x in inputs]
if model.is_first_stage() or model.is_last_stage():
loader = RepeatingLoader([(inputs[0], 0)])
data_iter = iter(loader)
else:
data_iter = None
test = model.eval_batch(data_iter=data_iter, compute_loss=False, reduce_output=None)
if test is not None:
                # test should be [[hidden, True]]
assert len(test) == 1
assert len(test[0]) == 1
assert torch.is_tensor(test[0][0])
test = test[0][0].cpu()
load_path = os.path.join(class_tmpdir, f"output-{checkpoint_tag}.pt")
baseline = torch.load(load_path)
assert torch.allclose(
baseline, test,
atol=1e-03), f"Baseline output {baseline} is not equal to save-then-load output {test}"
# These tests are divided by baseline model worldsize and test model worldsize
@pytest.mark.world_size(1)
@pytest.mark.parametrize("mp_size, pp_size, mp_resize, pp_resize", [(1, 2, 1, 1)])
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test_world_size_2to1(self, inputs, class_tmpdir, checkpoint_tag, baseline_ws2, mp_size, pp_size, mp_resize,
pp_resize):
self._test(inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize)
@pytest.mark.world_size(1)
@pytest.mark.parametrize("mp_size, pp_size, mp_resize, pp_resize", [(2, 2, 1, 1)])
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test_world_size_4to1(self, inputs, class_tmpdir, checkpoint_tag, baseline_ws4, mp_size, pp_size, mp_resize,
pp_resize):
self._test(inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize)
@pytest.mark.world_size(2)
@pytest.mark.parametrize("mp_size, pp_size, mp_resize, pp_resize", [(2, 2, 2, 1)])
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test_world_size_4to2(self, inputs, class_tmpdir, checkpoint_tag, baseline_ws4, mp_size, pp_size, mp_resize,
pp_resize):
self._test(inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize)
@pytest.mark.world_size(4)
@pytest.mark.parametrize("mp_size, pp_size, mp_resize, pp_resize", [(1, 1, 2, 2)])
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test_world_size_1to4(self, inputs, class_tmpdir, checkpoint_tag, baseline_ws1, mp_size, pp_size, mp_resize,
pp_resize):
self._test(inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize)
@pytest.mark.world_size(4)
@pytest.mark.parametrize("mp_size, pp_size, mp_resize, pp_resize", [(1, 2, 1, 4), (2, 1, 2, 2)])
@pytest.mark.skip(reason="megatron-lm is currently broken so this test cannot be run.")
def test_world_size_2to4(self, inputs, class_tmpdir, checkpoint_tag, baseline_ws2, mp_size, pp_size, mp_resize,
pp_resize):
self._test(inputs, class_tmpdir, checkpoint_tag, mp_size, pp_size, mp_resize, pp_resize)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import time
import torch
import pytest
import itertools
import deepspeed
from deepspeed.git_version_info import torch_info
from unit.common import DistributedTest
from packaging import version as pkg_version
from deepspeed.ops.op_builder import OpBuilder
from transformers import pipeline
from transformers.models.t5.modeling_t5 import T5Block
from transformers.models.roberta.modeling_roberta import RobertaLayer
from huggingface_hub import HfApi
from deepspeed.model_implementations import DeepSpeedTransformerInference
from torch import nn
from deepspeed.accelerator import get_accelerator
rocm_version = OpBuilder.installed_rocm_version()
if rocm_version != (0, 0):
pytest.skip("skip inference tests on rocm for now", allow_module_level=True)
_bert_models = [
"bert-base-cased",
"bert-base-uncased",
"bert-large-cased",
"bert-large-uncased",
"bert-base-multilingual-cased",
"bert-base-multilingual-uncased",
"deepset/minilm-uncased-squad2",
"cross-encoder/ms-marco-MiniLM-L-12-v2",
"dslim/bert-base-NER",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"distilbert-base-cased-distilled-squad",
]
_roberta_models = [
"roberta-large",
"roberta-base",
"deepset/roberta-base-squad2",
"j-hartmann/emotion-english-distilroberta-base",
"Jean-Baptiste/roberta-large-ner-english",
]
_gpt_models = [
"gpt2",
"distilgpt2",
"Norod78/hebrew-bad_wiki-gpt_neo-tiny",
#"EleutherAI/gpt-j-6B", # Removed as this is causing OOM errors randomly
"bigscience/bloom-560m",
]
_opt_models = [
"facebook/opt-125m", # 125m, 1.7B, ..., 175B variants have the same model architecture.
"facebook/opt-350m", # 350m applies layer norm after attention layer which is different than other variants.
]
_all_models = HfApi().list_models()
test_models = set(_bert_models + _roberta_models + _gpt_models + _opt_models)
test_tasks = [
"fill-mask", "question-answering", "text-classification", "token-classification", "text-generation",
"text2text-generation", "summarization", "translation"
]
pytest.all_models = {task: [m.modelId for m in _all_models if m.pipeline_tag == task] for task in test_tasks}
_model_w_tasks = itertools.product(*[test_models, test_tasks])
def _valid_model_task(model_task):
m, t = model_task
return m in pytest.all_models[t]
pytest.models_w_tasks = list(filter(_valid_model_task, _model_w_tasks))
pytest.mt_names = [f"{m}-{t}" for m, t in pytest.models_w_tasks]
"""
These fixtures iterate all combinations of tasks and models, dtype, & cuda_graph
"""
@pytest.fixture(params=pytest.models_w_tasks, ids=pytest.mt_names)
def model_w_task(request):
return request.param
@pytest.fixture(params=[torch.float, torch.half], ids=["fp32", "fp16"])
def dtype(request):
return request.param
@pytest.fixture(params=[True, False], ids=["CG", "noCG"])
def enable_cuda_graph(request):
return request.param
"""
This fixture will validate the configuration
"""
@pytest.fixture()
def invalid_model_task_config(model_w_task, dtype, enable_cuda_graph):
model, task = model_w_task
msg = ""
if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"):
msg = "DS inference injection doesn't work well on older torch versions"
elif model not in pytest.all_models[task]:
msg = f"Not a valid model / task combination: {model} / {task}"
elif enable_cuda_graph and (torch_info["cuda_version"] == "0.0"):
msg = "CUDA not detected, cannot use CUDA Graph"
elif enable_cuda_graph and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"):
msg = "CUDA Graph is only available in torch versions >= 1.10"
elif "gpt-j-6B" in model:
if dtype != torch.half:
msg = f"Not enough GPU memory to run {model} with dtype {dtype}"
elif enable_cuda_graph:
msg = f"Not enough GPU memory to run {model} with CUDA Graph enabled"
elif "gpt-neox-20b" in model: # TODO: remove this when neox issues resolved
msg = "Skipping gpt-neox-20b for now"
elif ("gpt-neox-20b" in model) and (dtype != torch.half):
msg = f"Not enough GPU memory to run {model} with dtype {dtype}"
elif ("bloom" in model) and (dtype != torch.half):
msg = f"Bloom models only support half precision, cannot use dtype {dtype}"
elif ("bert" not in model.lower()) and enable_cuda_graph:
msg = "Non bert/roberta models do no support CUDA Graph"
return msg
"""
These fixtures can be used to customize the query, inference args, and assert
statement for each combination of model /task
"""
@pytest.fixture
def query(model_w_task):
model, task = model_w_task
    angle_bracket_mask_models = ["roberta", "camembert", "esm", "ibert", "luke", "mpnet", "yoso"]
if task == "fill-mask":
if any(map(lambda x: x in model, angle_bracket_mask_models)):
return "Hello I'm a <mask> model."
else:
return "Hell I'm a [MASK] model."
elif task == "question-answering":
return {
"question": "What's my name?",
"context": "My name is Clara and I live in Berkeley",
}
elif task == "text-classification":
return "DeepSpeed is the greatest"
elif task == "token-classification":
return "My name is jean-baptiste and I live in montreal."
elif task == "text-generation":
return "DeepSpeed is the greatest"
elif task == "text2text-generation":
return "Is this review positive or negative? Review: this is the best cast iron skillet you will ever buy"
elif task == "translation" or task == "summarization":
return "Hello, my dog is cute"
else:
        raise NotImplementedError(f'query for task "{task}" is not implemented')
@pytest.fixture
def inf_kwargs(model_w_task):
model, task = model_w_task
if task == "text-generation":
if model == "EleutherAI/gpt-j-6B":
# This model on V100 is hitting memory problems that limit the number of output tokens
return {"do_sample": False, "max_length": 12}
return {"do_sample": False, "max_length": 20}
else:
return {}
def fill_mask_assert(x, y):
return set(res["token_str"] for res in x) == set(res["token_str"] for res in y)
def question_answering_assert(x, y):
return x["answer"] == y["answer"]
def text_classification_assert(x, y):
return set(res["label"] for res in x) == set(res["label"] for res in y)
def token_classification_assert(x, y):
return set(ent["word"] for ent in x) == set(ent["word"] for ent in y)
def text_generation_assert(x, y):
return set(res["generated_text"] for res in x) == set(res["generated_text"] for res in y)
def text2text_generation_assert(x, y):
return set(res["generated_text"] for res in x) == set(res["generated_text"] for res in y)
def translation_assert(x, y):
return set(res["translation_text"] for res in x) == set(res["translation_text"] for res in y)
def summarization_assert(x, y):
return set(res["summary_text"] for res in x) == set(res["summary_text"] for res in y)
@pytest.fixture
def assert_fn(model_w_task):
model, task = model_w_task
assert_fn_dict = {
"fill-mask": fill_mask_assert,
"question-answering": question_answering_assert,
"text-classification": text_classification_assert,
"token-classification": token_classification_assert,
"text-generation": text_generation_assert,
"text2text-generation": text2text_generation_assert,
"translation": translation_assert,
"summarization": summarization_assert
}
assert_fn = assert_fn_dict.get(task, None)
if assert_fn is None:
        raise NotImplementedError(f'assert_fn for task "{task}" is not implemented')
return assert_fn
def check_injection(model):
def verify_injection(module):
for child in module.children():
if isinstance(child, nn.ModuleList):
assert isinstance(child[0], DeepSpeedTransformerInference),\
"DeepSpeed-Inference Transformer kernels has not been injected in the model"
break
else:
verify_injection(child)
verify_injection(model)
"""
Tests
"""
@pytest.mark.inference
class TestModelTask(DistributedTest):
world_size = 1
def test(
self,
model_w_task,
dtype,
enable_cuda_graph,
query,
inf_kwargs,
assert_fn,
invalid_model_task_config,
):
if invalid_model_task_config:
pytest.skip(invalid_model_task_config)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# Load the model on CPU first to avoid OOM for large models @fp32
pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt")
if dtype == torch.half:
pipe.model.half()
# Switch device to GPU after converting to half
device = torch.device(get_accelerator().device_name(local_rank))
pipe.device = device
pipe.model.to(device)
# Warm-up queries for perf measurement
#for i in range(10):
# _ = pipe(query, **inf_kwargs)
get_accelerator().synchronize()
start = time.time()
bs_output = pipe(query, **inf_kwargs)
get_accelerator().synchronize()
bs_time = time.time() - start
pipe.model = deepspeed.init_inference(
pipe.model,
mp_size=1,
dtype=dtype,
replace_with_kernel_inject=True,
enable_cuda_graph=enable_cuda_graph,
)
check_injection(pipe.model)
# Warm-up queries for perf measurement
#for i in range(10):
# _ = pipe(query, **inf_kwargs)
get_accelerator().synchronize()
start = time.time()
ds_output = pipe(query, **inf_kwargs)
get_accelerator().synchronize()
ds_time = time.time() - start
        # facebook/opt* and some bigscience/bloom* models do not match the
        # baseline exactly; add an exception for them for now
if ("opt" in model) or ("bloom" in model):
bs_output = pipe(query, **inf_kwargs)
        # These performance tests only measure the time for a single inference
        # request; we just want to check that performance isn't terrible
#assert ds_time <= (bs_time * 1.1)
assert assert_fn(bs_output, ds_output)
@pytest.mark.seq_inference
@pytest.mark.parametrize("model_w_task", [("EleutherAI/gpt-neo-1.3B", "text-generation"),
("EleutherAI/gpt-neox-20b", "text-generation"),
("bigscience/bloom-3b", "text-generation"),
("EleutherAI/gpt-j-6B", "text-generation")],
ids=["gpt-neo", "gpt-neox", "bloom", "gpt-j"])
class TestMPSize(DistributedTest):
world_size = 4
def test(
self,
model_w_task,
dtype,
query,
inf_kwargs,
assert_fn,
invalid_model_task_config,
):
if invalid_model_task_config:
pytest.skip(invalid_model_task_config)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
        # We have to load these large models on CPU with pipeline because there is
        # not enough GPU memory
pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt")
bs_output = pipe(query, **inf_kwargs)
pipe.model = deepspeed.init_inference(pipe.model,
mp_size=self.world_size,
dtype=dtype,
replace_with_kernel_inject=True)
check_injection(pipe.model)
# Switch device to GPU so that input tensors are not on CPU
pipe.device = torch.device(get_accelerator().device_name(local_rank))
ds_output = pipe(query, **inf_kwargs)
print(local_rank, "baseline", bs_output)
print(local_rank, "deepspeed", ds_output)
assert assert_fn(bs_output, ds_output)
@pytest.mark.seq_inference
@pytest.mark.parametrize(
"model_w_task, injection_policy",
[
(("google/t5-v1_1-small", "text2text-generation"), {
T5Block: ('SelfAttention.o', 'EncDecAttention.o', 'DenseReluDense.wo')
}),
(("roberta-large", "fill-mask"), {
RobertaLayer: ('output.dense')
}),
],
ids=["t5", "roberta"],
)
@pytest.mark.parametrize("dtype", [torch.float], ids=["fp32"])
@pytest.mark.parametrize("enable_cuda_graph", [False], ids=["noCG"])
class TestInjectionPolicy(DistributedTest):
world_size = [1, 2]
def test(
self,
model_w_task,
injection_policy,
query,
inf_kwargs,
assert_fn,
invalid_model_task_config,
dtype,
enable_cuda_graph,
):
if invalid_model_task_config:
pytest.skip(invalid_model_task_config)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "2"))
        # We have to load these large models on CPU with pipeline because there is
        # not enough GPU memory
pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt")
bs_output = pipe(query, **inf_kwargs)
pipe.model = deepspeed.init_inference(pipe.model,
mp_size=world_size,
dtype=dtype,
injection_policy=injection_policy)
# Switch device to GPU so that input tensors are not on CPU
pipe.device = torch.device(get_accelerator().device_name(local_rank))
ds_output = pipe(query, **inf_kwargs)
print(local_rank, "baseline", bs_output)
print(local_rank, "deepspeed", ds_output)
assert assert_fn(bs_output, ds_output)
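# An injection_policy maps a transformer block class to the names of the linear layers whose
# outputs must be all-reduced once the block is split across tensor-parallel ranks (see the
# T5Block and RobertaLayer entries above). A hypothetical policy for a custom block might look
# like the sketch below; MyBlock and its attribute names are made up for illustration only:
#   policy = {MyBlock: ('attention.out_proj', 'mlp.fc2')}
#   model = deepspeed.init_inference(model, mp_size=2, dtype=torch.float, injection_policy=policy)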
@pytest.mark.seq_inference
@pytest.mark.parametrize(
"model_w_task",
[
("Helsinki-NLP/opus-mt-en-de", "translation"),
],
ids=[
"marian",
],
)
@pytest.mark.parametrize("dtype", [torch.float16], ids=["fp16"])
@pytest.mark.parametrize("enable_cuda_graph", [False], ids=["noCG"])
class TestAutoTensorParallelism(DistributedTest):
world_size = [2]
def test(
self,
model_w_task,
query,
inf_kwargs,
assert_fn,
invalid_model_task_config,
dtype,
enable_cuda_graph,
):
if invalid_model_task_config:
pytest.skip(invalid_model_task_config)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "2"))
        # We have to load these large models on CPU with pipeline because there is
        # not enough GPU memory
pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt")
bs_output = pipe(query, **inf_kwargs)
pipe.model = deepspeed.init_inference(pipe.model, mp_size=world_size, dtype=dtype)
# Switch device to GPU so that input tensors are not on CPU
pipe.device = torch.device(get_accelerator().device_name(local_rank))
ds_output = pipe(query, **inf_kwargs)
print(local_rank, "baseline", bs_output)
print(local_rank, "deepspeed", ds_output)
assert assert_fn(bs_output, ds_output)
@pytest.mark.nightly
@pytest.mark.parametrize(
"model_family, model_name",
(
["gpt2", "EleutherAI/gpt-neo-2.7B"],
["gpt2", "EleutherAI/gpt-j-6B"],
["gpt2", "gpt2-xl"],
),
)
@pytest.mark.parametrize("task", ["lambada_standard"])
class TestLMCorrectness(DistributedTest):
world_size = 1
def test(self, model_family, model_name, task):
# imports here to avoid import errors when pytest collects tests
import lm_eval
import lm_eval.models
import lm_eval.tasks
import lm_eval.evaluator
local_rank = os.getenv("LOCAL_RANK", "0")
device = torch.device(get_accelerator().device_name(local_rank))
dtype = torch.float
task_dict = lm_eval.tasks.get_task_dict([task])
if 'gpt-j-6B' in model_name:
dtype = torch.half
lm = lm_eval.models.get_model(model_family).create_from_arg_string(f"pretrained={model_name}",
{"device": "cpu"})
setattr(lm, model_family, getattr(lm, model_family).half().to(device))
lm._device = device
else:
lm = lm_eval.models.get_model(model_family).create_from_arg_string(
f"pretrained={model_name}", {"device": get_accelerator().device_name()})
get_accelerator().synchronize()
start = time.time()
bs_output = lm_eval.evaluator.evaluate(lm=lm, task_dict=task_dict)
get_accelerator().synchronize()
bs_time = time.time() - start
ds_model = deepspeed.init_inference(
getattr(lm, model_family),
mp_size=1,
dtype=dtype,
replace_with_kernel_inject=True,
enable_cuda_graph=False,
)
check_injection(ds_model)
setattr(lm, model_family, ds_model)
get_accelerator().synchronize()
start = time.time()
ds_output = lm_eval.evaluator.evaluate(lm=lm, task_dict=task_dict)
get_accelerator().synchronize()
ds_time = time.time() - start
ppl_diff = abs(bs_output["results"][task]["ppl"] - ds_output["results"][task]["ppl"])
#assert ds_time <= bs_time
assert ppl_diff < 0.01
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from unit.common import DistributedTest
from unit.simple_model import create_config_from_dict
@pytest.mark.inference
class TestInferenceConfig(DistributedTest):
world_size = 1
def test_overlap_kwargs(self):
config = {"replace_with_kernel_inject": True}
kwargs = {"replace_with_kernel_inject": True}
engine = deepspeed.init_inference(torch.nn.Module(), config=config, **kwargs)
assert engine._config.replace_with_kernel_inject
def test_overlap_kwargs_conflict(self):
config = {"replace_with_kernel_inject": True}
kwargs = {"replace_with_kernel_inject": False}
with pytest.raises(ValueError):
engine = deepspeed.init_inference(torch.nn.Module(), config=config, **kwargs)
def test_kwargs_and_config(self):
config = {"replace_with_kernel_inject": True}
kwargs = {"dtype": torch.float32}
engine = deepspeed.init_inference(torch.nn.Module(), config=config, **kwargs)
assert engine._config.replace_with_kernel_inject
assert engine._config.dtype == kwargs["dtype"]
def test_json_config(self, tmpdir):
config = {"replace_with_kernel_inject": True}
config_json = create_config_from_dict(tmpdir, config)
engine = deepspeed.init_inference(torch.nn.Module(), config=config_json)
assert engine._config.replace_with_kernel_inject
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import pytest
import torch
import deepspeed
from deepspeed.model_implementations import DeepSpeedTransformerInference
from unit.common import DistributedTest, DistributedFixture
from transformers import AutoConfig, AutoModelForCausalLM
import deepspeed.comm as dist
from huggingface_hub import snapshot_download
from transformers.utils import is_offline_mode
def check_dtype(model, expected_dtype):
def find_dtype(module):
for child in module.children():
if isinstance(child, DeepSpeedTransformerInference):
return child.attention.attn_qkvw.dtype
else:
found_dtype = find_dtype(child)
if found_dtype:
return found_dtype
found_dtype = find_dtype(model)
assert found_dtype, "Did not find DeepSpeedTransformerInference in model"
assert (found_dtype == expected_dtype), f"Expected transformer dtype {expected_dtype}, but found {found_dtype}"
@pytest.fixture(
params=["bigscience/bloom-560m", "EleutherAI/gpt-j-6B", "EleutherAI/gpt-neo-125M", "facebook/opt-125m"])
def model_name(request):
return request.param
@pytest.fixture(params=[torch.float16, torch.int8], ids=["fp16", "int8"])
def dtype(request):
return request.param
class save_shard(DistributedFixture):
world_size = 2
def run(self, model_name, class_tmpdir):
# Only write a checkpoint if one does not exist
if not os.path.isdir(os.path.join(class_tmpdir, model_name)):
world_size = int(os.getenv("WORLD_SIZE", "1"))
inf_config = {
"replace_with_kernel_inject": True,
"dtype": torch.float16,
"enable_cuda_graph": False,
"tensor_parallel": {
"tp_size": world_size
},
"save_mp_checkpoint_path": os.path.join(str(class_tmpdir), model_name),
}
# Load model and save sharded checkpoint
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
model = deepspeed.init_inference(model, config=inf_config)
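# The save_shard fixture relies on init_inference's save_mp_checkpoint_path option: with a
# tp_size of 2 it writes tensor-parallel checkpoint shards plus a ds_inference_config.json
# under class_tmpdir/<model_name>, which TestCheckpointShard below then loads onto meta
# tensors via the "checkpoint" entry of its inference config.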
@pytest.mark.seq_inference
class TestCheckpointShard(DistributedTest):
world_size = 2
def test(self, model_name, dtype, class_tmpdir, save_shard):
world_size = int(os.getenv("WORLD_SIZE", "1"))
inf_config = {
"replace_with_kernel_inject": True,
"dtype": dtype,
"enable_cuda_graph": False,
"tensor_parallel": {
"tp_size": world_size
},
"checkpoint": os.path.join(class_tmpdir, model_name, "ds_inference_config.json"),
}
# Load model on meta tensors
model_config = AutoConfig.from_pretrained(model_name)
# Note that we use half precision to load initially, even for int8
with deepspeed.OnDevice(dtype=torch.float16, device="meta"):
model = AutoModelForCausalLM.from_config(model_config, torch_dtype=torch.bfloat16)
model = model.eval()
model = deepspeed.init_inference(model, config=inf_config)
check_dtype(model, dtype)
@pytest.mark.seq_inference
class TestCheckpointShardinAutoTP(DistributedTest):
world_size = 2
def test(self, model_name, class_tmpdir):
def write_checkpoints_json(model_name, class_tmpdir):
import json
from pathlib import Path
local_rank = int(os.getenv("LOCAL_RANK", "0"))
if local_rank == 0:
# download only on first process
cached_repo_dir = snapshot_download(
model_name,
local_files_only=is_offline_mode(),
cache_dir=os.getenv("TRANSFORMERS_CACHE", None),
ignore_patterns=["*.safetensors", "*.msgpack", "*.h5"],
)
file_list = [str(entry) for entry in Path(cached_repo_dir).rglob("*.[bp][it][n]") if entry.is_file()]
data = {"type": "ds_model", "checkpoints": file_list, "version": 1.0}
os.makedirs(os.path.join(class_tmpdir, model_name), exist_ok=True)
json.dump(data, open(os.path.join(class_tmpdir, model_name, "ds_inference_config.json"), "w"))
dist.barrier()
world_size = int(os.getenv("WORLD_SIZE", "1"))
inf_config = {
"replace_with_kernel_inject": False,
"tensor_parallel": {
"tp_size": world_size
},
"checkpoint": os.path.join(class_tmpdir, model_name, "ds_inference_config.json"),
}
write_checkpoints_json(model_name, class_tmpdir)
# Load model on meta tensors
model_config = AutoConfig.from_pretrained(model_name)
        # Load initially on meta tensors in reduced precision (bfloat16 here)
with deepspeed.OnDevice(dtype=torch.bfloat16, device="meta"):
model = AutoModelForCausalLM.from_config(model_config, torch_dtype=torch.bfloat16)
model = model.eval()
model = deepspeed.init_inference(model, config=inf_config)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import time
import pytest
import torch
import deepspeed
from transformers import pipeline
from unit.common import DistributedTest
from deepspeed.accelerator import get_accelerator
@pytest.fixture
def query(model, task):
if task == "text-generation":
return "DeepSpeed is"
elif task == "fill-mask":
if "roberta" in model:
return "I am a <mask> model"
else:
return "I am a [MASK] model"
else:
raise NotImplementedError
@pytest.fixture
def inf_kwargs(task):
if task == "text-generation":
return {"do_sample": False, "min_length": 50, "max_length": 50}
else:
return {}
@pytest.mark.inference
@pytest.mark.parametrize("model,task", [
("bert-base-cased", "fill-mask"),
("roberta-base", "fill-mask"),
("gpt2", "text-generation"),
("facebook/opt-125m", "text-generation"),
("bigscience/bloom-560m", "text-generation"),
])
@pytest.mark.parametrize("cuda_graphs", [True, False])
@pytest.mark.parametrize("use_cuda_events", [True, False])
class TestModelProfiling(DistributedTest):
world_size = 1
def test(self, model, task, query, inf_kwargs, cuda_graphs, use_cuda_events, dtype=torch.float16):
if cuda_graphs and "bert" not in model:
pytest.skip(f"CUDA Graph not supported for {model}")
local_rank = int(os.getenv("LOCAL_RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "1"))
pipe = pipeline(task, model, framework="pt", device=get_accelerator().device_name(local_rank))
pipe.model = deepspeed.init_inference(pipe.model,
dtype=dtype,
mp_size=world_size,
replace_with_kernel_inject=True,
enable_cuda_graph=cuda_graphs)
pipe.model.profile_model_time(use_cuda_events=use_cuda_events)
e2e_times = []
model_times = []
for _ in range(10):
get_accelerator().synchronize()
start = time.perf_counter_ns()
r = pipe(query, **inf_kwargs)
get_accelerator().synchronize()
end = time.perf_counter_ns()
e2e_times.append((end - start) / 1e6) # convert ns to ms
model_times.extend(pipe.model.model_times())
for e2e_t, model_t in zip(e2e_times, model_times):
assert e2e_t >= model_t
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import numpy as np
import torch
import pytest
import random
import copy
from torch import nn
from unit.modelingpreln import BertEncoder as BertEncoderPreln
from unit.modeling import BertLayerNorm, BertConfig, BertEncoder as BertEncoderPostln
from deepspeed import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
def check_equal(first, second, atol=1e-2, verbose=False):
if verbose:
print()
for i, (x, y) in enumerate(zip(first, second)):
x = x[0].cpu().detach().numpy()
y = y[0].cpu().detach().numpy()
if verbose:
print("x = {}".format(x.flatten()))
print("y = {}".format(y.flatten()))
print('-' * 80)
np.testing.assert_allclose(x, y, err_msg="Index: {}".format(i), atol=atol)
def zero_grad(variables):
for variable in variables:
variable.grad.zero_()
device = torch.device(get_accelerator().device_name())
kwargs_fp32 = {'dtype': torch.float, 'device': device, 'requires_grad': True}
kwargs_fp16 = {'dtype': torch.half, 'device': device, 'requires_grad': True}
class DSEncoder(nn.Module):
def __init__(self, config, weights, biases):
super(DSEncoder, self).__init__()
self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.layer = nn.ModuleList([
copy.deepcopy(DeepSpeedTransformerLayer(config, weights, biases)) for _ in range(config.num_hidden_layers)
])
self.grads = []
self.pre_or_post = config.pre_layer_norm
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, checkpoint_activations=False):
all_encoder_layers = []
def custom(start, end):
def custom_forward(*inputs):
layers = self.layer[start:end]
x_ = inputs[0]
for layer in layers:
x_ = layer(x_, inputs[1])
return x_
return custom_forward
if checkpoint_activations:
raise NotImplementedError("`checkpoint` below is not defined")
#l = 0
#num_layers = len(self.layer)
#chunk_length = math.ceil(math.sqrt(num_layers))
#while l < num_layers:
# hidden_states = checkpoint.checkpoint(
# custom(
# l, # noqa: F821
# l + chunk_length),
# hidden_states,
# attention_mask * 1)
# l += chunk_length
# decoder layers
else:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers or checkpoint_activations:
if (self.pre_or_post):
hidden_states = self.FinalLayerNorm(hidden_states)
all_encoder_layers.append(hidden_states)
return all_encoder_layers
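# create_models() builds a HuggingFace-style BertEncoder (pre- or post-LayerNorm) and a DeepSpeed
# DSEncoder that share the exact same parameter tensors, so their outputs can be compared
# element-wise. The eight weights/biases appear to follow the kernel's packed layout (four
# hidden x hidden attention projections, an attention LayerNorm, the two MLP projections and the
# final LayerNorm); treat that ordering as an assumption rather than a documented contract.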
def create_models(ds_config):
bert_config = BertConfig(vocab_size_or_config_json_file=119547,
hidden_size=ds_config.hidden_size,
num_hidden_layers=ds_config.num_hidden_layers,
num_attention_heads=ds_config.heads,
batch_size=ds_config.batch_size,
intermediate_size=ds_config.intermediate_size,
hidden_act="gelu",
hidden_dropout_prob=ds_config.hidden_dropout_ratio,
attention_probs_dropout_prob=ds_config.attn_dropout_ratio,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=ds_config.initializer_range,
fp16=ds_config.fp16)
weights = []
biases = []
for i in range(4):
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size, ds_config.hidden_size)))
weights[i].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
weights[4].data.fill_(1.0)
weights.append(nn.Parameter(torch.Tensor(ds_config.intermediate_size, ds_config.hidden_size)))
weights[5].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size, ds_config.intermediate_size)))
weights[6].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
weights[7].data.fill_(1.0)
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[0].data.zero_()
for i in range(4):
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[i + 1].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.intermediate_size)))
biases[5].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[6].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[7].data.zero_()
if (ds_config.pre_layer_norm):
bert_encoder = BertEncoderPreln(bert_config, weights, biases)
else:
bert_encoder = BertEncoderPostln(bert_config, weights, biases)
ds_encoder = DSEncoder(ds_config, weights, biases)
if ds_config.fp16:
bert_encoder.half()
ds_encoder.half()
bert_encoder.to(get_accelerator().device_name())
ds_encoder.to(get_accelerator().device_name())
return bert_encoder, ds_encoder
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def run_forward(ds_config, seq_len, atol=1e-2, verbose=False, test_bsz=None):
set_seed(123)
bert_encoder, ds_encoder = create_models(ds_config)
bsz = ds_config.batch_size if test_bsz is None else test_bsz
# prepare test data
kwargs = kwargs_fp16 if ds_config.fp16 else kwargs_fp32
hidden_states = torch.randn(bsz, seq_len, ds_config.hidden_size, **kwargs)
input_mask = torch.randn(bsz, 1, 1, seq_len, **kwargs)
# run baseline
base_results = bert_encoder(hidden_states,
input_mask,
output_all_encoded_layers=False,
checkpoint_activations=False)
# run ds
ds_results = ds_encoder(hidden_states, input_mask, output_all_encoded_layers=False, checkpoint_activations=False)
# check forward evaluation
check_equal(base_results, ds_results, atol=atol, verbose=verbose)
# FP16 test cases can only run on devices that support FP16.
@pytest.mark.sequential
@pytest.mark.parametrize('batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16',
[
(64,160,128,2,24,False,True),
#(8,2048,2048,32,1,True,True),
(8,160,128,2,3,True,True),
(8,160,128,2,3,False,True),
(8,1600,128,2,3,True,True),
(8,1600,128,25,3,True,True),
(8,1600,128,25,3,False,True),
(8,256,52,4,3,True,True),
(3,1024,51,16,3,True,False),
(3,1024,54,16,3,True,True),
(8,1024,381,16,3,True,False),
(8,1024,384,16,3,True,True),
(8,1024,384,16,3,True,True),
(8,1024,119,16,3,True,False),
(8,1024,120,16,3,True,True),
(8,1024,509,16,3,True,False),
(8,1024,512,16,3,True,True),
(64,1024,56,16,3,False,False),
(64,1024,53,16,3,False,True),
(64,1024,24,16,3,False,False),
(64,1024,21,16,3,False,True),
(8,1024,384,16,3,False,False),
(8,1024,384,16,3,False,True),
(8,1024,512,16,3,False,False),
(8,1024,511,16,3,False,True),
(8,1536,128,24,3,False,False),
(8,1536,128,24,3,False,True),
(8,2048,128,32,3,False,False),
(8,2048,128,32,3,False,True),
(8,2560,128,40,3,False,False),
(8,2560,128,40,3,False,True),
(8,128,128,2,3,True,False),
(8,128,128,2,3,True,True),
(8,4096,128,64,3,True,True),
(8,8192,128,64,3,False,True),
(1,256,2048,32,3,True,True),
]) # yapf: disable
class TestCUDAForward(DistributedTest):
world_size = 1
def test_forward(self, batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16):
# Only run fp16 test cases on devices with FP16 capability.
if not get_accelerator().is_fp16_supported() and use_fp16 is True:
return
ds_config = DeepSpeedTransformerConfig()
ds_config.layer_id = None
ds_config.batch_size = batch_size
ds_config.hidden_size = hidden_size
ds_config.intermediate_size = 4 * hidden_size
ds_config.heads = heads
ds_config.attn_dropout_ratio = 0.0
ds_config.hidden_dropout_ratio = 0.0
ds_config.num_hidden_layers = num_layers
ds_config.pre_layer_norm = is_preln
ds_config.initializer_range = 0.02
ds_config.fp16 = use_fp16
run_forward(ds_config, seq_len, atol=3e-2)
@pytest.mark.parametrize('batch_size, small_bsz, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16',
[
(8,3,1024,512,16,3,True,False),
(8,7,1024,512,16,3,True,True),
(8,3,1024,512,16,3,False,False),
(8,7,1024,512,16,3,False,True),
]) # yapf: disable
class TestCUDAForwardSmallBatchSize(DistributedTest):
world_size = 1
def test_forward_with_small_bsz(self, batch_size, small_bsz, hidden_size, seq_len, heads, num_layers, is_preln,
use_fp16):
# Only run fp16 test cases on devices with FP16 capability.
if not get_accelerator().is_fp16_supported() and use_fp16 is True:
return
ds_config = DeepSpeedTransformerConfig()
ds_config.layer_id = None
ds_config.batch_size = batch_size
ds_config.hidden_size = hidden_size
ds_config.intermediate_size = 4 * hidden_size
ds_config.heads = heads
ds_config.attn_dropout_ratio = 0.0
ds_config.hidden_dropout_ratio = 0.0
ds_config.num_hidden_layers = num_layers
ds_config.pre_layer_norm = is_preln
ds_config.initializer_range = 0.02
ds_config.fp16 = use_fp16
run_forward(ds_config, seq_len, atol=3e-2, test_bsz=small_bsz)
@pytest.mark.parametrize('batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16',
[
#(64,1024,128,16,3,True,False),
#(64,1024,128,16,3,True,True),
#(64,1024,128,16,3,False,False),
#(64,1024,128,16,3,False,True),
]) # yapf: disable
class TestCUDAForwardStochastic(DistributedTest):
world_size = 1
def test_forward_stochastic(self, batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16):
# Only run fp16 test cases on devices with FP16 capability.
if not get_accelerator().is_fp16_supported() and use_fp16 is True:
return
ds_config = DeepSpeedTransformerConfig()
ds_config.layer_id = None
ds_config.batch_size = batch_size
ds_config.hidden_size = hidden_size
ds_config.intermediate_size = 4 * hidden_size
ds_config.heads = heads
ds_config.attn_dropout_ratio = 0.0
ds_config.hidden_dropout_ratio = 0.0
ds_config.num_hidden_layers = num_layers
ds_config.pre_layer_norm = is_preln
ds_config.initializer_range = 0.02
ds_config.fp16 = use_fp16
ds_config.stochastic_mode = True
run_forward(ds_config, seq_len, atol=7e-2)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import numpy as np
import torch
import pytest
import random
import copy
from torch import nn
from deepspeed import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
from deepspeed.accelerator import get_accelerator
from unit.modeling import BertConfig, BertLayerNorm, BertEncoder as BertEncoderPostln
from unit.modelingpreln import BertEncoder as BertEncoderPreln
from unit.common import DistributedTest
#if not deepspeed.ops.__installed_ops__['transformer']:
#pytest.skip(
# "transformer kernels are temporarily disabled because of unexplained failures",
# allow_module_level=True)
def check_equal(first, second, atol=1e-2, verbose=False):
diction_x = {}
diction_y = {}
if verbose:
for i, (x, y) in enumerate(zip(first, second)):
print(x[1], y[1])
for i, (x, y) in enumerate(zip(first, second)):
k = 0
while (diction_x.get((k, x[1])) is not None):
k = k + 1
diction_x[k, x[1]] = x[0]
k = 0
while (diction_y.get((k, y[1])) is not None):
k = k + 1
diction_y[k, y[1]] = y[0]
if verbose:
print()
for i, (x, y) in enumerate(zip(diction_x, diction_y)):
print(x, y)
for i, (x, y) in enumerate(zip(diction_x, diction_y)):
if (x[0] == 1): continue
if verbose:
print("checking ", x[1], ":")
y = diction_y[x[0], x[1]]
x = diction_x[x[0], x[1]]
if verbose:
print(((x == float('inf')).nonzero(as_tuple=True)[0]))
print(((y == float('inf')).nonzero(as_tuple=True)[0]))
x = x.cpu().detach().numpy()
y = y.cpu().detach().numpy()
avgx = np.sum(abs(x), dtype=float)
countx = x.shape[0]
for i in range(len(x.shape) - 1):
countx *= x.shape[i + 1]
avgx = np.sum(avgx)
tolerance = 1
if avgx != float('inf') and avgx != -float('inf'):
avgx = avgx / countx
tolerance = avgx * atol
if verbose:
print("tolerance is ", tolerance)
x = x.flatten()
y = y.flatten()
print("x = {}".format(x))
print("y = {}".format(y))
if any(x == float('inf')) or any(x == -float('inf')):
print("found infinity in x")
if any(y == float('inf')) or any(y == -float('inf')):
print("found infinity in y")
print(np.linalg.norm(x.astype('float64')))
print(np.linalg.norm(y.astype('float64')))
print('-' * 80)
#toler = np.linalg.norm(x.astype('float64')) * 0.0005
np.testing.assert_allclose(x, y, err_msg="Index: {}".format(i), atol=tolerance)
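# This check_equal variant receives (tensor, name) pairs: gradients are grouped by name,
# baseline/DeepSpeed entries sharing a key are compared pairwise, and the absolute tolerance is
# scaled by the mean magnitude of the baseline gradient so large-magnitude tensors are not held
# to an unrealistically tight absolute bound.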
def zero_grad(variables):
for variable in variables:
variable.grad.zero_()
device = torch.device(get_accelerator().device_name())
kwargs_fp32 = {'dtype': torch.float, 'device': device, 'requires_grad': True}
kwargs_fp16 = {'dtype': torch.half, 'device': device, 'requires_grad': True}
class DSEncoder(nn.Module):
def __init__(self, config, weights, biases):
super(DSEncoder, self).__init__()
self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.layer = nn.ModuleList([
copy.deepcopy(DeepSpeedTransformerLayer(config, weights, biases)) for _ in range(config.num_hidden_layers)
])
self.grads = []
self.pre_or_post = config.pre_layer_norm
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, checkpoint_activations=False):
all_encoder_layers = []
def custom(start, end):
def custom_forward(*inputs):
layers = self.layer[start:end]
x_ = inputs[0]
for layer in layers:
x_ = layer(x_, inputs[1])
return x_
return custom_forward
if checkpoint_activations:
raise NotImplementedError("`checkpoint` is not defined below")
#l = 0
#num_layers = len(self.layer)
#chunk_length = math.ceil(math.sqrt(num_layers))
#while l < num_layers:
# hidden_states = checkpoint.checkpoint(
# custom(
# l, # noqa: F821
# l + chunk_length),
# hidden_states,
# attention_mask * 1)
# l += chunk_length
# decoder layers
else:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask, grads=self.grads)
hidden_states.register_hook(lambda x, self=self: self.grads.append([x, "hidden_state"]))
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers or checkpoint_activations:
if (self.pre_or_post):
hidden_states = self.FinalLayerNorm(hidden_states)
all_encoder_layers.append(hidden_states)
return all_encoder_layers
def get_grads(self):
return self.grads
def create_models(ds_config):
bert_config = BertConfig(vocab_size_or_config_json_file=119547,
hidden_size=ds_config.hidden_size,
num_hidden_layers=ds_config.num_hidden_layers,
num_attention_heads=ds_config.heads,
intermediate_size=ds_config.intermediate_size,
hidden_act="gelu",
hidden_dropout_prob=ds_config.hidden_dropout_ratio,
attention_probs_dropout_prob=ds_config.attn_dropout_ratio,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=ds_config.initializer_range)
weights = []
biases = []
for i in range(4):
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size, ds_config.hidden_size)))
weights[i].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
weights[4].data.fill_(1.0)
weights.append(nn.Parameter(torch.Tensor(ds_config.intermediate_size, ds_config.hidden_size)))
weights[5].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size, ds_config.intermediate_size)))
weights[6].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
weights[7].data.fill_(1.0)
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[0].data.zero_()
for i in range(4):
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[i + 1].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.intermediate_size)))
biases[5].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[6].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[7].data.zero_()
if (ds_config.pre_layer_norm):
bert_encoder = BertEncoderPreln(bert_config, weights, biases)
else:
bert_encoder = BertEncoderPostln(bert_config, weights, biases)
ds_encoder = DSEncoder(ds_config, weights, biases)
if ds_config.fp16:
bert_encoder.half()
ds_encoder.half()
bert_encoder.to(get_accelerator().device_name())
ds_encoder.to(get_accelerator().device_name())
return bert_encoder, ds_encoder
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def run_backward(ds_config, seq_len, atol=1e-2, verbose=False):
set_seed(123)
bert_encoder, ds_encoder = create_models(ds_config)
# prepare test data
kwargs = kwargs_fp16 if ds_config.fp16 else kwargs_fp32
hidden_states = torch.randn(ds_config.batch_size, seq_len, ds_config.hidden_size, **kwargs)
input_mask = torch.randn(ds_config.batch_size, 1, 1, seq_len, **kwargs)
Y = torch.randn(ds_config.batch_size, seq_len, ds_config.hidden_size, **kwargs)
# run baseline
base_results = bert_encoder(hidden_states,
input_mask,
output_all_encoded_layers=False,
checkpoint_activations=False)
loss = (Y - base_results[0]).pow(2).sum() / 64
loss.backward()
base_grads = bert_encoder.get_grads()
# run ds
ds_results = ds_encoder(hidden_states, input_mask, output_all_encoded_layers=False, checkpoint_activations=False)
loss = (Y - ds_results[0]).pow(2).sum() / 64
loss.backward()
ds_grads = ds_encoder.get_grads()
# check grads
check_equal(base_grads, ds_grads, atol=atol, verbose=verbose)
#test_backward[3-1024-120-16-24-True-True-0.05]
#test_backward[3-1024-52-16-24-False-True-0.2]
# 3-128-54-2-24-False-True-0.2
@pytest.mark.parametrize('batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16, atol',
[
(64,160,128,2,24,False,True, 0.2),
(64,1600,128,2,4,False,True, 0.2),
(8,1600,128,25,3,True,True, 0.05),
(8,160,128,2,3,True,True, 0.1),
(8,1600,128,2,3,True,True, 0.05),
#(3,1024,119,16,24,True,False, 0.05),
#(3,1024,115,16,24,True,True, 0.05),
#(1024,128,10,2,2,False,False, 0.1),
#(3,1024,52,16,24,False,True, 0.2),
#(3,128,51,2,24,False,False, 0.1),
#(3,128,54,2,24,False,True, 0.2),
]) # yapf: disable
class TestCUDABackward(DistributedTest):
world_size = 1
def test_backward(self, batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16, atol):
# Only run fp16 test cases on devices with FP16 capability.
if not get_accelerator().is_fp16_supported() and (use_fp16 is True or is_preln is False):
return
ds_config = DeepSpeedTransformerConfig()
ds_config.layer_id = None
ds_config.batch_size = batch_size
ds_config.hidden_size = hidden_size
ds_config.intermediate_size = hidden_size
ds_config.heads = heads
ds_config.attn_dropout_ratio = 0.0
ds_config.hidden_dropout_ratio = 0.0
ds_config.num_hidden_layers = num_layers
ds_config.pre_layer_norm = is_preln
ds_config.initializer_range = 0.02
ds_config.fp16 = use_fp16
run_backward(ds_config, seq_len, atol=atol, verbose=True)
# [
# (3,1024,128,16,24,True,False, 0.07),
# (3,1024,128,16,24,True,True, 0.05),
# (3,1024,128,16,24,False,False, 0.1),
# (3,1024,128,16,24,False,True, 0.2),
# ]) # yapf: disable
#def test_backward_stochastic(batch_size,
# hidden_size,
# seq_len,
# heads,
# num_layers,
# is_preln,
# use_fp16,
# atol):
# # Only run fp16 test cases on devices with FP16 capability.
# if not get_accelerator().is_fp16_supported() and use_fp16 is True:
# return
#
# ds_config = DeepSpeedTransformerConfig()
# ds_config.layer_id = None
# ds_config.batch_size = batch_size
# ds_config.hidden_size = hidden_size
# ds_config.intermediate_size = 4 * hidden_size
# ds_config.max_seq_length = seq_len
# ds_config.heads = heads
# ds_config.attn_dropout_ratio = 0.0
# ds_config.hidden_dropout_ratio = 0.0
# ds_config.num_hidden_layers = num_layers
# ds_config.pre_layer_norm = is_preln
# ds_config.initializer_range = 0.02
# ds_config.fp16 = use_fp16
# ds_config.stochastic_mode = True
#
# run_backward(ds_config, atol=atol)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# DeepSpeed note, some parts of code taken & adapted from commit c368a9fd1b2c9dee4cc94de9a6bb0be3d447be41
# https://github.com/ptillet/torch-blocksparse/blob/master/tests/test_softmax.py
# https://github.com/ptillet/torch-blocksparse/blob/master/tests/test_matmul.py
# https://github.com/ptillet/torch-blocksparse/blob/master/tests/utils
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import SparseAttnBuilder
from unit.util import skip_on_arch, skip_on_cuda
if not deepspeed.ops.__compatible_ops__[SparseAttnBuilder.NAME]:
pytest.skip("sparse attention op is not compatible on this system", allow_module_level=True)
def dense_to_sparse(w, mask, block):
"""Converts dense matrix with explicit zeros to sparse matrix
"""
Z = w.size(0)
ret = torch.empty((Z, mask.sum(), block, block), dtype=w.dtype, device=w.device)
nnz = mask.nonzero()
h, i, j = nnz[:, 0], nnz[:, 1], nnz[:, 2]
for zz in range(Z):
for idx, (hh, ii, jj) in enumerate(zip(h, i, j)):
ret[zz, idx, :, :] = w[zz, hh, ii * block:(ii + 1) * block, jj * block:(jj + 1) * block]
return ret
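# The layout tensor used throughout these tests is an (H, M//block, N//block) 0/1 mask:
# dense_to_sparse gathers the blocks selected by the mask into a (Z, nnz, block, block) tensor,
# and sparse_to_dense below does the inverse by filling the masked-out blocks of a dense tensor
# with an explicit zero (or -inf for softmax inputs).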
def sparse_to_dense(w, mask, block, zero=0):
"""Converts sparse matrix to dense matrix with explicit zeros
"""
maskedw = w.clone()
for bz, wz in enumerate(range(0, w.size(0))):
for bh, wh in enumerate(range(0, w.size(1))):
for bi, wi in enumerate(range(0, w.size(2), block)):
for bj, wj in enumerate(range(0, w.size(3), block)):
if mask[bh, bi, bj] == 0:
maskedw[wz, wh, wi:wi + block, wj:wj + block] = zero
#maskedw[wz, wh, wi : wi+block, wj : wj+block] *= mask[bh, bi, bj]
return maskedw
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = {torch.float32: (5e-4, 5e-5), torch.float16: (3e-2, 2e-3)}[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
def make_layout(rho, shape):
probs = torch.Tensor([rho, 1 - rho])
generator = torch.distributions.categorical.Categorical(probs)
layout = generator.sample(shape)
return layout
def run_softmax_reference(x, scale, dx, kp_mask, attn_mask, layout, block):
x = sparse_to_dense(x, layout, block, zero=float('-inf'))
x.retain_grad()
if kp_mask is not None:
bcattn_mask = attn_mask[None, None, :, :] + torch.zeros_like(x)
x[bcattn_mask == 0] = float('-inf')
y = torch.softmax(x * scale + kp_mask[:, None, None, :], -1)
else:
y = torch.softmax(x * scale, -1)
y.backward(dx)
dx = x.grad.clone()
dx = dense_to_sparse(dx, layout, block)
y = dense_to_sparse(y, layout, block)
return y, dx
def run_softmax_sparse(x, scale, dx, kp_mask, attn_mask, layout, block):
from deepspeed.ops.sparse_attention.softmax import Softmax
sparse_softmax = Softmax(layout, block, bench=False)
dx = dense_to_sparse(dx, layout, block)
x = dense_to_sparse(x, layout, block)
x.retain_grad()
y = sparse_softmax(x,
scale=scale,
key_padding_mask=kp_mask,
key_padding_mask_mode='add',
attn_mask=attn_mask,
attn_mask_mode='mul')
y.backward(dx)
dx = x.grad.clone()
x.grad.zero_()
return x, dx
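# Note: run_softmax_sparse returns x rather than y; this appears intentional because the
# block-sparse softmax kernel operates in-place on its input, so after the call x already holds
# the forward output (treat the in-place behavior as an assumption of this test).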
def init_softmax_inputs(Z, H, M, N, scale, rho, block, dtype, dense_x=True, layout=None):
if layout is None:
layout = make_layout(rho, (H, M // block, N // block))
if dense_x:
x = torch.rand((Z, H, M, N), dtype=dtype, requires_grad=True, device=get_accelerator().device_name())
else:
x = torch.rand((Z, layout.sum(), block, block),
dtype=dtype,
requires_grad=True,
device=get_accelerator().device_name())
dx = torch.rand_like(x)
bool_attn_mask = torch.randint(low=0,
high=2,
size=(N, N),
dtype=torch.bool,
requires_grad=False,
device=get_accelerator().device_name())
fp_attn_mask = bool_attn_mask.type(dtype)
kp_mask = torch.randint(low=0,
high=2,
size=(Z, N),
dtype=dtype,
requires_grad=False,
device=get_accelerator().device_name())
kp_mask[kp_mask == 1.] = float('-inf')
return layout, x, dx, bool_attn_mask, fp_attn_mask, kp_mask
@pytest.mark.parametrize("block", [16, 32])
@pytest.mark.parametrize("width", [256, 576])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_softmax(block, width, dtype):
valid_cuda_versions = [101, 102, 110, 111]
skip_on_arch(min_arch=7)
skip_on_cuda(valid_cuda=valid_cuda_versions)
Z = 2
H = 4
scale = 0.4
rho = 0.4
M = N = width
layout, x, dx, bool_attn_mask, fp_attn_mask, kp_mask = init_softmax_inputs(Z,
H,
M,
N,
scale,
rho,
block,
dtype,
layout=None)
ref_y, ref_dx = run_softmax_reference(x, scale, dx, kp_mask, bool_attn_mask, layout, block)
st_y, st_dx = run_softmax_sparse(x, scale, dx, kp_mask, fp_attn_mask, layout, block)
assert allclose(ref_y, st_y)
assert allclose(ref_dx, st_dx)
def run_matmul_reference(x, w, mode, trans_a, trans_b, layout, block, dy):
x = sparse_to_dense(x, layout, block) if mode == 'dsd' else x
w = sparse_to_dense(w, layout, block) if mode == 'dds' else w
x.retain_grad()
w.retain_grad()
xx = x.transpose(2, 3) if trans_a else x
ww = w.transpose(2, 3) if trans_b else w
y = torch.matmul(xx, ww)
y = sparse_to_dense(y, layout, block) if mode == 'sdd' else y
y.backward(dy)
dx = x.grad.clone()
dw = w.grad.clone()
x.grad.zero_()
w.grad.zero_()
y = dense_to_sparse(y, layout, block) if mode == 'sdd' else y
dx = dense_to_sparse(dx, layout, block) if mode == 'dsd' else dx
dw = dense_to_sparse(dw, layout, block) if mode == 'dds' else dw
return y, dx, dw
def run_matmul_sparse(x, w, mode, trans_a, trans_b, layout, block, dy):
from deepspeed.ops.sparse_attention.matmul import MatMul
x = dense_to_sparse(x, layout, block) if mode == 'dsd' else x
w = dense_to_sparse(w, layout, block) if mode == 'dds' else w
dy = dense_to_sparse(dy, layout, block) if mode == 'sdd' else dy
op = MatMul(layout, block, mode, trans_a=trans_a, trans_b=trans_b)
x.retain_grad()
w.retain_grad()
y = op(x, w)
y.backward(dy)
dx = x.grad.clone()
dw = w.grad.clone()
x.grad.zero_()
return y, dx, dw
def init_matmul_inputs(Z, H, M, N, K, rho, mode, trans_a, trans_b, block, dtype, layout):
torch.manual_seed(1)
AS0 = K if trans_a else M
AS1 = M if trans_a else K
BS0 = N if trans_b else K
BS1 = K if trans_b else N
shape = {'sdd': (M, N), 'dsd': (AS0, AS1), 'dds': (BS0, BS1)}[mode]
x = torch.rand((Z, H, AS0, AS1), dtype=dtype, requires_grad=True, device=get_accelerator().device_name())
w = torch.rand((Z, H, BS0, BS1), dtype=dtype, requires_grad=True, device=get_accelerator().device_name())
dy = torch.rand((Z, H, M, N), dtype=dtype, device=get_accelerator().device_name())
if layout is None:
layout = make_layout(rho, (H, shape[0] // block, shape[1] // block))
else:
assert list(layout.shape) == [H, shape[0] // block, shape[1] // block]
x.retain_grad()
w.retain_grad()
return x, w, dy, shape, layout
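# Mode naming follows the torch-blocksparse convention: 'sdd' computes a sparse output from two
# dense operands, 'dsd' has a sparse first operand, and 'dds' a sparse second operand. The
# reference implementation above densifies whichever operand is sparse, runs a normal
# torch.matmul, and sparsifies the result again so both paths can be compared block-wise.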
testdata = [
(16, dtype, mode, trans_a, trans_b)\
for dtype in [torch.float16]\
for mode in ['sdd', 'dds']\
for trans_a in [False]\
for trans_b in [False, True]\
] + [
(16, dtype, mode, trans_a, trans_b)\
for dtype in [torch.float16]\
for mode in ['dsd']\
for trans_a in [False, True]\
for trans_b in [False]\
] + [
(16, dtype, mode, trans_a, trans_b)\
for dtype in [torch.float32]\
for mode in ['sdd', 'dsd', 'dds']\
for trans_a in [False]\
for trans_b in [False]\
] + [
(block, torch.float16, mode, False, False)\
for block in [16, 32, 64]\
for mode in ['sdd', 'dsd', 'dds']\
]
@pytest.mark.parametrize("block, dtype, mode, trans_a, trans_b", testdata)
def test_matmul(block, dtype, mode, trans_a, trans_b):
valid_cuda_versions = [101, 102, 110, 111]
skip_on_arch(min_arch=7)
skip_on_cuda(valid_cuda=valid_cuda_versions)
Z = 3
H = 2
M = 128
N = 256
K = 192
rho = 0.5
x, w, dy, shape, layout = init_matmul_inputs(Z, H, M, N, K, rho, mode, trans_a, trans_b, block, dtype, layout=None)
ref_y, ref_dx, ref_dw = run_matmul_reference(x.clone(), w.clone(), mode, trans_a, trans_b, layout, block, dy)
st_y, st_dx, st_dw = run_matmul_sparse(x.clone(), w.clone(), mode, trans_a, trans_b, layout, block, dy)
assert allclose(ref_y, st_y)
assert allclose(ref_dx, st_dx)
assert allclose(ref_dw, st_dw)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
import torch
import pytest
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import allclose, get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
def ref_implementation(vals, gamma, beta, epsilon, channels, dtype):
vals_f = vals.to(torch.float32)
gamma_f = gamma.to(torch.float32)
beta_f = beta.to(torch.float32)
return torch.nn.functional.layer_norm(vals_f, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype)
def ds_implementation(vals, gamma, beta, epsilon):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.layer_norm(vals, gamma, beta, epsilon)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 32])
@pytest.mark.parametrize("seq_len", [1, 128])
@pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_layer_norm(batch, seq_len, channels, dtype):
vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name())
gamma = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name())
beta = torch.rand((channels), dtype=dtype, device=get_accelerator().current_device_name())
epsilon = 1e-5
ref_output = ref_implementation(vals, gamma, beta, epsilon, channels, dtype)
new_output = ds_implementation(vals, gamma, beta, epsilon)
if not allclose(new_output, ref_output):
#print(new_output - ref_output)
assert allclose(new_output, ref_output)
def residual_ref_implementation(vals, bias, res, gamma, beta, epsilon, channels, dtype):
vals_f = vals.to(torch.float32)
bias_f = bias.to(torch.float32).reshape(1, 1, -1)
res_f = res.to(torch.float32)
gamma_f = gamma.to(torch.float32)
beta_f = beta.to(torch.float32)
return torch.nn.functional.layer_norm(vals_f + bias_f + res_f, (channels, ), weight=gamma_f, bias=beta_f).to(dtype)
def residual_ds_implementation(vals, bias, res, gamma, beta, epsilon):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module._layer_norm_residual(vals, bias, res, gamma, beta, epsilon)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 32])
@pytest.mark.parametrize("seq_len", [1, 128])
@pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_layer_norm_residual(batch, seq_len, channels, dtype):
vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name())
residual = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name())
bias = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name())
gamma = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name())
beta = torch.rand((channels), dtype=dtype, device=get_accelerator().current_device_name())
epsilon = 1e-5
new_output = residual_ds_implementation(vals, bias, residual, gamma, beta, epsilon)
ref_output = residual_ref_implementation(vals, bias, residual, gamma, beta, epsilon, channels, dtype)
print((new_output - ref_output).abs().max())
assert allclose(new_output, ref_output)
def residual_store_ref_implementation(vals, bias, res, gamma, beta, epsilon, channels, dtype):
vals_f = vals.to(torch.float32)
bias_f = bias.to(torch.float32).reshape(1, 1, -1)
res_f = res.to(torch.float32)
gamma_f = gamma.to(torch.float32)
beta_f = beta.to(torch.float32)
res_output = vals_f + bias_f + res_f
norm_output = torch.nn.functional.layer_norm(res_output, (channels, ), weight=gamma_f, bias=beta_f).to(dtype)
return norm_output, res_output.to(dtype)
def residual_store_ds_implementation(vals, bias, res, gamma, beta, epsilon):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.layer_norm_residual_store_pre_ln_res(vals, bias, res, gamma, beta, epsilon)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 32])
@pytest.mark.parametrize("seq_len", [1, 128])
@pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_layer_norm_residual_store_pre_ln_res(batch, seq_len, channels, dtype):
vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name())
residual = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name())
bias = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name())
gamma = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name())
beta = torch.rand((channels), dtype=dtype, device=get_accelerator().current_device_name())
epsilon = 1e-5
# Need to run the reference first since there's an in-place component to ours
ref_norm_output, norm_res_output = residual_store_ref_implementation(vals, bias, residual, gamma, beta, epsilon,
channels, dtype)
ds_norm_output, ds_res_output = residual_store_ds_implementation(vals, bias, residual, gamma, beta, epsilon)
assert allclose(ds_res_output, norm_res_output)
assert allclose(ds_norm_output, ref_norm_output)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
TOLERANCES = None
def get_tolerances():
global TOLERANCES
if TOLERANCES is None:
# Residual add, as a sequence of casted additions, currently requires a higher tolerance
# than the other operators for FP16. We should instead better align the behaviors
# of the reference to match our kernel implementation (TODO(cmikeh2))
TOLERANCES = {torch.float32: (5e-4, 5e-5), torch.float16: (3e-2, 4e-3)}
if get_accelerator().is_bf16_supported():
# Note: BF16 tolerance is higher than FP16 because of the lower precision (7 (+1) bits vs
# 10 (+1) bits)
TOLERANCES[torch.bfloat16] = (4.8e-1, 3.2e-2)
return TOLERANCES
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = get_tolerances()[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
@pytest.fixture(scope="module")
def inference_module():
return InferenceBuilder().load()
def res_add_bias_ref(hidden_state, residual, attn_output, attn_bias, final_bias, mp_size=1, pre_attn_norm=True):
if pre_attn_norm:
hidden_state += (residual + final_bias + attn_output + attn_bias) / mp_size
else:
hidden_state += residual + final_bias
return hidden_state
def res_add_bias_ref_gptj(hidden_state, residual, attn_output, attn_bias, final_bias, add_attn_bias, mp_size):
hidden_state += attn_output + (residual + final_bias) / mp_size
if add_attn_bias:
hidden_state += attn_bias / mp_size
return hidden_state
def run_residual_add_reference(hidden_state, residual, attn_output, attn_bias, final_bias, mlp_after_attn,
add_attn_bias, mp_size, pre_attn_norm):
if mlp_after_attn:
return res_add_bias_ref(hidden_state, residual, attn_output, attn_bias, final_bias, mp_size, pre_attn_norm)
else:
return res_add_bias_ref_gptj(hidden_state, residual, attn_output, attn_bias, final_bias, add_attn_bias,
mp_size)
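# Two reference paths are exercised: res_add_bias_ref models the usual sequential
# attention -> MLP residual, while res_add_bias_ref_gptj models the GPT-J-style parallel
# attention/MLP block where the attention output is added outside the mp_size scaling. The fused
# residual_add_bias_* kernel under test must reproduce whichever path the parametrization selects.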
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("hidden_dim", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", get_dtypes())
@pytest.mark.parametrize("mlp_after_attn", [True, False])
@pytest.mark.parametrize("add_bias", [True, False])
@pytest.mark.parametrize("mp_size", [1, 2])
@pytest.mark.parametrize("pre_attn_norm", [True, False])
def test_residual_add(inference_module, batch, sequence, hidden_dim, dtype, mlp_after_attn, add_bias, mp_size,
pre_attn_norm):
ds_out = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
residual = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
attn_output = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
final_bias = torch.randn((hidden_dim), dtype=dtype, device=get_accelerator().device_name())
attn_bias = torch.randn((hidden_dim), dtype=dtype, device=get_accelerator().device_name())
ref_out = ds_out.clone()
ref_out = run_residual_add_reference(ref_out, residual, attn_output, attn_bias, final_bias, mlp_after_attn,
add_bias, mp_size, pre_attn_norm)
res_add_args = [
ds_out, residual, attn_output, attn_bias, final_bias, mp_size, mlp_after_attn, add_bias, pre_attn_norm
]
if dtype == torch.float16:
ds_out = inference_module.residual_add_bias_fp16(*res_add_args)
elif dtype == torch.float32:
ds_out = inference_module.residual_add_bias_fp32(*res_add_args)
elif dtype == torch.bfloat16:
ds_out = inference_module.residual_add_bias_bf16(*res_add_args)
else:
raise ValueError(f"Unsupported dtype: {dtype}")
if not allclose(ds_out, ref_out):
print((ds_out - ref_out).abs().max())
print((ds_out - ref_out).abs().mean())
print((ds_out - ref_out))
    assert (allclose(ds_out, ref_out))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.accelerator import get_accelerator
TOLERANCES = None
def get_tolerances():
global TOLERANCES
if TOLERANCES is None:
TOLERANCES = {torch.float32: (5e-4, 5e-5), torch.float16: (3e-2, 2e-3)}
if get_accelerator().is_bf16_supported():
# Note: BF16 tolerance is higher than FP16 because of the lower precision (7 (+1) bits vs
# 10 (+1) bits)
TOLERANCES[torch.bfloat16] = (4.8e-1, 3.2e-2)
return TOLERANCES
DTYPES = None
def get_dtypes():
global DTYPES
if DTYPES is None:
DTYPES = [torch.float16, torch.float32]
try:
if get_accelerator().is_bf16_supported():
DTYPES.append(torch.bfloat16)
except (AssertionError, AttributeError):
pass
return DTYPES
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = get_tolerances()[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import allclose, get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
def run_moe_res_matmul_reference(residual, coef1, coef2, output):
return residual * coef1 + output * coef2
def run_moe_res_matmul_ds(residual, coef, output):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
coef_t = coef.transpose(-1, -2).contiguous()
return inference_module.moe_res_matmul(residual, coef_t, output)
@pytest.mark.inference_ops
@pytest.mark.parametrize("hidden_dim", [16, 64])
@pytest.mark.parametrize("c", [1, 4])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_moe_residual_matmul(hidden_dim, c, dtype):
residual_ds = torch.randn((c, hidden_dim * c, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
coeff1 = torch.randn((1, 1, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
coeff2 = torch.randn((1, 1, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
out_ds = torch.randn((c, hidden_dim * c, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
coeff_ds = torch.cat((coeff1, coeff2), dim=-1)
residual_ref = residual_ds.clone().detach()
coeff_ref = coeff_ds.clone().detach()
out_ref = out_ds.clone().detach()
ds_out = run_moe_res_matmul_ds(residual_ds, coeff_ds, out_ds)
ref_out = run_moe_res_matmul_reference(residual_ref, coeff1, coeff2, out_ref)
assert (allclose(ds_out, ref_out))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import allclose, get_dtypes
from packaging import version as pkg_version
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
torch_minor_version = None
def run_bias_gelu_reference(activations, bias):
# Expected behavior is that of casting to float32 internally and using the tanh approximation
return torch.nn.functional.gelu(activations.to(torch.float32) + bias.to(torch.float32),
approximate='tanh').to(activations.dtype)
def run_bias_gelu_ds(activations, bias):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
if activations.dtype == torch.float16:
return inference_module.bias_gelu_fp16(activations, bias)
elif activations.dtype == torch.bfloat16:
return inference_module.bias_gelu_bf16(activations, bias)
else:
return inference_module.bias_gelu_fp32(activations, bias)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("channels", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_bias_gelu(batch, sequence, channels, dtype):
if pkg_version.parse(torch.__version__) < pkg_version.parse("1.12"):
pytest.skip("gelu implementation matches only after torch 1.12")
activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=get_accelerator().device_name())
bias_ds = torch.randn((channels), dtype=dtype, device=get_accelerator().device_name())
activations_ref = activations_ds.clone().detach()
bias_ref = bias_ds.clone().detach()
ds_out = run_bias_gelu_ds(activations_ds, bias_ds)
ref_out = run_bias_gelu_reference(activations_ref, bias_ref)
assert (allclose(ds_out, ref_out))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import allclose, get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
torch_minor_version = None
def run_bias_relu_reference(activations, bias):
# Expected behavior is that of casting to float32 internally
return torch.nn.functional.relu(activations.to(torch.float32) + bias.to(torch.float32)).to(activations.dtype)
def run_bias_relu_ds(activations, bias):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
if activations.dtype == torch.float16:
return inference_module.bias_relu_fp16(activations, bias)
elif activations.dtype == torch.bfloat16:
return inference_module.bias_relu_bf16(activations, bias)
else:
return inference_module.bias_relu_fp32(activations, bias)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("channels", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_bias_relu(batch, sequence, channels, dtype):
activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=get_accelerator().device_name())
bias_ds = torch.randn((channels), dtype=dtype, device=get_accelerator().device_name())
activations_ref = activations_ds.clone().detach()
bias_ref = bias_ds.clone().detach()
ds_out = run_bias_relu_ds(activations_ds, bias_ds)
ref_out = run_bias_relu_reference(activations_ref, bias_ref)
assert (allclose(ds_out, ref_out))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
import torch
import pytest
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder # type: ignore
from .inference_test_utils import allclose, get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
def ref_implementation(vals, gamma, epsilon):
variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True)
vals = vals * torch.rsqrt(variance + epsilon)
if gamma.dtype in [torch.float16, torch.bfloat16]:
vals = vals.to(gamma.dtype)
return gamma * vals
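# RMSNorm reference: y = gamma * x / sqrt(mean(x**2, dim=-1) + eps). Unlike LayerNorm there is
# no mean subtraction and no bias; the statistics are computed in float32 and the result is cast
# back to gamma's dtype for half/bfloat16 inputs.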
def ds_implementation(vals, gamma, epsilon):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.rms_norm(vals, gamma, epsilon)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 32])
@pytest.mark.parametrize("seq_len", [1, 128])
@pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_rms_norm(batch, seq_len, channels, dtype):
device = get_accelerator().current_device_name()
vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=device)
gamma = torch.randn((channels), dtype=dtype, device=device)
epsilon = 1e-5
ref_output = ref_implementation(vals, gamma, epsilon)
new_output = ds_implementation(vals, gamma, epsilon)
assert allclose(new_output, ref_output)
def pre_ds_implementation(vals, residual, gamma, epsilon):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.pre_rms_norm(vals, residual, gamma, epsilon)
def pre_ref_implementation(vals, residual, gamma, epsilon):
residual = vals.to(torch.float32) + residual.to(torch.float32)
vals = residual
variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True)
vals = vals * torch.rsqrt(variance + epsilon)
if gamma.dtype in [torch.float16, torch.bfloat16]:
vals = vals.to(gamma.dtype)
return gamma * vals, residual.to(gamma.dtype)
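# The pre-norm variant fuses the residual addition with RMSNorm: it returns both
# gamma * rmsnorm(vals + residual) and the updated residual (vals + residual), which a
# transformer block would typically carry forward to the next layer.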
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 32])
@pytest.mark.parametrize("seq_len", [1, 128])
@pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_pre_norm(batch, seq_len, channels, dtype):
device = get_accelerator().current_device_name()
vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=device)
residual = torch.randn((batch, seq_len, channels), dtype=dtype, device=device)
gamma = torch.randn((channels), dtype=dtype, device=device)
epsilon = 1e-5
ref_output = pre_ref_implementation(vals, residual, gamma, epsilon)
new_output = pre_ds_implementation(vals, residual, gamma, epsilon)
assert allclose(new_output[0], ref_output[0])
#assert allclose(new_output[1], ref_output[1])
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import allclose, get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
torch_minor_version = None
def run_bias_add_reference(activations, bias):
return activations + bias
def run_bias_add_ds(activations, bias):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
if activations.dtype == torch.float16:
return inference_module.bias_add_fp16(activations, bias)
elif activations.dtype == torch.bfloat16:
return inference_module.bias_add_bf16(activations, bias)
else:
return inference_module.bias_add_fp32(activations, bias)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("channels", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_bias_add(batch, sequence, channels, dtype):
activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=get_accelerator().device_name())
bias_ds = torch.randn((channels), dtype=dtype, device=get_accelerator().device_name())
activations_ref = activations_ds.clone().detach()
bias_ref = bias_ds.clone().detach()
ds_out = run_bias_add_ds(activations_ds, bias_ds)
ref_out = run_bias_add_reference(activations_ref, bias_ref)
if not allclose(ds_out, ref_out):
print((ds_out - ref_out).abs().max())
assert (allclose(ds_out, ref_out))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.ops.op_builder import InferenceBuilder
from deepspeed.accelerator import get_accelerator
from deepspeed.utils.types import ActivationFuncType
from .inference_test_utils import allclose, get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
torch_minor_version = None
def run_bias_geglu_reference(activations, bias):
# Expected behavior is that of casting to float32 internally
# Explicitly using the default GeLU
activations = activations + bias.reshape(1, 1, -1)
hidden_states, gate = activations.chunk(2, dim=-1)
return hidden_states * torch.nn.functional.gelu(gate.to(torch.float32)).to(activations.dtype)
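# GEGLU reference: after the bias add, the last dimension is split in half into
# (hidden_states, gate) and the output is hidden_states * gelu(gate). This is why the tests
# below allocate activations and bias with channels * 2.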
def run_bias_geglu_ds(activation, bias):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_GELU)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("channels", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_bias_geglu(batch, sequence, channels, dtype):
activation = torch.randn((batch, sequence, channels * 2), dtype=dtype, device=get_accelerator().device_name())
bias = torch.randn((channels * 2), dtype=dtype, device=get_accelerator().device_name())
ds_out = run_bias_geglu_ds(activation, bias)
ref_out = run_bias_geglu_reference(activation, bias)
assert (allclose(ds_out, ref_out))
def run_gated_silu_reference(activations, bias):
# Expected behavior is that of casting to float32 internally
# Explicitly using the default SiLU for the gate
activations = activations + bias.reshape(1, 1, -1)
hidden_states, gate = activations.chunk(2, dim=-1)
return hidden_states * torch.nn.functional.silu(gate.to(torch.float32)).to(activations.dtype)
def run_gated_silu_ds(activation, bias):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_SILU)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("channels", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_gated_silu(batch, sequence, channels, dtype):
activation = torch.randn((batch, sequence, channels * 2), dtype=dtype, device=get_accelerator().device_name())
bias = torch.randn((channels * 2), dtype=dtype, device=get_accelerator().device_name())
ds_out = run_gated_silu_ds(activation, bias)
ref_out = run_gated_silu_reference(activation, bias)
assert (allclose(ds_out, ref_out))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import os
import filecmp
import torch
import deepspeed
import deepspeed.comm as dist
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import AsyncIOBuilder
from unit.common import DistributedTest
KILO_BYTE = 1024
BLOCK_SIZE = KILO_BYTE
QUEUE_DEPTH = 2
IO_SIZE = 4 * BLOCK_SIZE
IO_PARALLEL = 2
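# With these settings each test buffer is IO_SIZE = 4 KB, transferred by the async I/O engine in
# BLOCK_SIZE = 1 KB blocks with a queue depth of 2 and 2 I/O threads (see _validate_handle_state).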
if not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]:
pytest.skip('Skip tests since async-io is not compatible', allow_module_level=True)
def _skip_for_invalid_environment(use_cuda_device=True, use_cuda_pinned_tensor=True):
if not get_accelerator().is_available():
if use_cuda_device:
pytest.skip("GPU tensors only supported in CUDA environments.")
if use_cuda_pinned_tensor:
pytest.skip("CUDA-pinned tensors only supported in CUDA environments.")
def _get_local_rank():
if get_accelerator().is_available():
return dist.get_rank()
return 0
def _do_ref_write(tmpdir, index=0):
file_suffix = f'{_get_local_rank()}_{index}'
ref_file = os.path.join(tmpdir, f'_py_random_{file_suffix}.pt')
ref_buffer = os.urandom(IO_SIZE)
with open(ref_file, 'wb') as f:
f.write(ref_buffer)
return ref_file, ref_buffer
def _get_test_write_file(tmpdir, index):
file_suffix = f'{_get_local_rank()}_{index}'
return os.path.join(tmpdir, f'_aio_write_random_{file_suffix}.pt')
def _get_test_write_file_and_cuda_buffer(tmpdir, ref_buffer, index=0):
test_file = _get_test_write_file(tmpdir, index)
test_buffer = get_accelerator().ByteTensor(list(ref_buffer))
return test_file, test_buffer
def _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffer, aio_handle=None, index=0):
test_file = _get_test_write_file(tmpdir, index)
if aio_handle is None:
test_buffer = get_accelerator().pin_memory(torch.ByteTensor(list(ref_buffer)))
else:
tmp_buffer = torch.ByteTensor(list(ref_buffer))
test_buffer = aio_handle.new_cpu_locked_tensor(len(ref_buffer), tmp_buffer)
test_buffer.data.copy_(tmp_buffer)
return test_file, test_buffer
def _validate_handle_state(handle, single_submit, overlap_events):
assert handle.get_single_submit() == single_submit
assert handle.get_overlap_events() == overlap_events
assert handle.get_thread_count() == IO_PARALLEL
assert handle.get_block_size() == BLOCK_SIZE
assert handle.get_queue_depth() == QUEUE_DEPTH
@pytest.mark.parametrize("use_cuda_pinned_tensor", [True, False])
@pytest.mark.parametrize("single_submit", [True, False])
@pytest.mark.parametrize("overlap_events", [True, False])
class TestRead(DistributedTest):
world_size = 1
requires_cuda_env = False
if not get_accelerator().is_available():
init_distributed = False
set_dist_env = False
def test_parallel_read(self, tmpdir, use_cuda_pinned_tensor, single_submit, overlap_events):
_skip_for_invalid_environment(use_cuda_device=False, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
if use_cuda_pinned_tensor:
aio_buffer = get_accelerator().pin_memory(torch.empty(IO_SIZE, dtype=torch.uint8, device='cpu'))
else:
aio_buffer = h.new_cpu_locked_tensor(IO_SIZE, torch.empty(0, dtype=torch.uint8))
_validate_handle_state(h, single_submit, overlap_events)
ref_file, _ = _do_ref_write(tmpdir)
read_status = h.sync_pread(aio_buffer, ref_file)
assert read_status == 1
with open(ref_file, 'rb') as f:
ref_buffer = list(f.read())
assert ref_buffer == aio_buffer.tolist()
if not use_cuda_pinned_tensor:
h.free_cpu_locked_tensor(aio_buffer)
@pytest.mark.parametrize("cuda_device", [True, False])
def test_async_read(self, tmpdir, use_cuda_pinned_tensor, single_submit, overlap_events, cuda_device):
_skip_for_invalid_environment(use_cuda_device=cuda_device, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
use_cpu_locked_tensor = False
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
if cuda_device:
aio_buffer = torch.empty(IO_SIZE, dtype=torch.uint8, device=get_accelerator().device_name())
elif use_cuda_pinned_tensor:
aio_buffer = get_accelerator().pin_memory(torch.empty(IO_SIZE, dtype=torch.uint8, device='cpu'))
else:
aio_buffer = h.new_cpu_locked_tensor(IO_SIZE, torch.empty(0, dtype=torch.uint8))
use_cpu_locked_tensor = True
_validate_handle_state(h, single_submit, overlap_events)
ref_file, _ = _do_ref_write(tmpdir)
read_status = h.async_pread(aio_buffer, ref_file)
assert read_status == 0
wait_status = h.wait()
assert wait_status == 1
with open(ref_file, 'rb') as f:
ref_buffer = list(f.read())
assert ref_buffer == aio_buffer.tolist()
if use_cpu_locked_tensor:
h.free_cpu_locked_tensor(aio_buffer)
@pytest.mark.parametrize("use_cuda_pinned_tensor", [True, False])
@pytest.mark.parametrize("single_submit", [True, False])
@pytest.mark.parametrize("overlap_events", [True, False])
class TestWrite(DistributedTest):
world_size = 1
requires_cuda_env = False
if not get_accelerator().is_available():
init_distributed = False
set_dist_env = False
def test_parallel_write(self, tmpdir, use_cuda_pinned_tensor, single_submit, overlap_events):
_skip_for_invalid_environment(use_cuda_device=False, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
ref_file, ref_buffer = _do_ref_write(tmpdir)
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
if use_cuda_pinned_tensor:
aio_file, aio_buffer = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffer)
else:
aio_file, aio_buffer = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffer, h)
_validate_handle_state(h, single_submit, overlap_events)
write_status = h.sync_pwrite(aio_buffer, aio_file)
assert write_status == 1
if not use_cuda_pinned_tensor:
h.free_cpu_locked_tensor(aio_buffer)
assert os.path.isfile(aio_file)
filecmp.clear_cache()
assert filecmp.cmp(ref_file, aio_file, shallow=False)
@pytest.mark.parametrize("cuda_device", [True, False])
def test_async_write(self, tmpdir, use_cuda_pinned_tensor, single_submit, overlap_events, cuda_device):
_skip_for_invalid_environment(use_cuda_device=cuda_device, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
ref_file, ref_buffer = _do_ref_write(tmpdir)
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
use_cpu_locked_tensor = False
if cuda_device:
aio_file, aio_buffer = _get_test_write_file_and_cuda_buffer(tmpdir, ref_buffer)
elif use_cuda_pinned_tensor:
aio_file, aio_buffer = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffer)
else:
aio_file, aio_buffer = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffer, h)
use_cpu_locked_tensor = True
_validate_handle_state(h, single_submit, overlap_events)
write_status = h.async_pwrite(aio_buffer, aio_file)
assert write_status == 0
wait_status = h.wait()
assert wait_status == 1
if use_cpu_locked_tensor:
h.free_cpu_locked_tensor(aio_buffer)
assert os.path.isfile(aio_file)
filecmp.clear_cache()
assert filecmp.cmp(ref_file, aio_file, shallow=False)
@pytest.mark.sequential
@pytest.mark.parametrize("use_cuda_pinned_tensor", [True, False])
@pytest.mark.parametrize("cuda_device", [True, False])
class TestAsyncQueue(DistributedTest):
world_size = 1
requires_cuda_env = False
if not get_accelerator().is_available():
init_distributed = False
set_dist_env = False
@pytest.mark.parametrize("async_queue", [2, 3])
def test_read(self, tmpdir, async_queue, use_cuda_pinned_tensor, cuda_device):
_skip_for_invalid_environment(use_cuda_device=cuda_device, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
ref_files = []
for i in range(async_queue):
f, _ = _do_ref_write(tmpdir, i)
ref_files.append(f)
single_submit = True
overlap_events = True
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
use_cpu_locked_tensor = False
if cuda_device:
aio_buffers = [
torch.empty(IO_SIZE, dtype=torch.uint8, device=get_accelerator().device_name())
for _ in range(async_queue)
]
elif use_cuda_pinned_tensor:
aio_buffers = [
get_accelerator().pin_memory(torch.empty(IO_SIZE, dtype=torch.uint8, device='cpu'))
for _ in range(async_queue)
]
else:
tmp_tensor = torch.empty(0, dtype=torch.uint8)
aio_buffers = [h.new_cpu_locked_tensor(IO_SIZE, tmp_tensor) for _ in range(async_queue)]
use_cpu_locked_tensor = True
_validate_handle_state(h, single_submit, overlap_events)
for i in range(async_queue):
read_status = h.async_pread(aio_buffers[i], ref_files[i])
assert read_status == 0
wait_status = h.wait()
assert wait_status == async_queue
for i in range(async_queue):
with open(ref_files[i], 'rb') as f:
ref_buffer = list(f.read())
assert ref_buffer == aio_buffers[i].tolist()
if use_cpu_locked_tensor:
for t in aio_buffers:
h.free_cpu_locked_tensor(t)
@pytest.mark.parametrize("async_queue", [2, 3])
def test_write(self, tmpdir, use_cuda_pinned_tensor, async_queue, cuda_device):
_skip_for_invalid_environment(use_cuda_device=cuda_device, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
ref_files = []
ref_buffers = []
for i in range(async_queue):
f, buf = _do_ref_write(tmpdir, i)
ref_files.append(f)
ref_buffers.append(buf)
single_submit = True
overlap_events = True
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
aio_files = []
aio_buffers = []
for i in range(async_queue):
if cuda_device:
f, buf = _get_test_write_file_and_cuda_buffer(tmpdir, ref_buffers[i], i)
elif use_cuda_pinned_tensor:
f, buf = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffers[i], None, i)
else:
f, buf = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffers[i], h, i)
aio_files.append(f)
aio_buffers.append(buf)
use_cpu_locked_tensor = not (cuda_device or use_cuda_pinned_tensor)
_validate_handle_state(h, single_submit, overlap_events)
for i in range(async_queue):
write_status = h.async_pwrite(aio_buffers[i], aio_files[i])
assert write_status == 0
wait_status = h.wait()
assert wait_status == async_queue
if use_cpu_locked_tensor:
for t in aio_buffers:
h.free_cpu_locked_tensor(t)
for i in range(async_queue):
assert os.path.isfile(aio_files[i])
filecmp.clear_cache()
assert filecmp.cmp(ref_files[i], aio_files[i], shallow=False)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import numpy as np
import pytest
import deepspeed
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import CPUAdagradBuilder
from unit.common import DistributedTest
if not deepspeed.ops.__compatible_ops__[CPUAdagradBuilder.NAME]:
pytest.skip("cpu-adagrad is not compatible", allow_module_level=True)
def check_equal(first, second, atol=1e-2, verbose=False):
x = first.detach().numpy()
y = second.detach().numpy()
if verbose:
print("x = {}".format(x.flatten()))
print("y = {}".format(y.flatten()))
print('-' * 80)
np.testing.assert_allclose(x, y, err_msg="param-update mismatch!", atol=atol)
class TestCPUAdagrad(DistributedTest):
world_size = 1
requires_cuda_env = False
if not get_accelerator().is_available():
init_distributed = False
set_dist_env = False
@pytest.mark.parametrize('model_size',
[
(64),
(22),
(55),
(127),
(1024),
(1048576),
(30000000),
]) # yapf: disable
def test_cpu_adagrad_opt(self, model_size):
device = 'cpu'
rng_state = torch.get_rng_state()
param = torch.nn.Parameter(torch.randn(model_size, device=device))
torch.set_rng_state(rng_state)
param1 = torch.nn.Parameter(torch.randn(model_size, device=device))
torch.set_rng_state(rng_state)
optimizer = DeepSpeedCPUAdagrad([param])
optimizer1 = torch.optim.Adagrad([param1])
for i in range(10):
rng_state = torch.get_rng_state()
param.grad = torch.randn(model_size, device=device)
torch.set_rng_state(rng_state)
param1.grad = torch.randn(model_size, device=device)
optimizer.step()
optimizer1.step()
check_equal(param, param1, atol=1e-2, verbose=True)
@pytest.mark.parametrize('model_size,vocabulary_size,dim',
[
(16 * 2, 16 * 4, 16),
(16 * 32, 16 * 256, 16),
(16 * 256, 16 * 16384, 16),
]) # yapf: disable
def test_cpu_adagrad_opt_sparse_embedding(self, model_size, vocabulary_size, dim):
device = 'cpu'
rng_state = torch.get_rng_state()
def gen_sparse_grad(vocabulary_size, dim, num_indices, dtype, device):
i = torch.randint(vocabulary_size, size=(1, num_indices), dtype=torch.int64, device=device)
v = torch.randn(num_indices, dim, dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, (vocabulary_size, dim), device=device)
t = t.coalesce()
new_i = (t.indices().view(-1, 1).repeat(1, dim) * dim + torch.tensor(range(dim))).flatten().unsqueeze(0)
new_v = t.values().flatten()
new_t = torch.sparse_coo_tensor(new_i, new_v, (vocabulary_size * dim, ), device=device)
new_t = new_t.coalesce()
new_t.requires_grad = False
return new_t
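# gen_sparse_grad builds an embedding-style sparse gradient: it samples num_indices rows of a
# (vocabulary_size, dim) table, then expands the row indices into per-element indices so the
# result is a 1-D sparse tensor of length vocabulary_size * dim, matching the flattened parameter.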
voc_size = vocabulary_size
dim = dim
num_indices = int(model_size // dim)
dtype = torch.float32
param = torch.nn.Parameter(torch.randn((voc_size * dim, ), dtype=dtype, device=device), requires_grad=True)
torch.set_rng_state(rng_state)
param1 = torch.nn.Parameter(torch.randn((voc_size * dim, ), dtype=dtype, device=device), requires_grad=True)
torch.set_rng_state(rng_state)
optimizer = DeepSpeedCPUAdagrad([param])
optimizer1 = torch.optim.Adagrad([param1])
for i in range(10):
torch.set_rng_state(rng_state)
param.grad = gen_sparse_grad(voc_size, dim, num_indices, dtype=dtype, device=device)
torch.set_rng_state(rng_state)
param1.grad = gen_sparse_grad(voc_size, dim, num_indices, dtype=dtype, device=device)
optimizer.step()
optimizer1.step()
check_equal(param, param1, atol=1e-2, verbose=True)
class TestCPUAdagradGPUError(DistributedTest):
def test_cpu_adagrad_gpu_error(self):
model_size = 64
device = get_accelerator().device_name(0) # 'cuda:0' or 'xpu:0'
param = torch.nn.Parameter(torch.randn(model_size, device=device))
optimizer = DeepSpeedCPUAdagrad([param])
param.grad = torch.randn(model_size, device=device)
with pytest.raises(AssertionError):
optimizer.step()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
from deepspeed.ops.transformer.inference.bias_add import nhwc_bias_add
from deepspeed.accelerator import get_accelerator
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = {torch.float32: (5e-3, 5e-4), torch.float16: (3e-2, 2e-3), torch.int8: (1, 1)}[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
def ref_bias_add(activations, bias):
return activations + bias.reshape(1, -1, 1, 1)
channels_list = [192, 384, 320, 576, 640, 768, 960, 1152, 1280, 1536, 1600, 1920, 2240, 2560]
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2, 10])
@pytest.mark.parametrize("image_size", [16, 32, 64])
@pytest.mark.parametrize("channels", channels_list)
def test_bias_add(batch, image_size, channels):
activations = torch.randn((batch, channels, image_size, image_size),
dtype=torch.float16,
device=get_accelerator().device_name()).to(memory_format=torch.channels_last)
bias = torch.randn((channels), dtype=torch.float16, device=get_accelerator().device_name())
ref_vals = ref_bias_add(activations.clone().detach(), bias)
ds_vals = nhwc_bias_add(activations, bias)
assert allclose(ds_vals, ref_vals)
def ref_bias_add_add(activations, bias, other):
return (activations + bias.reshape(1, -1, 1, 1)) + other
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2, 10])
@pytest.mark.parametrize("image_size", [16, 32, 64])
@pytest.mark.parametrize("channels", channels_list)
def test_bias_add_add(batch, image_size, channels):
activations = torch.randn((batch, channels, image_size, image_size),
dtype=torch.float16,
device=get_accelerator().device_name()).to(memory_format=torch.channels_last)
other = torch.randn((batch, channels, image_size, image_size),
dtype=torch.float16,
device=get_accelerator().device_name()).to(memory_format=torch.channels_last)
bias = torch.randn((channels), dtype=torch.float16, device=get_accelerator().device_name())
ref_vals = ref_bias_add_add(activations.clone().detach(), bias, other)
ds_vals = nhwc_bias_add(activations, bias, other=other)
assert allclose(ds_vals, ref_vals)
def ref_bias_add_bias_add(activations, bias, other, other_bias):
return (activations + bias.reshape(1, -1, 1, 1)) + (other + other_bias.reshape(1, -1, 1, 1))
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2, 10])
@pytest.mark.parametrize("image_size", [16, 32, 64])
@pytest.mark.parametrize("channels", channels_list)
def test_bias_add_bias_add(batch, image_size, channels):
activations = torch.randn((batch, channels, image_size, image_size),
dtype=torch.float16,
device=get_accelerator().device_name()).to(memory_format=torch.channels_last)
other = torch.randn((batch, channels, image_size, image_size),
dtype=torch.float16,
device=get_accelerator().device_name()).to(memory_format=torch.channels_last)
bias = torch.randn((channels), dtype=torch.float16, device=get_accelerator().device_name())
other_bias = torch.randn((channels), dtype=torch.float16, device=get_accelerator().device_name())
ref_vals = ref_bias_add_bias_add(activations.clone().detach(), bias, other, other_bias)
ds_vals = nhwc_bias_add(activations, bias, other=other, other_bias=other_bias)
assert allclose(ds_vals, ref_vals)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import pytest
from deepspeed.accelerator import get_accelerator
from deepspeed.ops import op_builder
quantizer_cuda_module = None
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = {torch.float32: (2e-2, 5e-3), torch.float16: (2e-2, 5e-3)}[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
def quantize_dequantize_ref(inputs, bit, num_groups=1):
# quantize
q_range = 2**bit
input_flat = inputs.float().reshape(num_groups, -1).contiguous()
input_flat = torch.nan_to_num(input_flat, nan=0.0)
input_min = input_flat.amin(-1, keepdim=True)
input_max = input_flat.amax(-1, keepdim=True)
scale = q_range / (2 * torch.max(input_min.abs(), input_max.abs() + 1e-5))
input_flat = (input_flat * scale).round().clamp(-q_range // 2, q_range // 2 - 1)
# dequantize
dequant_flat = torch.t(input_flat.to(torch.int8)) / scale.view(-1).to(torch.float16)
return torch.t(dequant_flat).reshape(inputs.shape)
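# Illustrative worked example of the reference above (values chosen for clarity, not taken from
# the test): with bit=8, q_range = 256; if a group's largest magnitude is 0.5, then
# scale ~= 256 / (2 * 0.5) = 256, so an input of 0.1 quantizes to round(0.1 * 256) = 26 and
# dequantizes back to 26 / 256 ~= 0.1016.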
def run_quant_dequant(inputs, groups, bits):
global quantizer_cuda_module
if quantizer_cuda_module is None:
quantizer_cuda_module = op_builder.QuantizerBuilder().load()
return quantizer_cuda_module.ds_quantize_fp16(inputs, groups, bits)
@pytest.mark.inference_ops
@pytest.mark.parametrize("tensor_shape", [(16, 4096), (128, 256)])
# Test with two tensor shapes as (16, 4096) and (128, 256).
@pytest.mark.parametrize("groups", [1, 16])
# Test with number of quant groups as 1 and 16.
# Note that we have an explicit boundary for groups: (((size / groups) - 1) / 4096 + 1) <= MAX_REG.
def test_fake_quant_dequant(tensor_shape, groups):
input_tensor = torch.rand((tensor_shape), dtype=torch.float16).to(get_accelerator().device_name())
# 8-bit quantization.
ref_input_8bit = input_tensor.clone().detach()
ds_input_8bit = input_tensor.clone().detach()
ref_out_8bit = quantize_dequantize_ref(ref_input_8bit, 8, groups)
# run_quant_dequant will do quantize then dequantize, and return the dequantized value.
ds_out_8bit = run_quant_dequant(ds_input_8bit, groups, 8)
assert (allclose(ds_out_8bit, ref_out_8bit))
# 4-bit quantization.
ref_input_4bit = input_tensor.clone().detach()
ds_input_4bit = input_tensor.clone().detach()
ref_out_4bit = quantize_dequantize_ref(ref_input_4bit, 4, groups)
ds_out_4bit = run_quant_dequant(ds_input_4bit, groups, 4)
assert (allclose(ds_out_4bit, ref_out_4bit))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
from deepspeed.ops import op_builder
from deepspeed.accelerator import get_accelerator
inference_module = None
def run_quantize_ds(activations, num_groups, q_bits, is_symmetric_quant):
global inference_module
if inference_module is None:
inference_module = op_builder.QuantizerBuilder().load()
return inference_module.quantize(activations, num_groups, q_bits,
inference_module.Symmetric if is_symmetric_quant else inference_module.Asymmetric)
def run_dequantize_ds(activations, params, num_groups, q_bits, is_symmetric_quant):
global inference_module
if inference_module is None:
inference_module = op_builder.QuantizerBuilder().load()
return inference_module.dequantize(
activations,
params,
num_groups,
q_bits,
inference_module.Symmetric if is_symmetric_quant else inference_module.Asymmetric,
)
def get_q_props(q_bits):
q_range = 2**q_bits
q_min = -(2**(q_bits - 1))
q_max = (2**(q_bits - 1) - 1)
q_min = torch.IntTensor([q_min]).to(device=get_accelerator().device_name())
q_max = torch.IntTensor([q_max]).to(device=get_accelerator().device_name())
return q_range, q_max, q_min
def get_scale_zero_point(q_bits, is_symmetric_quant, max, min, absmax, scales=None, zero_points=None):
q_range, q_max, q_min = get_q_props(q_bits)
if is_symmetric_quant:
scale = torch.empty_like(absmax)
for i, x in enumerate(absmax):
scale[i] = torch.ones_like(x) if x == 0 else q_range / (2 * x)
zero_point = torch.zeros(scale.shape, dtype=torch.float32, device=get_accelerator().device_name())
else:
scale = torch.empty_like(max)
for i, x in enumerate(max):
scale[i] = torch.ones_like(x) if max[i] == min[i] else q_range / (max[i] - min[i])
zero_point = q_min - (min * scale)
return scale, zero_point
def int4x2to2xint4(int4X2tensor):
high = int4X2tensor >> 4
low = (int4X2tensor << 4) >> 4
return torch.stack((high, low), dim=-1).flatten()
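# Each int8 element packs two signed 4-bit values: the arithmetic right shift extracts the
# sign-extended high nibble, while shifting left then right sign-extends the low nibble; the two
# are interleaved back into a flat tensor with twice as many elements.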
def run_float_quantize(q_bits, is_symmetric_quant, activations_ref, num_groups):
# Reference implementation
# https://pytorch.org/docs/stable/quantization-support.html
activations_ref = activations_ref.reshape(num_groups, -1).to(dtype=torch.float32)
max_abs_activations_ref = torch.amax(torch.abs(activations_ref), dim=-1).view(num_groups, -1)
max_activations_ref = torch.amax(activations_ref, dim=-1).view(num_groups, -1)
min_activations_ref = torch.amin(activations_ref, dim=-1).view(num_groups, -1)
_, q_max, q_min = get_q_props(q_bits)
scale, zero_point = get_scale_zero_point(q_bits, is_symmetric_quant, max_activations_ref, min_activations_ref,
max_abs_activations_ref)
data_f = activations_ref * scale
if not is_symmetric_quant:
data_f = data_f + zero_point
data_i32 = torch.round(data_f).to(dtype=torch.int32)
data_i32 = torch.minimum(torch.maximum(data_i32, q_min.expand_as(data_i32)), q_max.expand_as(data_i32))
data_i8 = data_i32.to(dtype=torch.int8)
scales = (1.0 / scale).reshape(-1, 1)
offsets = zero_point.reshape(-1, 1)
params = torch.cat((scales, offsets), dim=-1)
return data_i8, params
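# params holds the per-group dequantization parameters: column 0 is 1 / scale (the step size
# applied when dequantizing) and column 1 is the zero point, which is all zeros in the symmetric
# case; run_float_dequantize below reads them back in that order.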
def run_float_dequantize(q_bits, is_symmetric_quant, data_i8, params, num_groups):
data_f = data_i8.reshape(num_groups, -1).to(dtype=torch.float32)
scales = params[:, 0].reshape(-1, 1)
offsets = params[:, 1].reshape(-1, 1)
if not is_symmetric_quant:
data_f = data_f - offsets
else:
assert offsets.allclose(torch.zeros_like(offsets))
data_f = data_f * scales
return data_f
@pytest.mark.inference_ops
@pytest.mark.parametrize("num_groups", [1, 13, 512])
@pytest.mark.parametrize("num_elems", [8, 16, 32, 64, 128, 256, 4096, 8192, 12288, 16384])
@pytest.mark.parametrize("is_symmetric_quant", [True, False])
@pytest.mark.parametrize("q_bits", [4, 8])
@pytest.mark.parametrize("directed_case", ["all_zeros", None])
def test_float_quantize(num_elems, num_groups, is_symmetric_quant, q_bits, directed_case):
# fix seed
torch.manual_seed(num_elems)
if directed_case == "all_zeros":
activations_ds = torch.zeros((num_groups, num_elems),
dtype=torch.float16,
device=get_accelerator().device_name())
else:
activations_ds = torch.randn((num_groups, num_elems),
dtype=torch.float16,
device=get_accelerator().device_name())
activations_ref = activations_ds.clone().detach()
ref_out_tensor, ref_params = run_float_quantize(q_bits, is_symmetric_quant, activations_ref, num_groups)
ref_dequantized_tensor = run_float_dequantize(q_bits, is_symmetric_quant, ref_out_tensor, ref_params, num_groups)
# we need to convert the tensor to float64 to avoid overflow
ref_quantization_error = torch.sum(torch.abs((activations_ref - ref_dequantized_tensor).to(torch.float64)))
ds_out_tensor, ds_out_params = run_quantize_ds(activations_ds, num_groups, q_bits, is_symmetric_quant)
ds_dequantized_tensor = run_dequantize_ds(ds_out_tensor, ds_out_params, num_groups, q_bits, is_symmetric_quant)
assert torch.all(torch.isfinite(ds_dequantized_tensor))
ds_quantization_error = torch.sum(torch.abs((activations_ds - ds_dequantized_tensor).to(torch.float64)))
assert (ds_quantization_error <= ref_quantization_error * 1.05)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
import torch
import pytest
from deepspeed.ops.adam import FusedAdam
from deepspeed.ops.adam import DeepSpeedCPUAdam
from unit.common import DistributedTest
from unit.simple_model import SimpleModel
# yapf: disable
# optimizer, zero_offload, torch_adam, adam_w_mode, resulting_optimizer
adam_configs = [["AdamW", False, False, False, (FusedAdam, True)],
["AdamW", False, True, False, (torch.optim.AdamW, None)],
["AdamW", True, False, False, (DeepSpeedCPUAdam, True)],
["AdamW", True, True, False, (torch.optim.AdamW, None)],
["AdamW", False, False, True, (FusedAdam, True)],
["AdamW", False, True, True, (torch.optim.AdamW, None)],
["AdamW", True, False, True, (DeepSpeedCPUAdam, True)],
["AdamW", True, True, True, (torch.optim.AdamW, None)],
["Adam", False, False, False, (FusedAdam, False)],
["Adam", False, True, False, (torch.optim.Adam, None)],
["Adam", True, False, False, (DeepSpeedCPUAdam, False)],
["Adam", True, True, False, (torch.optim.Adam, None)],
["Adam", False, False, True, (FusedAdam, True)],
["Adam", False, True, True, (torch.optim.AdamW, None)],
["Adam", True, False, True, (DeepSpeedCPUAdam, True)],
["Adam", True, True, True, (torch.optim.AdamW, None)]]
@pytest.mark.parametrize(
'optimizer, zero_offload, torch_adam, adam_w_mode, resulting_optimizer',
adam_configs)
class TestAdamConfigs(DistributedTest):
world_size = 1
def test(self,
optimizer,
zero_offload,
torch_adam,
adam_w_mode,
resulting_optimizer):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": optimizer,
"params": {
"lr": 0.00015,
"torch_adam": torch_adam,
"adam_w_mode": adam_w_mode
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": 2,
"cpu_offload": zero_offload
}
}
model = SimpleModel(10)
model, _, _, _ = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters())
# get base optimizer under zero
ds_optimizer = model.optimizer.optimizer
opt_class, adam_w_mode = resulting_optimizer
assert isinstance(ds_optimizer, opt_class)
if adam_w_mode in [True, False]:
assert ds_optimizer.adam_w_mode == adam_w_mode
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import numpy as np
import pytest
from cpuinfo import get_cpu_info
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.adam import FusedAdam
from deepspeed.ops.op_builder import CPUAdamBuilder
from unit.common import DistributedTest
if not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible", allow_module_level=True)
pytest.cpu_vendor = get_cpu_info()["vendor_id_raw"].lower()
def check_equal(first, second, atol=1e-2, verbose=False):
x = first.detach().numpy()
y = second.detach().numpy()
print("ATOL", atol)
if verbose:
print("x = {}".format(x.flatten()))
print("y = {}".format(y.flatten()))
print('-' * 80)
np.testing.assert_allclose(x, y, err_msg="param-update mismatch!", atol=atol)
def _compare_optimizers(model_size, param1, optimizer1, param2, optimizer2):
for i in range(10):
param1.grad = torch.randn(model_size, device=param1.device).to(param1.dtype)
param2.grad = param1.grad.clone().detach().to(device=param2.device, dtype=param2.dtype)
optimizer1.step()
optimizer2.step()
tolerance = param1.float().norm().detach().numpy() * 1e-2
check_equal(param1.float().norm(), param2.float().cpu().norm(), atol=tolerance, verbose=True)
@pytest.mark.parametrize('dtype', [torch.half, torch.float], ids=["fp16", "fp32"])
@pytest.mark.parametrize('model_size',
[
(64),
(22),
#(55),
(128),
(1024),
(1048576),
]) # yapf: disable
class TestCPUAdam(DistributedTest):
world_size = 1
requires_cuda_env = False
if not get_accelerator().is_available():
init_distributed = False
set_dist_env = False
@pytest.mark.skipif(not get_accelerator().is_available(), reason="only supported in CUDA environments.")
def test_fused_adam_equal(self, dtype, model_size):
if ("amd" in pytest.cpu_vendor) and (dtype == torch.half):
pytest.skip("cpu-adam with half precision not supported on AMD CPUs")
from deepspeed.ops.adam import DeepSpeedCPUAdam
cpu_data = torch.randn(model_size, device='cpu').to(dtype)
cpu_param = torch.nn.Parameter(cpu_data)
cuda_param = torch.nn.Parameter(cpu_data.to(get_accelerator().device_name()))
# tolerance = cpu_param.float().norm().detach().numpy() * 1e-2
# check_equal(cpu_param.float().norm(),
# cuda_param.float().cpu().norm(),
# atol=tolerance,
# verbose=True)
cpu_optimizer = DeepSpeedCPUAdam([cpu_param])
cuda_optimizer = FusedAdam([cuda_param])
_compare_optimizers(model_size=model_size,
param1=cpu_param,
optimizer1=cpu_optimizer,
param2=cuda_param,
optimizer2=cuda_optimizer)
def test_torch_adamw_equal(self, dtype, model_size):
if get_accelerator().is_available():
if ("amd" in pytest.cpu_vendor) and (dtype == torch.half):
pytest.skip("cpu-adam with half precision not supported on AMD CPUs")
ref_param_device = get_accelerator().device_name()
else:
if dtype == torch.half:
pytest.skip("torch.optim.AdamW with half precision only supported in CUDA environments.")
ref_param_device = 'cpu'
from deepspeed.ops.adam import DeepSpeedCPUAdam
cpu_data = torch.randn(model_size, device='cpu').to(dtype)
cpu_param = torch.nn.Parameter(cpu_data)
ref_param = torch.nn.Parameter(cpu_data.to(ref_param_device))
cpu_optimizer = DeepSpeedCPUAdam([cpu_param])
ref_optimizer = torch.optim.AdamW([ref_param])
_compare_optimizers(model_size=model_size,
param1=cpu_param,
optimizer1=cpu_optimizer,
param2=ref_param,
optimizer2=ref_optimizer)
class TestCPUAdamGPUError(DistributedTest):
def test_cpu_adam_gpu_error(self):
model_size = 64
from deepspeed.ops.adam import DeepSpeedCPUAdam
device = get_accelerator().device_name(0) # 'cuda:0' or 'xpu:0'
param = torch.nn.Parameter(torch.randn(model_size, device=device))
optimizer = DeepSpeedCPUAdam([param])
param.grad = torch.randn(model_size, device=device)
with pytest.raises(AssertionError):
optimizer.step()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import pytest
import deepspeed
from deepspeed.ops.op_builder import OpBuilder
from unit.common import DistributedTest
from transformers import (AutoConfig, AutoTokenizer, AutoModelForCausalLM)
rocm_version = OpBuilder.installed_rocm_version()
if rocm_version != (0, 0):
pytest.skip("skip inference tests on rocm for now", allow_module_level=True)
@pytest.mark.seq_inference
@pytest.mark.parametrize("batch_size", [1, 2], ids=["bsz=1", "bsz=2"])
@pytest.mark.parametrize("model_name", ["EleutherAI/gpt-neo-1.3B", "facebook/opt-1.3b"])
class TestHybridEngineTextGen(DistributedTest):
world_size = 1
def _generate(self, model, tokenizer, prompt):
local_rank = int(os.getenv("LOCAL_RANK", "0"))
tokens = tokenizer.batch_encode_plus(prompt, return_tensors="pt", padding=True)
for t in tokens:
if torch.is_tensor(tokens[t]):
tokens[t] = tokens[t].to(f'cuda:{local_rank}')
output = model.generate(**tokens, do_sample=False, max_length=100)
outputs = tokenizer.batch_decode(output, skip_special_tokens=True)
return outputs
def get_model(self, model_name):
local_rank = int(os.getenv("LOCAL_RANK", "0"))
model_config = AutoConfig.from_pretrained(model_name)
model_config.dropout = 0.0
model = AutoModelForCausalLM.from_pretrained(model_name, config=model_config)
model = model.half()
model = model.to(f'cuda:{local_rank}')
return model
def get_tokenizer(self, model_name):
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
return tokenizer
def get_prompt(self, batch_size):
if batch_size == 1:
prompt = ["Microsoft is in Washington"]
elif batch_size == 2:
prompt = ["DeepSpeed is", "Microsoft is in Washington"]
else:
raise NotImplementedError(f"batch_size {batch_size} not implemented")
return prompt
def test_correctness(self, batch_size, model_name):
pytest.skip("skip test for now, will fix in follow-up PR")
model = self.get_model(model_name)
tokenizer = self.get_tokenizer(model_name)
prompt = self.get_prompt(batch_size)
base_out = self._generate(model, tokenizer, prompt)
ds_config = {"train_batch_size": 1, "fp16": {"enabled": True}, "hybrid_engine": {"enabled": True}}
model, *_ = deepspeed.initialize(model=model, config=ds_config)
model.eval()
ds1_out = self._generate(model, tokenizer, prompt)
assert base_out == ds1_out, f"base_out: {base_out}, ds1_out: {ds1_out}"
model.train()
model.eval()
ds2_out = self._generate(model, tokenizer, prompt)
assert base_out == ds2_out
def test_functionality(self, batch_size, model_name):
model = self.get_model(model_name)
tokenizer = self.get_tokenizer(model_name)
prompt = self.get_prompt(batch_size)
ds_config = {"train_batch_size": 1, "fp16": {"enabled": True}, "hybrid_engine": {"enabled": True}}
model, *_ = deepspeed.initialize(model=model, config=ds_config)
model.eval()
ds1_out = self._generate(model, tokenizer, prompt)
model.train()
model.eval()
ds2_out = self._generate(model, tokenizer, prompt)
assert ds1_out == ds2_out, f"ds1_out: {ds1_out}, ds2_out: {ds2_out}"
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import pytest
import deepspeed
from deepspeed.ops.op_builder import OpBuilder
from unit.common import DistributedTest
from transformers import (AutoConfig, AutoTokenizer, AutoModelForCausalLM)
rocm_version = OpBuilder.installed_rocm_version()
if rocm_version != (0, 0):
pytest.skip("skip inference tests on rocm for now", allow_module_level=True)
@pytest.mark.seq_inference
@pytest.mark.parametrize("batch_size", [1, 2], ids=["bsz=1", "bsz=2"])
@pytest.mark.parametrize("model_name", ["huggyllama/llama-7b"])
class TestHybridEngineLlama(DistributedTest):
world_size = 1
def _generate(self, model, tokenizer, prompt):
local_rank = int(os.getenv("LOCAL_RANK", "0"))
tokens = tokenizer.batch_encode_plus(prompt, return_tensors="pt", padding=True)
for t in tokens:
if torch.is_tensor(tokens[t]):
tokens[t] = tokens[t].to(f'cuda:{local_rank}')
#output = model.generate(**tokens, do_sample=False, max_length=100)
output = model.generate(tokens.input_ids, do_sample=False, max_length=100)
outputs = tokenizer.batch_decode(output, skip_special_tokens=True)
return outputs
def get_model(self, model_name):
local_rank = int(os.getenv("LOCAL_RANK", "0"))
model_config = AutoConfig.from_pretrained(model_name)
model_config.dropout = 0.0
model = AutoModelForCausalLM.from_pretrained(model_name, config=model_config)
# Make the model smaller so we can run it on a single GPU in CI
_ = [model.model.layers.pop(-1) for _ in range(8)]
model = model.half()
model = model.to(f'cuda:{local_rank}')
return model
def get_tokenizer(self, model_name):
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
return tokenizer
def get_prompt(self, batch_size):
if batch_size == 1:
prompt = ["Microsoft is in Washington"]
elif batch_size == 2:
prompt = ["DeepSpeed is", "Microsoft is in Washington"]
else:
raise NotImplementedError(f"batch_size {batch_size} not implemented")
return prompt
def test_correctness(self, batch_size, model_name):
pytest.skip("skip test for now, will fix in follow-up PR")
model = self.get_model(model_name)
tokenizer = self.get_tokenizer(model_name)
prompt = self.get_prompt(batch_size)
base_out = self._generate(model, tokenizer, prompt)
ds_config = {"train_batch_size": 1, "fp16": {"enabled": True}, "hybrid_engine": {"enabled": True}}
model, *_ = deepspeed.initialize(model=model, config=ds_config)
model.eval()
ds1_out = self._generate(model, tokenizer, prompt)
assert base_out == ds1_out, f"base_out: {base_out}, ds1_out: {ds1_out}"
model.train()
model.eval()
ds2_out = self._generate(model, tokenizer, prompt)
assert base_out == ds2_out
def test_functionality(self, batch_size, model_name):
model = self.get_model(model_name)
tokenizer = self.get_tokenizer(model_name)
prompt = self.get_prompt(batch_size)
ds_config = {"train_batch_size": 1, "fp16": {"enabled": True}, "hybrid_engine": {"enabled": True}}
model, *_ = deepspeed.initialize(model=model, config=ds_config)
model.eval()
ds1_out = self._generate(model, tokenizer, prompt)
model.train()
model.eval()
ds2_out = self._generate(model, tokenizer, prompt)
assert ds1_out == ds2_out, f"ds1_out: {ds1_out}, ds2_out: {ds2_out}"
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import pytest
from unit.simple_model import create_config_from_dict
from deepspeed.launcher import runner as dsrun
from deepspeed.autotuning.autotuner import Autotuner
from deepspeed.autotuning.scheduler import ResourceManager
RUN_OPTION = 'run'
TUNE_OPTION = 'tune'
def test_command_line():
'''Validate handling of command line arguments'''
for opt in [RUN_OPTION, TUNE_OPTION]:
dsrun.parse_args(args=f"--num_nodes 1 --num_gpus 1 --autotuning {opt} foo.py".split())
for error_opts in [
"--autotuning --num_nodes 1 --num_gpus 1 foo.py".split(),
"--autotuning test --num_nodes 1 -- num_gpus 1 foo.py".split(), "--autotuning".split()
]:
with pytest.raises(SystemExit):
dsrun.parse_args(args=error_opts)
@pytest.mark.parametrize("arg_mappings",
[
None,
{
},
{
"train_micro_batch_size_per_gpu": "--per_device_train_batch_size"
},
{
"train_micro_batch_size_per_gpu": "--per_device_train_batch_size",
"gradient_accumulation_steps": "--gradient_accumulation_steps"
},
{
"train_batch_size": "-tbs"
}
]) # yapf: disable
def test_resource_manager_arg_mappings(arg_mappings):
rm = ResourceManager(args=None,
hosts="worker-0, worker-1",
num_gpus_per_node=4,
results_dir=None,
exps_dir=None,
arg_mappings=arg_mappings)
if arg_mappings is not None:
for k, v in arg_mappings.items():
assert k.strip() in rm.arg_mappings.keys()
assert arg_mappings[k.strip()].strip() == rm.arg_mappings[k.strip()]
@pytest.mark.parametrize("active_resources",
[
{"worker-0": [0, 1, 2, 3]},
{"worker-0": [0, 1, 2, 3], "worker-1": [0, 1, 2, 3]},
{"worker-0": [0], "worker-1": [0, 1, 2], "worker-2": [0, 1, 2]},
{"worker-0": [0, 1], "worker-2": [4, 5]}
]
) # yapf: disable
def test_autotuner_resources(tmpdir, active_resources):
config_dict = {"autotuning": {"enabled": True, "exps_dir": os.path.join(tmpdir, 'exps_dir'), "arg_mappings": {}}}
config_path = create_config_from_dict(tmpdir, config_dict)
args = dsrun.parse_args(args=f'--autotuning {TUNE_OPTION} foo.py --deepspeed_config {config_path}'.split())
tuner = Autotuner(args=args, active_resources=active_resources)
expected_num_nodes = len(list(active_resources.keys()))
assert expected_num_nodes == tuner.exp_num_nodes
expected_num_gpus = min([len(v) for v in active_resources.values()])
assert expected_num_gpus == tuner.exp_num_gpus
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
import pytest
from unit.common import DistributedTest
from unit.simple_model import SimplePRMoEModel, SimpleMoEModel, sequence_dataloader
from unit.util import required_torch_version
@pytest.mark.parametrize("ep_size", [2, 4])
@pytest.mark.parametrize("use_residual", [True, False])
class TestMoE(DistributedTest):
world_size = 4
def test(self, ep_size, use_residual):
if not required_torch_version():
pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly")
config_dict = {"train_batch_size": 8, "steps_per_print": 1, "fp16": {"enabled": True}}
hidden_dim = 16
# E+D -- ep_size = 2
# E only -- ep_size = 4
model = SimpleMoEModel(hidden_dim, ep_size=ep_size, use_residual=use_residual)
optimizer = torch.optim.AdamW(params=model.parameters())
model, _, _, _ = deepspeed.initialize(config=config_dict,
model=model,
optimizer=optimizer,
dist_init_required=False)
#dist_init_required=False -- parameterize to True/False?
data_loader = sequence_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
@pytest.mark.parametrize("ep_size, use_residual", [(2, True), (2, False)])
class TestPRMoE(DistributedTest):
world_size = 4
def test(self, ep_size, use_residual):
if not required_torch_version():
pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly")
config_dict = {"train_batch_size": 8, "steps_per_print": 1, "fp16": {"enabled": True}}
hidden_dim = 16
# E+D -- ep_size = 2
# E only -- ep_size = 4
model = SimplePRMoEModel(hidden_dim, ep_size=ep_size, use_residual=use_residual)
optimizer = torch.optim.AdamW(params=model.parameters())
model, _, _, _ = deepspeed.initialize(config=config_dict,
model=model,
optimizer=optimizer,
dist_init_required=False)
data_loader = sequence_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
import pytest
from unit.common import DistributedTest
from unit.util import required_torch_version
from deepspeed.moe.layer import MoE
class MPU():
def __init__(self, tp_world_size):
self.rank = deepspeed.comm.get_rank()
self.world_size = deepspeed.comm.get_world_size()
self.tp_world_size = tp_world_size
for i in range(0, self.world_size, tp_world_size):
ranks = range(i, i + tp_world_size)
group = deepspeed.comm.new_group(ranks)
if self.rank in ranks:
self.tp_group = group
for i in range(0, tp_world_size):
ranks = range(i, self.world_size, tp_world_size)
group = deepspeed.comm.new_group(ranks)
if self.rank in ranks:
self.dp_group = group
def get_model_parallel_rank(self):
return self.rank % self.tp_world_size
def get_model_parallel_world_size(self):
return self.tp_world_size
def get_data_parallel_rank(self):
return self.rank // self.tp_world_size
def get_data_parallel_world_size(self):
return self.world_size // self.tp_world_size
def get_data_parallel_group(self):
return self.dp_group
def get_model_parallel_group(self):
return self.tp_group
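# Example of the rank layout this MPU produces: with world_size = 4 and tp_world_size = 2, the
# tensor-parallel groups are [0, 1] and [2, 3] and the data-parallel groups are [0, 2] and [1, 3];
# rank 3 then has model-parallel rank 1 and data-parallel rank 1.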
@pytest.mark.parametrize("ep_size, tp_size", [(1, 2), (1, 4), (2, 2)])
@pytest.mark.parametrize("enable_expert_tp", [True, False])
@pytest.mark.parametrize("use_residual", [True, False])
class TestMOETensorParallel(DistributedTest):
world_size = 4
def test(self, ep_size, tp_size, enable_expert_tp, use_residual):
# TODO: replace this with a true parallel mlp in the future
# and run convergence tests
if not required_torch_version():
pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly")
config_dict = {"train_batch_size": 8, "steps_per_print": 1, "fp16": {"enabled": True}}
hidden_dim = 16
tensor_parallel_expert = torch.nn.Sequential(torch.nn.Linear(hidden_dim, 4 * hidden_dim // tp_size),
torch.nn.ReLU(),
torch.nn.Linear(4 * hidden_dim // tp_size, hidden_dim))
# set num experts to world size
world_size = deepspeed.comm.get_world_size()
model = MoE(
hidden_size=hidden_dim,
expert=tensor_parallel_expert,
num_experts=world_size,
ep_size=ep_size,
use_residual=use_residual,
enable_expert_tensor_parallelism=enable_expert_tp,
)
optimizer = torch.optim.AdamW(params=model.parameters())
model, _, _, _ = deepspeed.initialize(config=config_dict,
model=model,
optimizer=optimizer,
dist_init_required=False,
mpu=MPU(tp_size))
assert model.num_local_experts == world_size // ep_size
if enable_expert_tp:
assert deepspeed.utils.groups._get_expert_model_parallel_world_size() == tp_size
else:
assert deepspeed.utils.groups._get_expert_model_parallel_world_size() == 1
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed.comm as dist
import numpy as np
import argparse
import deepspeed
import os
from deepspeed.runtime.comm.nccl import NcclBackend
from deepspeed.utils.timer import SynchronizedWallClockTimer
from deepspeed.accelerator import get_accelerator
from statistics import mean
timers = SynchronizedWallClockTimer()
parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=-1)
args = parser.parse_args()
deepspeed.init_distributed(dist_backend=get_accelerator().communication_backend_name())
args.local_rank = int(os.environ['LOCAL_RANK'])
get_accelerator().set_device(args.local_rank)
device = torch.device(get_accelerator().device_name(), args.local_rank)
size = dist.get_world_size()
rank = dist.get_rank()
backend = NcclBackend()
local_rank = args.local_rank
# Setting tensor_size (BERT-Large)
tensor_size = 300 * 2**20
server_size = int(tensor_size / size)
if tensor_size % (8 * size) != 0:
right_tensor_size = tensor_size + (8 * size - (tensor_size % (8 * size)))
else:
right_tensor_size = tensor_size
right_server_size = right_tensor_size // size
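# The buffer is padded up to a multiple of 8 * world_size so it shards evenly across ranks and
# (presumably) so each shard's sign bits pack into whole bytes for the 1-bit compressed allreduce;
# worker_error and server_error below are sized to the padded lengths.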
# Adding bias to the initialization of the gradient we are communicating
# In order to get rid of the case where some elements in the gradient are too small
a = (torch.rand(tensor_size, device=device) - 0.5) + 0.01 * rank
worker_error = torch.zeros(right_tensor_size, device=device)
server_error = torch.zeros(right_server_size, device=device)
warmup = 10
iters = 10
# Warmup
for i in range(warmup):
backend.compressed_allreduce(a, worker_error, server_error, local_rank)
time_list = []
a_sign = a.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)
scale = a.norm() / np.sqrt(a.numel())
a_compressed = scale * a_sign
print("Shape of the compressed buffer:", a_compressed.shape) if rank == 0 else None
for i in range(iters):
timers('compressed_allreduce').start()
backend.compressed_allreduce(a, worker_error, server_error, local_rank)
#deepspeed.comm.all_reduce(a_compressed)
timers('compressed_allreduce').stop()
time_list.append(timers('compressed_allreduce').elapsed())
#timer_names = ['compressed_allreduce']
#timers.log(names=timer_names, normalizer=1, memory_breakdown=None)
places = 2
convert = 1e3
float_size = 4
if rank == 0:
for i in range(iters):
lat = time_list[i]
print("latency = ", lat * convert)
minlat = round(min(time_list) * convert)
maxlat = round(max(time_list) * convert)
meanlat = round(mean(time_list) * convert, places)
print("min, max, and mean = {} ms, {} ms, {} ms".format(minlat, maxlat, meanlat)) if rank == 0 else None
#print("tensor shape", a.shape)
duration = meanlat / 1e3
tput = ((tensor_size * 4) / duration)
print("algo throughput: %f Bytes/s, %f GB/s" % (tput, tput / 1e9)) if rank == 0 else None
size = tensor_size * 4
n = dist.get_world_size()
busbw = (size / duration) * (2 * (n - 1) / n)
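# Bus bandwidth applies the standard allreduce correction factor 2 * (n - 1) / n to the algorithm
# bandwidth (bytes / time), accounting for each byte being both reduce-scattered and all-gathered
# across the n ranks.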
print("busbw: %f GB/s" % (busbw / 1e9)) if rank == 0 else None
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from mpi4py import MPI
import torch
import deepspeed.comm as dist
import numpy as np
import deepspeed
from deepspeed.runtime.comm.mpi import MpiBackend
from deepspeed.accelerator import get_accelerator
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
deepspeed.init_distributed(dist_backend=get_accelerator().communication_backend_name())
# Change cuda_aware to True to test out CUDA-Aware MPI communication
backend = MpiBackend(cuda_aware=False)
local_rank = rank % get_accelerator().device_count()
device = torch.device(get_accelerator().device_name(), local_rank)
# A simulated compression function using deepspeed.comm
def torch_sim(a):
a_sign = a.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)
scale = a.norm() / np.sqrt(a.numel())
a_compressed = scale * a_sign
a_sign = None
worker_error = a - a_compressed
dist.all_reduce(a_compressed)
a_compressed.mul_(1 / dist.get_world_size())
a_server_sign = a_compressed.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)
a_list = torch.chunk(a_compressed, chunks=dist.get_world_size())
server_scale = [chunk_a.norm() / np.sqrt(chunk_a.numel()) for chunk_a in a_list]
a_sign_list = torch.chunk(a_server_sign, dist.get_world_size())
a_server_compressed = torch.cat([server_scale[i] * a_sign_list[i] for i in range(dist.get_world_size())])
rank = dist.get_rank()
server_error = a_list[rank] - server_scale[rank] * a_sign_list[rank]
get_accelerator().synchronize()
dist.barrier()
return a_server_compressed, worker_error, server_error
tensor_size = 100 * 2**20
server_size = int(tensor_size / size)
if tensor_size % (8 * size) != 0:
right_tensor_size = tensor_size + (8 * size - (tensor_size % (8 * size)))
else:
right_tensor_size = tensor_size
right_server_size = right_tensor_size // size
# Add a small rank-dependent bias to the simulated gradient to reduce the chance that
# elements of the communicated gradient end up too small (see the sign() caveat below).
a = (torch.rand(tensor_size, device=device) - 0.5) + 0.01 * rank
worker_error = torch.zeros(right_tensor_size, device=device)
server_error = torch.zeros(right_server_size, device=device)
a_torch, worker_error_torch, server_error_torch = torch_sim(a)
get_accelerator().empty_cache()
a_after = backend.compressed_allreduce(a, worker_error, server_error, local_rank)
threshold = 1e-6
magnitude_threshold = 1e-6
diff_mask = (a_after - a_torch) > threshold
diff_server_mask = torch.chunk(diff_mask, size)[rank]
mpi_server = torch.chunk(a_after, size)[rank] + server_error
torch_server = torch.chunk(a_torch, size)[rank] + server_error_torch
test_correctness = True
# If a value in compensated_server_m is too small (e.g. 1e-8), calling sign() on it can be unreliable,
# so the test skips positions whose magnitude in compensated_server_m is below magnitude_threshold
if test_correctness:
if torch.sum(diff_server_mask) == 0:
print('Successfully passed the test for MPI Backend at Rank {}'.format(rank))
else:
check_mag_mask = mpi_server[diff_server_mask] > magnitude_threshold
if torch.sum(check_mag_mask) == 0:
print('Successfully passed the test for MPI Backend at Rank {}'.format(rank))
else:
            print('Fails at {} positions'.format(torch.sum(check_mag_mask)))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from mpi4py import MPI
import torch
import deepspeed
from deepspeed.runtime.comm.mpi import MpiBackend
# Configure wall clock timer
from deepspeed.utils.timer import SynchronizedWallClockTimer
from deepspeed.accelerator import get_accelerator
from statistics import mean
timers = SynchronizedWallClockTimer()
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
deepspeed.init_distributed(dist_backend=get_accelerator().communication_backend_name())
# Change cuda_aware to True to test out CUDA-Aware MPI communication
backend = MpiBackend(cuda_aware=False)
local_rank = rank % get_accelerator().device_count()
device = torch.device(get_accelerator().device_name(), local_rank)
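# Latency micro-benchmark for MpiBackend.compressed_allreduce on a ~300M-element fp32
# tensor (~1.2 GB); like the MPI correctness test, it is normally started under mpirun.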
tensor_size = 300 * 2**20
server_size = int(tensor_size / size)
if tensor_size % (8 * size) != 0:
right_tensor_size = tensor_size + (8 * size - (tensor_size % (8 * size)))
else:
right_tensor_size = tensor_size
right_server_size = right_tensor_size // size
# Add a small rank-dependent bias to the simulated gradient to reduce the chance that
# elements of the communicated gradient end up too small.
a = (torch.rand(tensor_size, device=device) - 0.5) + 0.01 * rank
worker_error = torch.zeros(right_tensor_size, device=device)
server_error = torch.zeros(right_server_size, device=device)
warmup = 10
iters = 10
# Warmup
for i in range(warmup):
backend.compressed_allreduce(a, worker_error, server_error, local_rank)
time_list = []
for i in range(iters):
timers('compressed_allreduce').start()
backend.compressed_allreduce(a, worker_error, server_error, local_rank)
timers('compressed_allreduce').stop()
time_list.append(timers('compressed_allreduce').elapsed())
timer_names = ['compressed_allreduce']
timers.log(names=timer_names, normalizer=1, memory_breakdown=None)
places = 2
convert = 1e3
float_size = 4
if rank == 0:
for i in range(iters):
lat = time_list[i]
print("latency = ", lat * convert)
minlat = round(min(time_list) * convert)
maxlat = round(max(time_list) * convert)
meanlat = round(mean(time_list) * convert, places)
print("min, max, and mean = {} ms, {} ms, {} ms".format(minlat, maxlat, meanlat))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed.comm as dist
import numpy as np
import argparse
import deepspeed
import os
from deepspeed.runtime.comm.nccl import NcclBackend
from deepspeed.accelerator import get_accelerator
parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=-1)
args = parser.parse_args()
deepspeed.init_distributed(dist_backend=get_accelerator().communication_backend_name())
args.local_rank = int(os.environ['LOCAL_RANK'])
get_accelerator().set_device(args.local_rank)
device = torch.device(get_accelerator().device_name(), args.local_rank)
size = dist.get_world_size()
rank = dist.get_rank()
backend = NcclBackend()
local_rank = args.local_rank
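# Correctness check for NcclBackend.compressed_allreduce: the result is compared against
# the torch_sim reference below, mirroring the MPI backend test.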
# A simulated compression function using deepspeed.comm
def torch_sim(a):
a_sign = a.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)
scale = a.norm() / np.sqrt(a.numel())
a_compressed = scale * a_sign
a_sign = None
worker_error = a - a_compressed
dist.all_reduce(a_compressed)
a_compressed.mul_(1 / dist.get_world_size())
a_server_sign = a_compressed.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)
a_list = torch.chunk(a_compressed, chunks=dist.get_world_size())
server_scale = [chunk_a.norm() / np.sqrt(chunk_a.numel()) for chunk_a in a_list]
a_sign_list = torch.chunk(a_server_sign, dist.get_world_size())
a_server_compressed = torch.cat([server_scale[i] * a_sign_list[i] for i in range(dist.get_world_size())])
rank = dist.get_rank()
server_error = a_list[rank] - server_scale[rank] * a_sign_list[rank]
get_accelerator().synchronize()
dist.barrier()
return a_server_compressed, worker_error, server_error
tensor_size = 300 * 2**20
server_size = int(tensor_size / size)
if tensor_size % (8 * size) != 0:
right_tensor_size = tensor_size + (8 * size - (tensor_size % (8 * size)))
else:
right_tensor_size = tensor_size
right_server_size = right_tensor_size // size
# Add a small rank-dependent bias to the simulated gradient to reduce the chance that
# elements of the communicated gradient end up too small (see the sign() caveat below).
a = (torch.rand(tensor_size, device=device) - 0.5) + 0.01 * rank
worker_error = torch.zeros(right_tensor_size, device=device)
server_error = torch.zeros(right_server_size, device=device)
a_torch, worker_error_torch, server_error_torch = torch_sim(a)
get_accelerator().empty_cache()
a_after = backend.compressed_allreduce(a, worker_error, server_error, local_rank)
threshold = 1e-6
magnitude_threshold = 1e-6
diff_mask = (a_after - a_torch) > threshold
diff_server_mask = torch.chunk(diff_mask, size)[rank]
nccl_server = torch.chunk(a_after, size)[rank] + server_error
torch_server = torch.chunk(a_torch, size)[rank] + server_error_torch
test_correctness = True
# If a value in compensated_server_m is too small (e.g. 1e-8), calling sign() on it can be unreliable,
# so the test skips positions whose magnitude in compensated_server_m is below magnitude_threshold
if test_correctness:
if torch.sum(diff_server_mask) == 0:
print('Successfully passed the test for NCCL Backend at Rank {}'.format(rank))
else:
        check_mag_mask = nccl_server[diff_server_mask] > magnitude_threshold
if torch.sum(check_mag_mask) == 0:
print('Successfully passed the test for NCCL Backend at Rank {}'.format(rank))
else:
            print('Fails at {} positions'.format(torch.sum(check_mag_mask)))
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
class OneLayerNet(torch.nn.Module):
def __init__(self, D_in, D_out):
"""
In the constructor we instantiate two nn.Linear modules and assign them as
member variables.
"""
super(OneLayerNet, self).__init__()
self.linear1 = torch.nn.Linear(D_in, D_out)
def forward(self, x):
"""
In the forward function we accept a Variable of input data and we must return
a Variable of output data. We can use Modules defined in the constructor as
well as arbitrary operators on Variables.
"""
        h_relu = self.linear1(x).clamp(min=0)
        # The same linear layer is applied twice, so D_in must equal D_out here.
        y_pred = self.linear1(h_relu)
return y_pred
def test_literal_device():
model = OneLayerNet(128, 128)
os.environ['RANK'] = '0'
os.environ['WORLD_SIZE'] = '1'
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '8088'
os.environ['LOCAL_RANK'] = '0'
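    # The environment variables above emulate a single-rank distributed launch so that
    # init_distributed() can run without an external launcher.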
deepspeed.init_distributed(get_accelerator().communication_backend_name())
deepspeed.initialize(model=model, config='ds_config.json')
string = get_accelerator().device_name() #'xpu' or 'cuda'
string0 = get_accelerator().device_name(0) #'xpu:0' or 'cuda:0'
string1 = get_accelerator().device_name(1) #'xpu:1' or 'cuda:1'
assert string == 'xpu' or string == 'cuda'
assert string0 == 'xpu:0' or string0 == 'cuda:0'
assert string1 == 'xpu:1' or string1 == 'cuda:1'
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#!/usr/bin/env python
# run the benchmark under timeit (-t), cProfile (-c), line_profiler (-l)
#
# usage:
# ./unflatten_bench.py -t
# ./unflatten_bench.py -c
# kernprof -l unflatten_bench.py -l; python -m line_profiler unflatten_bench.py.lprof
import argparse
import gc
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import UtilsBuilder
from apex_C import flatten as flatten_apex
from apex_C import unflatten as unflatten_apex
util_ops = UtilsBuilder().load()
flatten = util_ops.flatten
unflatten = util_ops.unflatten
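# Three unflatten implementations are compared on identical inputs: the pure-Python
# torch._utils version, DeepSpeed's C++ op loaded via UtilsBuilder, and apex's C++ op.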
torch.manual_seed(0)
# emulate a small typical model weights
x = [
torch.rand((512, 512)).to(get_accelerator().device_name()),
torch.rand((512, 1024)).to(get_accelerator().device_name()),
torch.rand((512, 30000)).to(get_accelerator().device_name())
]
unflat_t = x * 30  # replicate the 3 tensors 30 times (90 tensors total)
# warm up and check that the same output is produced
flat_py = _flatten_dense_tensors(unflat_t)
flat_cpp = flatten(unflat_t)
flat_apex = flatten_apex(unflat_t)
#numel = flat_cpp.numel()
assert torch.eq(flat_py, flat_cpp).all(), "both produce the same tensor"
assert torch.eq(flat_py, flat_apex).all(), "both produce the same tensor"
flat_t = flat_py
unflat_py = _unflatten_dense_tensors(flat_py, unflat_t)
for i in range(len(unflat_t)):
assert torch.eq(unflat_t[i], unflat_py[i]).all()
unflat_cpp = _unflatten_dense_tensors(flat_cpp, unflat_t)
for i in range(len(unflat_t)):
assert torch.eq(unflat_t[i], unflat_cpp[i]).all()
unflat_apex = _unflatten_dense_tensors(flat_apex, unflat_t)
for i in range(len(unflat_t)):
assert torch.eq(unflat_t[i], unflat_apex[i]).all()
# the programs being tested
def py():
for i in range(1000):
unflat = _unflatten_dense_tensors(flat_t, unflat_t)
def cpp():
for i in range(1000):
unflat = unflatten(flat_t, unflat_t)
def apex():
for i in range(1000):
unflat = unflatten_apex(flat_t, unflat_t)
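# Each candidate performs 1000 unflatten calls on the same flat buffer, so the
# timeit / cProfile / line_profiler harnesses below measure comparable work.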
#### cProfile ####
import cProfile
def cprofileme():
print("--------------- cProfile -----------------")
print("py")
cProfile.run("py()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
print("cpp")
cProfile.run("cpp()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
print("apex")
cProfile.run("apex()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
#### timeit ####
import timeit
def timeme():
print("--------------- timeit -----------------")
print(f'py ={timeit.Timer("py()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
print(f'cpp ={timeit.Timer("cpp()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
print(f'apex={timeit.Timer("apex()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
#### line_profiler ####
# this one requires a special way to be called
# pip install line_profiler
# kernprof -l unflatten_bench.py -l; python -m line_profiler unflatten_bench.py.lprof
def line_profileme():
print("--------------- line_profier -----------------")
print("py")
profile(py)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
print("cpp")
profile(cpp)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
print("apex")
profile(apex)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-l", action='store_true')
parser.add_argument("-c", action='store_true')
parser.add_argument("-t", action='store_true')
args = parser.parse_args()
if args.l:
line_profileme()
elif args.c:
cprofileme()
elif args.t:
timeme()
|