# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
DeepSpeed library
To build wheel on Windows:
1. Install pytorch, such as pytorch 1.12 + cuda 11.6.
2. Install visual cpp build tool.
3. Include cuda toolkit.
4. Launch cmd console with Administrator privilege for creating required symlink folders.
Create a new wheel via the following command:
build_win.bat
The wheel will be located at: dist/*.whl
"""
import os
import sys
import subprocess
from setuptools import setup, find_packages
from setuptools.command import egg_info
import time
torch_available = True
try:
import torch
except ImportError:
torch_available = False
print('[WARNING] Unable to import torch, pre-compiling ops will be disabled. ' \
'Please visit https://pytorch.org/ to see how to properly install torch on your system.')
from op_builder import get_default_compute_capabilities, OpBuilder
from op_builder.all_ops import ALL_OPS
from op_builder.builder import installed_cuda_version
# Fetch rocm state.
is_rocm_pytorch = OpBuilder.is_rocm_pytorch()
rocm_version = OpBuilder.installed_rocm_version()
RED_START = '\033[31m'
RED_END = '\033[0m'
ERROR = f"{RED_START} [ERROR] {RED_END}"
def abort(msg):
print(f"{ERROR} {msg}")
assert False, msg
def fetch_requirements(path):
with open(path, 'r') as fd:
return [r.strip() for r in fd.readlines()]
install_requires = fetch_requirements('requirements/requirements.txt')
extras_require = {
'1bit': [], # add cupy based on cuda/rocm version
'1bit_mpi': fetch_requirements('requirements/requirements-1bit-mpi.txt'),
'readthedocs': fetch_requirements('requirements/requirements-readthedocs.txt'),
'dev': fetch_requirements('requirements/requirements-dev.txt'),
'autotuning': fetch_requirements('requirements/requirements-autotuning.txt'),
'autotuning_ml': fetch_requirements('requirements/requirements-autotuning-ml.txt'),
'sparse_attn': fetch_requirements('requirements/requirements-sparse_attn.txt'),
'sparse': fetch_requirements('requirements/requirements-sparse_pruning.txt'),
'inf': fetch_requirements('requirements/requirements-inf.txt'),
'sd': fetch_requirements('requirements/requirements-sd.txt')
}
# Add specific cupy version to both onebit extension variants.
if torch_available and torch.cuda.is_available():
cupy = None
if is_rocm_pytorch:
rocm_major, rocm_minor = rocm_version
# XXX cupy support for rocm 5 is not available yet.
if rocm_major <= 4:
cupy = f"cupy-rocm-{rocm_major}-{rocm_minor}"
else:
cuda_major_ver, cuda_minor_ver = installed_cuda_version()
if (cuda_major_ver < 11) or ((cuda_major_ver == 11) and (cuda_minor_ver < 3)):
cupy = f"cupy-cuda{cuda_major_ver}{cuda_minor_ver}"
else:
cupy = f"cupy-cuda{cuda_major_ver}x"
if cupy:
extras_require['1bit'].append(cupy)
extras_require['1bit_mpi'].append(cupy)
# Make an [all] extra that installs all needed dependencies.
all_extras = set()
for extra in extras_require.items():
for req in extra[1]:
all_extras.add(req)
extras_require['all'] = list(all_extras)
cmdclass = {}
# For any pre-installed ops, force-disable ninja.
if torch_available:
from accelerator import get_accelerator
cmdclass['build_ext'] = get_accelerator().build_extension().with_options(use_ninja=False)
if torch_available:
TORCH_MAJOR = torch.__version__.split('.')[0]
TORCH_MINOR = torch.__version__.split('.')[1]
else:
TORCH_MAJOR = "0"
TORCH_MINOR = "0"
if torch_available and not torch.cuda.is_available():
# Fix to allow docker builds, similar to https://github.com/NVIDIA/apex/issues/486.
print("[WARNING] Torch did not find cuda available, if cross-compiling or running with cpu only "
"you can ignore this message. Adding compute capability for Pascal, Volta, and Turing "
"(compute capabilities 6.0, 6.1, 6.2)")
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
os.environ["TORCH_CUDA_ARCH_LIST"] = get_default_compute_capabilities()
ext_modules = []
# Default: rely on JIT compilation of ops on Linux (no pre-install); on Windows, default to pre-installing ops.
BUILD_OP_PLATFORM = 1 if sys.platform == "win32" else 0
BUILD_OP_DEFAULT = int(os.environ.get('DS_BUILD_OPS', BUILD_OP_PLATFORM))
print(f"DS_BUILD_OPS={BUILD_OP_DEFAULT}")
if BUILD_OP_DEFAULT:
assert torch_available, "Unable to pre-compile ops without torch installed. Please install torch before attempting to pre-compile ops."
def command_exists(cmd):
if sys.platform == "win32":
result = subprocess.Popen(f'{cmd}', stdout=subprocess.PIPE, shell=True)
return result.wait() == 1
else:
result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
return result.wait() == 0
def op_envvar(op_name):
assert hasattr(ALL_OPS[op_name], 'BUILD_VAR'), \
f"{op_name} is missing BUILD_VAR field"
return ALL_OPS[op_name].BUILD_VAR
def op_enabled(op_name):
env_var = op_envvar(op_name)
return int(os.environ.get(env_var, BUILD_OP_DEFAULT))
compatible_ops = dict.fromkeys(ALL_OPS.keys(), False)
install_ops = dict.fromkeys(ALL_OPS.keys(), False)
for op_name, builder in ALL_OPS.items():
op_compatible = builder.is_compatible()
compatible_ops[op_name] = op_compatible
# If op is requested but not available, throw an error.
if op_enabled(op_name) and not op_compatible:
env_var = op_envvar(op_name)
if env_var not in os.environ:
builder.warning(f"One can disable {op_name} with {env_var}=0")
abort(f"Unable to pre-compile {op_name}")
# If op is compatible but install is not enabled (JIT mode).
if is_rocm_pytorch and op_compatible and not op_enabled(op_name):
builder.hipify_extension()
# If op install enabled, add builder to extensions.
if op_enabled(op_name) and op_compatible:
assert torch_available, f"Unable to pre-compile {op_name}, please first install torch"
install_ops[op_name] = op_enabled(op_name)
ext_modules.append(builder.builder())
print(f'Install Ops={install_ops}')
# Write out version/git info.
git_hash_cmd = "git rev-parse --short HEAD"
git_branch_cmd = "git rev-parse --abbrev-ref HEAD"
if command_exists('git') and 'DS_BUILD_STRING' not in os.environ:
try:
result = subprocess.check_output(git_hash_cmd, shell=True)
git_hash = result.decode('utf-8').strip()
result = subprocess.check_output(git_branch_cmd, shell=True)
git_branch = result.decode('utf-8').strip()
except subprocess.CalledProcessError:
git_hash = "unknown"
git_branch = "unknown"
else:
git_hash = "unknown"
git_branch = "unknown"
def create_dir_symlink(src, dest):
if not os.path.islink(dest):
if os.path.exists(dest):
os.remove(dest)
assert not os.path.exists(dest)
os.symlink(src, dest)
if sys.platform == "win32":
# Create the symbolic links required for the Windows build.
# Creating symlinks on Windows requires Administrator privileges.
create_dir_symlink('..\\..\\csrc', '.\\deepspeed\\ops\\csrc')
create_dir_symlink('..\\..\\op_builder', '.\\deepspeed\\ops\\op_builder')
create_dir_symlink('..\\accelerator', '.\\deepspeed\\accelerator')
egg_info.manifest_maker.template = 'MANIFEST_win.in'
# Parse the DeepSpeed version string from version.txt.
version_str = open('version.txt', 'r').read().strip()
# Build specifiers like .devX can be added at install time. Otherwise, add the git hash.
# Example: DS_BUILD_STRING=".dev20201022" python setup.py sdist bdist_wheel.
# Building wheel for distribution, update version file.
if 'DS_BUILD_STRING' in os.environ:
# Build string env specified, probably building for distribution.
with open('build.txt', 'w') as fd:
fd.write(os.environ.get('DS_BUILD_STRING'))
version_str += os.environ.get('DS_BUILD_STRING')
elif os.path.isfile('build.txt'):
# build.txt exists, probably installing from distribution.
with open('build.txt', 'r') as fd:
version_str += fd.read().strip()
else:
# None of the above, probably installing from source.
version_str += f'+{git_hash}'
torch_version = ".".join([TORCH_MAJOR, TORCH_MINOR])
bf16_support = False
# Set cuda_version to 0.0 if cpu-only.
cuda_version = "0.0"
nccl_version = "0.0"
# Set hip_version to 0.0 if cpu-only.
hip_version = "0.0"
if torch_available and torch.version.cuda is not None:
cuda_version = ".".join(torch.version.cuda.split('.')[:2])
if sys.platform != "win32":
if isinstance(torch.cuda.nccl.version(), int):
# This will break if minor version > 9.
nccl_version = ".".join(str(torch.cuda.nccl.version())[:2])
else:
nccl_version = ".".join(map(str, torch.cuda.nccl.version()[:2]))
if hasattr(torch.cuda, 'is_bf16_supported') and torch.cuda.is_available():
bf16_support = torch.cuda.is_bf16_supported()
if torch_available and hasattr(torch.version, 'hip') and torch.version.hip is not None:
hip_version = ".".join(torch.version.hip.split('.')[:2])
torch_info = {
"version": torch_version,
"bf16_support": bf16_support,
"cuda_version": cuda_version,
"nccl_version": nccl_version,
"hip_version": hip_version
}
print(f"version={version_str}, git_hash={git_hash}, git_branch={git_branch}")
with open('deepspeed/git_version_info_installed.py', 'w') as fd:
fd.write(f"version='{version_str}'\n")
fd.write(f"git_hash='{git_hash}'\n")
fd.write(f"git_branch='{git_branch}'\n")
fd.write(f"installed_ops={install_ops}\n")
fd.write(f"compatible_ops={compatible_ops}\n")
fd.write(f"torch_info={torch_info}\n")
print(f'install_requires={install_requires}')
print(f'compatible_ops={compatible_ops}')
print(f'ext_modules={ext_modules}')
# Parse README.md to make long_description for PyPI page.
thisdir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(thisdir, 'README.md'), encoding='utf-8') as fin:
readme_text = fin.read()
start_time = time.time()
setup(name='deepspeed',
version=version_str,
description='DeepSpeed library',
long_description=readme_text,
long_description_content_type='text/markdown',
author='DeepSpeed Team',
author_email='[email protected]',
url='http://deepspeed.ai',
project_urls={
'Documentation': 'https://deepspeed.readthedocs.io',
'Source': 'https://github.com/microsoft/DeepSpeed',
},
install_requires=install_requires,
extras_require=extras_require,
packages=find_packages(include=['deepspeed', 'deepspeed.*']),
include_package_data=True,
scripts=[
'bin/deepspeed', 'bin/deepspeed.pt', 'bin/ds', 'bin/ds_ssh', 'bin/ds_report', 'bin/ds_bench', 'bin/dsr',
'bin/ds_elastic'
],
classifiers=[
'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10'
],
license='Apache Software License 2.0',
ext_modules=ext_modules,
cmdclass=cmdclass)
end_time = time.time()
print(f'deepspeed build time = {end_time - start_time} secs')
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import os
import argparse
import json
from parse_aio_stats import READ_SPEED, WRITE_SPEED, get_sorted_results
from perf_sweep_utils import BENCH_LOG_DIR, READ_LOG_DIR, WRITE_LOG_DIR
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir',
type=str,
default=BENCH_LOG_DIR,
help=f'Folder of performance sweep logs. Default is {os.path.join(".", BENCH_LOG_DIR)}')
args = parser.parse_args()
print(f'args = {args}')
return args
def validate_args(args):
for d in [READ_LOG_DIR, WRITE_LOG_DIR]:
log_dir = os.path.join(args.log_dir, d)
if not os.path.isdir(log_dir):
print(f'{log_dir} folder does not exist')
return False
return True
def convert_to_param(key):
assert len(key) == 6
return {
"single_submit": "true" if key[0] == "single" else "false",
"overlap_events": "true" if key[1] == "overlap" else "false",
"thread_count": int(key[3]),
"queue_depth": int(key[4]),
"block_size": int(key[5])
}
def generate_aio_param(read_log_dir, write_log_dir):
_, read_results = get_sorted_results(read_log_dir, READ_SPEED)
_, write_results = get_sorted_results(write_log_dir, WRITE_SPEED)
combined_perf = {key[1:]: value for key, value in read_results.items()}
for key, value in write_results.items():
new_key = key[1:]
if new_key in combined_perf:
combined_perf[new_key] += value
else:
combined_perf[new_key] = 0
optimal_key = None
optimal_perf = 0.0
for key, value in combined_perf.items():
if value > optimal_perf:
optimal_perf = value
optimal_key = key
aio_param = {"aio": convert_to_param(optimal_key)}
read_perf_keys = {key[1:]: key for key in read_results.keys()}
write_perf_keys = {key[1:]: key for key in write_results.keys()}
optimal_config_read = read_results.get(read_perf_keys[optimal_key], None)
optimal_config_write = write_results.get(write_perf_keys[optimal_key], None)
print(f'Best performance (GB/sec): read = {optimal_config_read:5.2f}, write = {optimal_config_write:5.2f}')
print(json.dumps(aio_param, indent=3))
def main():
print('Generate aio param')
args = parse_arguments()
if not validate_args(args):
quit()
read_log_dir = os.path.join(args.log_dir, READ_LOG_DIR)
write_log_dir = os.path.join(args.log_dir, WRITE_LOG_DIR)
generate_aio_param(read_log_dir, write_log_dir)
if __name__ == "__main__":
main()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
SCRIPT_PREFIX = '_aio_bench'
WRITE_OP_DESC = 'write'
READ_OP_DESC = 'read'
READ_IO_DIR = f'{SCRIPT_PREFIX}_{READ_OP_DESC}_io'
WRITE_IO_DIR = f'{SCRIPT_PREFIX}_{WRITE_OP_DESC}_io'
BENCH_LOG_DIR = f'{SCRIPT_PREFIX}_logs'
READ_LOG_DIR = f'{SCRIPT_PREFIX}_{READ_OP_DESC}_logs'
WRITE_LOG_DIR = f'{SCRIPT_PREFIX}_{WRITE_OP_DESC}_logs'
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
BYTES_PER_GB = 1024**3
LOG_TIDS = [0]
def task_log(tid, msg):
if tid in LOG_TIDS:
print(f'tid {tid}: {msg}')
def task_barrier(barrier, num_parties):
assert barrier.parties == num_parties
barrier.wait()
assert barrier.broken == False
def report_results(args, read_op, pool_results):
#print(f'pool_results = {pool_results}')
io_string = 'Read' if read_op else 'Write'
if None in pool_results:
print(f'Failure in one of {args.threads} {io_string} processes')
return
total_bytes = sum([num_bytes for _, _, num_bytes in pool_results])
task_latency_sec = max([sec for _, sec, _ in pool_results])
task_speed_GB = total_bytes / task_latency_sec / BYTES_PER_GB
print(f'Task {io_string} Latency = {task_latency_sec} sec')
print(f'Task {io_string} Speed = {task_speed_GB} GB/sec')
e2e_latency_sec = max([sec for sec, _, _ in pool_results])
e2e_speed_GB = total_bytes / e2e_latency_sec / BYTES_PER_GB
print(f'E2E {io_string} Latency = {e2e_latency_sec} sec')
print(f'E2E {io_string} Speed = {e2e_speed_GB} GB/sec')
def refine_integer_value(value):
unit_dict = {'K': 1024, 'M': 1024**2, 'G': 1024**3}
if value[-1] in list(unit_dict.keys()):
int_value = int(value[:-1]) * unit_dict[value[-1]]
return int_value
return int(value)
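# For example, refine_integer_value('400M') returns 400 * 1024**2 = 419430400, while a
# plain numeric string such as '4096' is returned unchanged as int(4096).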
def refine_args(args):
if args.write_size and type(args.write_size) == str:
args.write_size = refine_integer_value(args.write_size)
if args.block_size and type(args.block_size) == str:
args.block_size = refine_integer_value(args.block_size)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
from deepspeed.ops.op_builder import AsyncIOBuilder
assert AsyncIOBuilder().is_compatible()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import os
import argparse
import multiprocessing as mp
from ds_aio_basic import aio_basic_multiprocessing
from ds_aio_handle import aio_handle_multiprocessing
from test_ds_aio_utils import refine_args
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--read_file', type=str, default=None, help='Read file.')
parser.add_argument('--write_file', type=str, default=None, help='Write file.')
parser.add_argument('--write_size', type=str, default=None, help='Number of bytes to write.')
parser.add_argument('--block_size', type=str, default='1M', help='I/O block size.')
parser.add_argument('--queue_depth', type=int, default=32, help='I/O queue depth.')
parser.add_argument('--threads', type=int, default=1, help='Thread parallelism count.')
parser.add_argument('--single_submit',
action='store_true',
help='Submit I/O requests one at a time (default is to submit queue_depth requests at once).')
parser.add_argument('--overlap_events',
action='store_true',
help='Overlap I/O submission and completion requests.')
parser.add_argument('--validate', action='store_true', help='Perform validation in library.')
parser.add_argument('--handle', action='store_true', help='Use AIO handle.')
parser.add_argument('--loops', type=int, default=1, help='Count of operation repetitions')
parser.add_argument('--io_parallel', type=int, default=None, help='Per iop parallelism')
parser.add_argument('--gpu', action='store_true', help='Use GPU memory')
parser.add_argument('--use_accelerator_pin_memory',
action='store_true',
help='Obtain pinned (CPU page-locked) tensors from accelerator')
args = parser.parse_args()
print(f'args = {args}')
return args
def validate_args(args):
if args.read_file and not os.path.isfile(args.read_file):
print(f'args validation error: {args.read_file} not found')
return False
return True
def main():
print(f'Testing deepspeed_aio python frontend')
args = parse_arguments()
refine_args(args)
if not validate_args(args):
quit()
mp.set_start_method('spawn')
multiprocess_function = aio_handle_multiprocessing if args.handle else aio_basic_multiprocessing
if args.read_file:
multiprocess_function(args, True)
if args.write_file:
multiprocess_function(args, False)
if __name__ == "__main__":
main()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import os
import sys
import argparse
import json
import itertools
import subprocess
import shutil
from test_ds_aio_utils import refine_integer_value
from perf_sweep_utils import READ_OP_DESC, WRITE_OP_DESC, BENCH_LOG_DIR, \
READ_IO_DIR, WRITE_IO_DIR, READ_LOG_DIR, WRITE_LOG_DIR
from deepspeed.ops.op_builder import AsyncIOBuilder
OTHER_OPTIONS = '--handle'
PERF_SCRIPT = 'test_ds_aio.py'
DEFAULT_SWEEP_CONFIG = {
"block_size": ["128K", "256K"],
"queue_depth": [4, 16, 32],
"overlap_events": [True, False],
"io_parallel": [2, 8],
"single_submit": [False]
}
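# A user-supplied --sweep_config JSON file follows the same shape as DEFAULT_SWEEP_CONFIG:
# each key names a test_ds_aio.py flag and each value lists the settings to sweep.
# Illustrative example (hypothetical file contents):
#   {"block_size": ["1M"], "queue_depth": [32], "overlap_events": [true], "single_submit": [false]}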
class Job(object):
def __init__(self, cmd_line, output_file=None, work_dir=None):
self.cmd_line = cmd_line
self.output_file = output_file
self.work_dir = work_dir
self.output_fd = None
def cmd(self):
return self.cmd_line
def get_stdout(self):
return self.output_fd
def get_stderr(self):
return self.output_fd
def get_cwd(self):
return self.work_dir
def open_output_file(self):
if self.output_file is not None:
self.output_fd = open(self.output_file, 'w')
def close_output_file(self):
if self.output_fd is not None:
self.output_fd.close()
self.output_fd = None
class SweepConfig(object):
def __init__(self, args):
self.nvme_dir = args.nvme_dir
self.io_size = args.io_size
self.search_space = get_sweep_config_dict(args.sweep_config)
self.read = not args.no_read
self.write = not args.no_write
self.flush_cache = not args.no_sudo
self.log_dir = args.log_dir
self.loops = args.loops
self.other_options = f'{OTHER_OPTIONS} --loops {args.loops}'
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--nvme_dir',
required=True,
type=str,
help='Directory in which to perform I/O tests. A writeable directory on an NVMe device.')
parser.add_argument('--sweep_config', type=str, default=None, help='Performance sweep configuration json file.')
parser.add_argument('--no_read', action='store_true', help='Disable read performance measurements.')
parser.add_argument('--no_write', action='store_true', help='Disable write performance measurements.')
parser.add_argument('--io_size',
type=str,
default="400M",
help='Number of I/O bytes to read/write for performance measurements.')
parser.add_argument(
'--no_sudo',
action='store_true',
help=
'Run without sudo access. Page cache will not be flushed and reported read speeds may be higher than actual.')
parser.add_argument(
'--log_dir',
type=str,
default=BENCH_LOG_DIR,
help=f'Output directory for performance log files. Default is {os.path.join(".", BENCH_LOG_DIR)}')
parser.add_argument('--loops', type=int, default=1, help='Count of operation repetitions')
args = parser.parse_args()
print(f'args = {args}')
return args
def dump_cmd_lines(cmd_lines):
print(f'cmd line count = {len(cmd_lines)}')
for i, cmd in enumerate(cmd_lines):
print(f'{i}: {cmd}')
def get_sweep_config_dict(sweep_config_json):
if sweep_config_json is None:
return DEFAULT_SWEEP_CONFIG
with open(sweep_config_json) as fp:
sweep_config = json.load(fp)
return sweep_config
def get_sweep_cmd_lines(sweep_config_dict):
def flatten_options(key, value_list):
flat_list = []
for v in value_list:
if not type(v) is bool:
flat_list.append(f'--{key} {v}')
elif v:
flat_list.append(f'--{key}')
else:
flat_list.append(' ')
return flat_list
flat_list = [flatten_options(key, value) for key, value in sweep_config_dict.items()]
cmd_list = list(itertools.product(*flat_list))
cmd_list = [list(cmd) for cmd in cmd_list]
#dump_cmd_lines(cmd_list)
return cmd_list
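# For example, a search space of {"queue_depth": [4, 16], "single_submit": [False]} expands to
# [['--queue_depth 4', ' '], ['--queue_depth 16', ' ']]; a boolean False becomes a blank
# placeholder rather than a flag, while True would emit the bare flag (e.g. '--single_submit').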
def run_job(job):
args = ' '.join(job.cmd())
print(f'args = {args}')
job.open_output_file()
proc = subprocess.run(args=args, shell=True, stdout=job.get_stdout(), stderr=job.get_stderr(), cwd=job.get_cwd())
job.close_output_file()
assert proc.returncode == 0, \
f"This command failed: {job.cmd()}"
def launch_sweep(sweep_jobs, sync_job, flush_cache_job):
for perf_job in sweep_jobs:
if flush_cache_job is not None:
run_job(sync_job)
run_job(flush_cache_job)
run_job(perf_job)
run_job(sync_job)
def create_cmd_tags(cmd_line):
tags = {}
for param_value in cmd_line:
fields = param_value.split()
if len(fields) == 1:
tags[fields[0]] = None
elif len(fields) == 2:
tags[fields[0]] = fields[1]
return tags
def get_log_file(io_op_desc, cmd_line):
QUEUE_DEPTH = "--queue_depth"
BLOCK_SIZE = "--block_size"
SINGLE_SUBMIT = "--single_submit"
OVERLAP_EVENTS = "--overlap_events"
THREAD_COUNT = "--threads"
IO_PARALLEL = "--io_parallel"
tag_map = {
QUEUE_DEPTH: "d",
BLOCK_SIZE: "bs",
SINGLE_SUBMIT: "single",
OVERLAP_EVENTS: "overlap",
THREAD_COUNT: "t",
IO_PARALLEL: "p"
}
tag_default = {
QUEUE_DEPTH: 1,
BLOCK_SIZE: "1M",
SINGLE_SUBMIT: "block",
OVERLAP_EVENTS: "sequential",
THREAD_COUNT: 1,
IO_PARALLEL: 1
}
def get_default_value(tag):
value = tag_default[tag]
if tag in [SINGLE_SUBMIT, OVERLAP_EVENTS]:
return value
return f'{tag_map[tag]}{value}'
def get_config_value(tag, value):
tag_key = tag_map[tag]
if value is None:
return tag_key
return f'{tag_key}{value}'
tag_list = [SINGLE_SUBMIT, OVERLAP_EVENTS, THREAD_COUNT, IO_PARALLEL, QUEUE_DEPTH, BLOCK_SIZE]
log_tags = [io_op_desc]
cmd_tags = create_cmd_tags(cmd_line)
for tag in tag_list:
if tag in cmd_tags:
log_tags.append(get_config_value(tag, cmd_tags[tag]))
else:
log_tags.append(get_default_value(tag))
log_file = '_'.join(log_tags)
log_file += '.txt'
return log_file
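# Example (assuming the default thread count of 1): a read command line containing
# '--queue_depth 32 --block_size 128K --overlap_events --io_parallel 2' maps to the log
# file name 'read_block_overlap_t1_p2_d32_bs128K.txt'.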
def create_perf_jobs(io_op_desc, log_dir, cmd_lines):
py_cmd = ['python', os.path.join(script_path(), PERF_SCRIPT)]
perf_jobs = []
for cmd in cmd_lines:
log_file = os.path.join(log_dir, get_log_file(io_op_desc, cmd))
job = Job(cmd_line=py_cmd + cmd, output_file=log_file)
perf_jobs.append(job)
return perf_jobs
def script_path():
return os.path.dirname(os.path.realpath(sys.argv[0]))
def async_io_setup():
return AsyncIOBuilder().is_compatible()
def get_block_size_and_count(io_bytes):
block_size = 1
block_count = io_bytes
bytes_in_KB = 1024
while block_count % bytes_in_KB == 0:
block_size *= bytes_in_KB
block_count /= bytes_in_KB
return int(block_size), int(block_count)
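# For example, get_block_size_and_count(refine_integer_value('400M')) returns (1048576, 400),
# i.e. dd creates the read file from 400 blocks of 1 MiB each.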
def create_read_file(sweep_config):
read_folder = os.path.join(sweep_config.nvme_dir, f'{READ_IO_DIR}')
os.makedirs(read_folder, exist_ok=True)
read_file_name = os.path.join(read_folder, f'random_{sweep_config.io_size}B.pt')
block_size, block_count = get_block_size_and_count(refine_integer_value(sweep_config.io_size))
dd_job = Job(cmd_line=[f'dd if=/dev/urandom of={read_file_name} bs={block_size} count={block_count}'])
print(f'[Start] Create read file of {sweep_config.io_size} bytes by running {dd_job.cmd()} ....')
run_job(dd_job)
print(f'[Done] Create read file of {sweep_config.io_size} bytes by running {dd_job.cmd()} ....')
return read_folder, read_file_name
def remove_folder(folder):
assert os.path.isdir(folder), f"Error: cannot remove {folder} - folder not found"
shutil.rmtree(folder)
def run_read_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines):
read_folder, read_file_name = create_read_file(sweep_config)
read_option = f'--read_file {read_file_name}'
read_cmd_lines = [[f'{read_option} {sweep_config.other_options}'] + cmd for cmd in cmd_lines]
#dump_cmd_lines(read_cmd_lines)
log_folder = os.path.join(sweep_config.log_dir, f'{READ_LOG_DIR}')
os.makedirs(log_folder, exist_ok=True)
perf_jobs = create_perf_jobs(io_op_desc=READ_OP_DESC, log_dir=log_folder, cmd_lines=read_cmd_lines)
launch_sweep(sweep_jobs=perf_jobs, sync_job=sync_job, flush_cache_job=flush_cache_job)
remove_folder(read_folder)
def run_write_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines):
write_folder = os.path.join(sweep_config.nvme_dir, f'{WRITE_IO_DIR}')
os.makedirs(write_folder, exist_ok=True)
write_file_name = os.path.join(write_folder, f'random_{sweep_config.io_size}B.pt')
write_option = f'--write_size {sweep_config.io_size} --write_file {write_file_name}'
write_cmd_lines = [[f'{write_option} {sweep_config.other_options}'] + cmd for cmd in cmd_lines]
#dump_cmd_lines(write_cmd_lines)
log_folder = os.path.join(sweep_config.log_dir, f'{WRITE_LOG_DIR}')
os.makedirs(log_folder, exist_ok=True)
perf_jobs = create_perf_jobs(io_op_desc=WRITE_OP_DESC, log_dir=log_folder, cmd_lines=write_cmd_lines)
launch_sweep(sweep_jobs=perf_jobs, sync_job=sync_job, flush_cache_job=flush_cache_job)
remove_folder(write_folder)
def main():
print("Running performance sweep of deepspeed nvme library")
if not async_io_setup():
error_msg = """
Failing because environment is not properly configured for deepspeed async i/o module.
Possible fix: apt install libaio-dev.
"""
print(error_msg)
quit()
args = parse_arguments()
sweep_config = SweepConfig(args)
cmd_lines = get_sweep_cmd_lines(sweep_config.search_space)
if sweep_config.flush_cache:
flush_cache_job = Job(cmd_line=['sudo', 'bash -c', "'echo 1 > /proc/sys/vm/drop_caches'"])
else:
flush_cache_job = None
sync_job = Job(cmd_line=['sync'])
if sweep_config.read:
run_read_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines)
if sweep_config.write:
run_write_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines)
if __name__ == "__main__":
main()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import torch
import os
import time
from multiprocessing import Pool, Barrier
from test_ds_aio_utils import report_results, task_log, task_barrier
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import AsyncIOBuilder
def pre_basic(args, tid, read_op):
io_string = "Read" if read_op else "Write"
num_bytes = os.path.getsize(args.read_file) if read_op else args.write_size
file = args.read_file if read_op else f'{args.write_file}.{tid}'
task_log(tid, f'Allocate tensor of size {num_bytes} bytes')
buffer = get_accelerator().pin_memory(torch.empty(num_bytes, dtype=torch.uint8, device='cpu'))
task_log(tid, f'{io_string} file {file} of size {num_bytes} bytes from buffer on device {buffer.device}')
ctxt = {}
ctxt['file'] = file
ctxt['num_bytes'] = num_bytes
ctxt['buffer'] = buffer
ctxt['elapsed_sec'] = 0
return ctxt
def pre_basic_read(pool_params):
args, tid = pool_params
ctxt = pre_basic(args, tid, True)
return ctxt
def pre_basic_write(pool_params):
args, tid = pool_params
ctxt = pre_basic(args, tid, False)
return ctxt
def post_basic(pool_params):
_, _, ctxt = pool_params
ctxt["buffer"].detach()
ctxt["buffer"] = None
return ctxt
def main_basic_read(pool_params):
args, tid, ctxt = pool_params
start_time = time.time()
AsyncIOBuilder().load().aio_read(ctxt['buffer'], ctxt['file'], args.block_size, args.queue_depth,
args.single_submit, args.overlap_events, args.validate)
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def main_basic_write(pool_params):
args, tid, ctxt = pool_params
start_time = time.time()
AsyncIOBuilder().load().aio_write(ctxt['buffer'], ctxt['file'], args.block_size, args.queue_depth,
args.single_submit, args.overlap_events, args.validate)
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def get_schedule(args, read_op):
schedule = {}
if read_op:
schedule['pre'] = pre_basic_read
schedule['post'] = post_basic
schedule['main'] = main_basic_read
else:
schedule['pre'] = pre_basic_write
schedule['post'] = post_basic
schedule['main'] = main_basic_write
return schedule
def _aio_handle_tasklet(pool_params):
args, tid, read_op = pool_params
# Create schedule
schedule = get_schedule(args, read_op)
task_log(tid, f'schedule = {schedule}')
task_barrier(aio_barrier, args.threads)
# Run pre task
task_log(tid, f'running pre-task')
ctxt = schedule["pre"]((args, tid))
task_barrier(aio_barrier, args.threads)
# Run main tasks in a loop
ctxt["main_task_sec"] = 0
for i in range(args.loops):
task_log(tid, f'running main task {i}')
start_time = time.time()
ctxt = schedule["main"]((args, tid, ctxt))
task_barrier(aio_barrier, args.threads)
stop_time = time.time()
ctxt["main_task_sec"] += stop_time - start_time
# Run post task
task_log(tid, f'running post-task')
ctxt = schedule["post"]((args, tid, ctxt))
task_barrier(aio_barrier, args.threads)
return ctxt["main_task_sec"], ctxt["elapsed_sec"], ctxt["num_bytes"] * args.loops
def _init_tasklet(b):
global aio_barrier
aio_barrier = b
def aio_basic_multiprocessing(args, read_op):
b = Barrier(args.threads)
pool_params = [(args, p, read_op) for p in range(args.threads)]
with Pool(processes=args.threads, initializer=_init_tasklet, initargs=(b, )) as p:
pool_results = p.map(_aio_handle_tasklet, pool_params)
report_results(args, read_op, pool_results)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import os
import argparse
READ_SPEED = 'read_speed'
WRITE_SPEED = 'write_speed'
PERF_METRICS = [READ_SPEED, WRITE_SPEED]
METRIC_SEARCH = {READ_SPEED: 'E2E Read Speed', WRITE_SPEED: 'E2E Write Speed'}
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', type=str, required=True, help='Folder of statistics logs')
parser.add_argument('--metric',
type=str,
required=True,
help='Performance metric to report: [read_speed|write_speed]')
args = parser.parse_args()
print(f'args = {args}')
return args
def extract_value(key, file):
INVALID_PREFIXES = ["ds"]
for p in INVALID_PREFIXES:
if key.startswith(p):
return key
try:
if key[0] in ['t', 'd', 'p']:
return int(key[1:])
if key.startswith("bs"):
if key.endswith('K'):
v = key[2:].split('K')
return int(v[0]) * 1024
elif key.endswith('M'):
v = key[2:].split('M')
return int(v[0]) * 1024 * 1024
else:
return int(key[2:])
except:
print(f"{file}: extract_value fails on {key}")
return None
return key
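# For example: extract_value('d32', file) -> 32, extract_value('bs128K', file) -> 131072,
# while non-numeric tags such as 'read' or 'overlap' are returned unchanged.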
def get_file_key(file):
f, _ = os.path.splitext(os.path.basename(file))
fields = f.split('_')
values = [extract_value(k, file) for k in fields]
return tuple(values)
def get_thread_count(file):
f, _ = os.path.splitext(os.path.basename(file))
fields = f.split('_')
for key in fields:
if key[0] == 't':
return int(key[1:])
return 1
"""
Extract performance metric from log file.
Sample file lines are:
Task Read Latency = 0.031647682189941406 sec
Task Read Speed = 12.342926020792527 GB/sec
E2E Read Latency = 0.031697988510131836 sec
E2E Read Speed = 12.323337169333062 GB/sec
For the above sample, --metric = "read_speed" corresponds to "E2E Read Speed", and 12.32 will be returned.
"""
def get_metric(file, metric):
thread_count = get_thread_count(file)
with open(file) as f:
for line in f.readlines():
if line.startswith(METRIC_SEARCH[metric]):
if metric in [READ_SPEED, WRITE_SPEED]:
fields = line.split()
return float(fields[-2])
else:
fields = line.split('=')
return float(fields[-1])
return None
def validate_args(args):
if not args.metric in PERF_METRICS:
print(f'{args.metric} is not a valid performance metric')
return False
if not os.path.isdir(args.log_dir):
print(f'{args.log_dir} folder does not exist')
return False
return True
def get_results(log_files, metric):
results = {}
for f in log_files:
file_key = get_file_key(f)
value = get_metric(f, metric)
results[file_key] = value
return results
def get_sorted_results(log_dir, metric):
log_files = [f for f in os.listdir(log_dir) if os.path.isfile(os.path.join(log_dir, f))]
log_files_path = [os.path.join(log_dir, f) for f in log_files]
results = get_results(log_files_path, metric)
result_keys = list(results.keys())
sorted_keys = sorted(result_keys)
return sorted_keys, results
def main():
print("Parsing aio statistics")
args = parse_arguments()
if not validate_args(args):
quit()
sorted_keys, results = get_sorted_results(args.log_dir, args.metric)
for k in sorted_keys:
print(f'{k} = {results[k]}')
if __name__ == "__main__":
main()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import torch
import os
import time
from multiprocessing import Pool, Barrier
from test_ds_aio_utils import report_results, task_log, task_barrier
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import AsyncIOBuilder
def pre_handle(args, tid, read_op):
io_string = "Read" if read_op else "Write"
num_bytes = os.path.getsize(args.read_file) if read_op else args.write_size
file = args.read_file if read_op else f'{args.write_file}.{tid}'
io_parallel = args.io_parallel if args.io_parallel else 1
handle = AsyncIOBuilder().load().aio_handle(args.block_size, args.queue_depth, args.single_submit,
args.overlap_events, io_parallel)
task_log(tid, f'Created deepspeed aio handle')
if args.gpu:
buffer = torch.empty(num_bytes, dtype=torch.uint8, device=get_accelerator().device_name())
else:
if args.use_accelerator_pin_memory:
buffer = get_accelerator().pin_memory(torch.empty(num_bytes, dtype=torch.uint8, device='cpu'))
else:
buffer = handle.new_cpu_locked_tensor(num_bytes, torch.empty(0, dtype=torch.uint8))
task_log(tid, f'Allocate tensor of size {num_bytes} bytes')
ctxt = {}
ctxt['file'] = file
ctxt['num_bytes'] = num_bytes
ctxt['handle'] = handle
ctxt['buffer'] = buffer
ctxt['elapsed_sec'] = 0
task_log(tid, f'{io_string} file {file} of size {num_bytes} bytes from buffer on device {buffer.device}')
return ctxt
def pre_handle_read(pool_params):
args, tid = pool_params
ctxt = pre_handle(args, tid, True)
return ctxt
def pre_handle_write(pool_params):
args, tid = pool_params
ctxt = pre_handle(args, tid, False)
return ctxt
def post_handle(pool_params):
_, _, ctxt = pool_params
ctxt["buffer"].detach()
ctxt["buffer"] = None
return ctxt
def main_parallel_read(pool_params):
args, tid, ctxt = pool_params
handle = ctxt['handle']
start_time = time.time()
ret = handle.pread(ctxt['buffer'], ctxt['file'], args.validate, True)
assert ret != -1
handle.wait()
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def main_parallel_write(pool_params):
args, tid, ctxt = pool_params
handle = ctxt['handle']
start_time = time.time()
ret = handle.pwrite(ctxt['buffer'], ctxt['file'], args.validate, True)
assert ret != -1
handle.wait()
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def main_handle_read(pool_parms):
args, tid, ctxt = pool_parms
handle = ctxt['handle']
start_time = time.time()
ret = handle.read(ctxt['buffer'], ctxt['file'], args.validate)
assert ret != -1
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def main_handle_write(pool_parms):
args, tid, ctxt = pool_parms
handle = ctxt['handle']
start_time = time.time()
ret = handle.write(ctxt['buffer'], ctxt['file'], args.validate)
assert ret != -1
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def get_schedule(args, read_op):
schedule = {}
if read_op:
schedule['pre'] = pre_handle_read
schedule['post'] = post_handle
schedule['main'] = main_parallel_read if args.io_parallel else main_handle_read
else:
schedule['pre'] = pre_handle_write
schedule['post'] = post_handle
schedule['main'] = main_parallel_write if args.io_parallel else main_handle_write
return schedule
def _aio_handle_tasklet(pool_params):
args, tid, read_op = pool_params
# Create schedule
schedule = get_schedule(args, read_op)
task_log(tid, f'schedule = {schedule}')
task_barrier(aio_barrier, args.threads)
# Run pre task
task_log(tid, f'running pre-task')
ctxt = schedule["pre"]((args, tid))
task_barrier(aio_barrier, args.threads)
# Run main tasks in a loop
ctxt["main_task_sec"] = 0
for i in range(args.loops):
task_log(tid, f'running main task {i}')
start_time = time.time()
ctxt = schedule["main"]((args, tid, ctxt))
task_barrier(aio_barrier, args.threads)
stop_time = time.time()
ctxt["main_task_sec"] += stop_time - start_time
# Run post task
task_log(tid, f'running post-task')
ctxt = schedule["post"]((args, tid, ctxt))
task_barrier(aio_barrier, args.threads)
return ctxt["main_task_sec"], ctxt["elapsed_sec"], ctxt["num_bytes"] * args.loops
def _init_tasklet(b):
global aio_barrier
aio_barrier = b
def aio_handle_multiprocessing(args, read_op):
b = Barrier(args.threads)
pool_params = [(args, p, read_op) for p in range(args.threads)]
with Pool(processes=args.threads, initializer=_init_tasklet, initargs=(b, )) as p:
pool_results = p.map(_aio_handle_tasklet, pool_params)
report_results(args, read_op, pool_results)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
try:
# This is populated by setup.py
from .git_version_info_installed import * # noqa: F401
except ModuleNotFoundError:
import os
if os.path.isfile('version.txt'):
# Will be missing from checkouts that haven't been installed (e.g., readthedocs)
version = open('version.txt', 'r').read().strip()
else:
version = "0.0.0"
git_hash = '[none]'
git_branch = '[none]'
from .ops.op_builder.all_ops import ALL_OPS
installed_ops = dict.fromkeys(ALL_OPS.keys(), False)
compatible_ops = dict.fromkeys(ALL_OPS.keys(), False)
torch_info = {'version': "0.0", "cuda_version": "0.0", "hip_version": "0.0"}
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from datetime import timedelta
#############################################
# Torch distributed constants
#############################################
TORCH_DISTRIBUTED_DEFAULT_PORT = 29500
# Default process group wide timeout, if applicable.
# This only applies to the gloo and nccl backends
# (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).
# To make an attempt at backwards compatibility with THD, we use an
# extraordinarily high default timeout, given that THD did not have timeouts.
default_pg_timeout = timedelta(minutes=30)
INFERENCE_GENERIC_MODE = 'generic'
INFERENCE_SPECIALIZED_MODE = 'specialized'
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import sys
import types
import json
from typing import Optional, Union
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from packaging import version as pkg_version
from . import ops
from . import module_inject
from .accelerator import get_accelerator
from .runtime.engine import DeepSpeedEngine, DeepSpeedOptimizerCallable, DeepSpeedSchedulerCallable
from .runtime.engine import ADAM_OPTIMIZER, LAMB_OPTIMIZER
from .runtime.hybrid_engine import DeepSpeedHybridEngine
from .runtime.pipe.engine import PipelineEngine
from .inference.engine import InferenceEngine
from .inference.config import DeepSpeedInferenceConfig
from .runtime.lr_schedules import add_tuning_arguments
from .runtime.config import DeepSpeedConfig, DeepSpeedConfigError
from .runtime.activation_checkpointing import checkpointing
from .ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
from .module_inject import replace_transformer_layer, revert_transformer_layer
from .utils import log_dist, OnDevice, logger
from .comm.comm import init_distributed
from .runtime import zero
from .runtime import DeepSpeedOptimizer, ZeROOptimizer
from .pipe import PipelineModule
from .git_version_info import version, git_hash, git_branch
def _parse_version(version_str):
'''Parse a version string and extract the major, minor, and patch versions.'''
ver = pkg_version.parse(version_str)
return ver.major, ver.minor, ver.micro
# Export version information
__version__ = version
__version_major__, __version_minor__, __version_patch__ = _parse_version(__version__)
__git_hash__ = git_hash
__git_branch__ = git_branch
# Set to torch's distributed package or deepspeed.comm inside DeepSpeedEngine init.
dist = None
def initialize(args=None,
model: torch.nn.Module = None,
optimizer: Optional[Union[Optimizer, DeepSpeedOptimizerCallable]] = None,
model_parameters: Optional[torch.nn.Module] = None,
training_data: Optional[torch.utils.data.Dataset] = None,
lr_scheduler: Optional[Union[_LRScheduler, DeepSpeedSchedulerCallable]] = None,
mpu=None,
dist_init_required: Optional[bool] = None,
collate_fn=None,
config=None,
config_params=None):
"""Initialize the DeepSpeed Engine.
Arguments:
args: an object containing local_rank and deepspeed_config fields.
This is optional if `config` is passed.
model: Required: nn.module class before applying any wrappers
optimizer: Optional: a user defined Optimizer or Callable that returns an Optimizer object.
This overrides any optimizer definition in the DeepSpeed json config.
model_parameters: Optional: An iterable of torch.Tensors or dicts.
Specifies what Tensors should be optimized.
training_data: Optional: Dataset of type torch.utils.data.Dataset
lr_scheduler: Optional: Learning Rate Scheduler Object or a Callable that takes an Optimizer and returns a Scheduler object.
The scheduler object should define get_lr(), step(), state_dict(), and load_state_dict() methods.
mpu: Optional: A model parallelism unit object that implements
get_{model,data}_parallel_{rank,group,world_size}()
dist_init_required: Optional: None will auto-initialize torch distributed if needed,
otherwise the user can force it to be initialized or not via boolean.
collate_fn: Optional: Merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset.
config: Optional: Instead of requiring args.deepspeed_config, you can pass your DeepSpeed config
directly as an argument, either as a path or a dictionary.
config_params: Optional: Same as `config`, kept for backwards compatibility.
Returns:
A tuple of ``engine``, ``optimizer``, ``training_dataloader``, ``lr_scheduler``
* ``engine``: DeepSpeed runtime engine which wraps the client model for distributed training.
* ``optimizer``: Wrapped optimizer if a user defined ``optimizer`` is supplied, or if an
optimizer is specified in the json config; otherwise ``None``.
* ``training_dataloader``: DeepSpeed dataloader if ``training_data`` was supplied,
otherwise ``None``.
* ``lr_scheduler``: Wrapped lr scheduler if a user ``lr_scheduler`` is passed, or
if an ``lr_scheduler`` is specified in the JSON configuration; otherwise ``None``.
"""
log_dist("DeepSpeed info: version={}, git-hash={}, git-branch={}".format(__version__, __git_hash__,
__git_branch__),
ranks=[0])
# Disable zero.Init context if it's currently enabled
zero.partition_parameters.shutdown_init_context()
assert model is not None, "deepspeed.initialize requires a model"
global dist
from deepspeed import comm as dist
dist_backend = get_accelerator().communication_backend_name()
dist.init_distributed(dist_backend=dist_backend, dist_init_required=dist_init_required)
# Set config using config_params for backwards compat
if config is None and config_params is not None:
config = config_params
# Check for deepscale_config for backwards compat
if hasattr(args, "deepscale_config") and args.deepscale_config is not None:
logger.warning("************ --deepscale_config is deprecated, please use --deepspeed_config ************")
if hasattr(args, "deepspeed_config"):
assert (args.deepspeed_config is
None), "Not sure how to proceed, we were given both a deepscale_config and deepspeed_config"
args.deepspeed_config = args.deepscale_config
args.deepscale_config = None
# Check that we have only one config passed
if hasattr(args, "deepspeed_config") and args.deepspeed_config is not None:
assert config is None, "Not sure how to proceed, we were given deepspeed configs in the deepspeed arguments and deepspeed.initialize() function call"
config = args.deepspeed_config
assert config != None, "DeepSpeed requires --deepspeed_config to specify configuration file"
if not isinstance(model, PipelineModule):
config_class = DeepSpeedConfig(config, mpu)
if config_class.hybrid_engine.enabled:
engine = DeepSpeedHybridEngine(args=args,
model=model,
optimizer=optimizer,
model_parameters=model_parameters,
training_data=training_data,
lr_scheduler=lr_scheduler,
mpu=mpu,
dist_init_required=dist_init_required,
collate_fn=collate_fn,
config=config,
config_class=config_class)
else:
engine = DeepSpeedEngine(args=args,
model=model,
optimizer=optimizer,
model_parameters=model_parameters,
training_data=training_data,
lr_scheduler=lr_scheduler,
mpu=mpu,
dist_init_required=dist_init_required,
collate_fn=collate_fn,
config=config,
config_class=config_class)
else:
assert mpu is None, "mpu must be None with pipeline parallelism"
mpu = model.mpu()
config_class = DeepSpeedConfig(config, mpu)
engine = PipelineEngine(args=args,
model=model,
optimizer=optimizer,
model_parameters=model_parameters,
training_data=training_data,
lr_scheduler=lr_scheduler,
mpu=mpu,
dist_init_required=dist_init_required,
collate_fn=collate_fn,
config=config,
config_class=config_class)
return_items = [engine, engine.optimizer, engine.training_dataloader, engine.lr_scheduler]
return tuple(return_items)
def _add_core_arguments(parser):
r"""Helper (internal) function to update an argument parser with an argument group of the core DeepSpeed arguments.
The core set of DeepSpeed arguments include the following:
1) --deepspeed: boolean flag to enable DeepSpeed
2) --deepspeed_config <json file path>: path of a json configuration file to configure DeepSpeed runtime.
This is a helper function to the public add_config_arguments()
Arguments:
parser: argument parser
Return:
parser: Updated Parser
"""
group = parser.add_argument_group('DeepSpeed', 'DeepSpeed configurations')
group.add_argument('--deepspeed',
default=False,
action='store_true',
help='Enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend)')
group.add_argument('--deepspeed_config', default=None, type=str, help='DeepSpeed json configuration file.')
group.add_argument('--deepscale',
default=False,
action='store_true',
help='Deprecated: enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend)')
group.add_argument('--deepscale_config',
default=None,
type=str,
help='Deprecated DeepSpeed json configuration file.')
group.add_argument('--deepspeed_mpi',
default=False,
action='store_true',
help="Run via MPI, this will attempt to discover the necessary variables to initialize torch "
"distributed from the MPI environment")
return parser
def add_config_arguments(parser):
r"""Update the argument parser to enabling parsing of DeepSpeed command line arguments.
The set of DeepSpeed arguments include the following:
1) --deepspeed: boolean flag to enable DeepSpeed
2) --deepspeed_config <json file path>: path of a json configuration file to configure DeepSpeed runtime.
Arguments:
parser: argument parser
Return:
parser: Updated Parser
"""
parser = _add_core_arguments(parser)
return parser
def default_inference_config():
"""
Return a default DeepSpeed inference configuration dictionary.
"""
return DeepSpeedInferenceConfig().dict()
def init_inference(model, config=None, **kwargs):
"""Initialize the DeepSpeed InferenceEngine.
Description: all four cases below are valid and supported by the DeepSpeed init_inference() API.
# Case 1: user provides no config and no kwargs. Default config will be used.
.. code-block:: python
generator.model = deepspeed.init_inference(generator.model)
string = generator("DeepSpeed is")
print(string)
# Case 2: user provides a config and no kwargs. User supplied config will be used.
.. code-block:: python
generator.model = deepspeed.init_inference(generator.model, config=config)
string = generator("DeepSpeed is")
print(string)
# Case 3: user provides no config and uses keyword arguments (kwargs) only.
.. code-block:: python
generator.model = deepspeed.init_inference(generator.model,
mp_size=world_size,
dtype=torch.half,
replace_with_kernel_inject=True)
string = generator("DeepSpeed is")
print(string)
# Case 4: user provides config and keyword arguments (kwargs). Both config and kwargs are merged and kwargs take precedence.
.. code-block:: python
generator.model = deepspeed.init_inference(generator.model, config={"dtype": torch.half}, replace_with_kernel_inject=True)
string = generator("DeepSpeed is")
print(string)
Arguments:
model: Required: original nn.module object without any wrappers
config: Optional: instead of arguments, you can pass in a DS inference config dict or path to JSON file
Returns:
A deepspeed.InferenceEngine wrapped model.
"""
log_dist("DeepSpeed info: version={}, git-hash={}, git-branch={}".format(__version__, __git_hash__,
__git_branch__),
ranks=[0])
# Load config_dict from config first
if config is None:
config = {}
if isinstance(config, str):
with open(config, "r") as f:
config_dict = json.load(f)
elif isinstance(config, dict):
config_dict = config
else:
raise ValueError(f"'config' argument expected string or dictionary, got {type(config)}")
# Update with values from kwargs, ensuring no conflicting overlap between config and kwargs
overlap_keys = set(config_dict.keys()).intersection(kwargs.keys())
# If there is overlap, error out if values are different
for key in overlap_keys:
if config_dict[key] != kwargs[key]:
raise ValueError(f"Conflicting argument '{key}' in 'config':{config_dict[key]} and kwargs:{kwargs[key]}")
config_dict.update(kwargs)
ds_inference_config = DeepSpeedInferenceConfig(**config_dict)
engine = InferenceEngine(model, config=ds_inference_config)
return engine
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
import subprocess
import argparse
from .ops.op_builder.all_ops import ALL_OPS
from .git_version_info import installed_ops, torch_info
from deepspeed.accelerator import get_accelerator
GREEN = '\033[92m'
RED = '\033[91m'
YELLOW = '\033[93m'
END = '\033[0m'
SUCCESS = f"{GREEN} [SUCCESS] {END}"
OKAY = f"{GREEN}[OKAY]{END}"
WARNING = f"{YELLOW}[WARNING]{END}"
FAIL = f'{RED}[FAIL]{END}'
INFO = '[INFO]'
color_len = len(GREEN) + len(END)
okay = f"{GREEN}[OKAY]{END}"
warning = f"{YELLOW}[WARNING]{END}"
def op_report(verbose=True):
max_dots = 23
max_dots2 = 11
h = ["op name", "installed", "compatible"]
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print("DeepSpeed C++/CUDA extension op report")
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print("NOTE: Ops not installed will be just-in-time (JIT) compiled at\n"
" runtime if needed. Op compatibility means that your system\n"
" meet the required dependencies to JIT install the op.")
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print("JIT compiled ops requires ninja")
ninja_status = OKAY if ninja_installed() else FAIL
print('ninja', "." * (max_dots - 5), ninja_status)
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print(h[0], "." * (max_dots - len(h[0])), h[1], "." * (max_dots2 - len(h[1])), h[2])
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
installed = f"{GREEN}[YES]{END}"
no = f"{YELLOW}[NO]{END}"
for op_name, builder in ALL_OPS.items():
dots = "." * (max_dots - len(op_name))
is_compatible = OKAY if builder.is_compatible(verbose) else no
is_installed = installed if installed_ops.get(op_name, False) else no
dots2 = '.' * ((len(h[1]) + (max_dots2 - len(h[1]))) - (len(is_installed) - color_len))
print(op_name, dots, is_installed, dots2, is_compatible)
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
def ninja_installed():
try:
import ninja # noqa: F401
except ImportError:
return False
return True
def nvcc_version():
import torch.utils.cpp_extension
cuda_home = torch.utils.cpp_extension.CUDA_HOME
if cuda_home is None:
return f"{RED} [FAIL] cannot find CUDA_HOME via torch.utils.cpp_extension.CUDA_HOME={torch.utils.cpp_extension.CUDA_HOME} {END}"
try:
output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True)
except FileNotFoundError:
return f"{RED} [FAIL] nvcc missing {END}"
output_split = output.split()
release_idx = output_split.index("release")
release = output_split[release_idx + 1].replace(',', '').split(".")
return ".".join(release)
def debug_report():
max_dots = 33
report = [("torch install path", torch.__path__), ("torch version", torch.__version__),
("deepspeed install path", deepspeed.__path__),
("deepspeed info", f"{deepspeed.__version__}, {deepspeed.__git_hash__}, {deepspeed.__git_branch__}")]
if get_accelerator().device_name() == 'cuda':
hip_version = getattr(torch.version, "hip", None)
report.extend([("torch cuda version", torch.version.cuda), ("torch hip version", hip_version),
("nvcc version", (None if hip_version else nvcc_version())),
("deepspeed wheel compiled w.", f"torch {torch_info['version']}, " +
(f"hip {torch_info['hip_version']}" if hip_version else f"cuda {torch_info['cuda_version']}"))
])
else:
report.extend([("deepspeed wheel compiled w.", f"torch {torch_info['version']} ")])
print("DeepSpeed general environment info:")
for name, value in report:
print(name, "." * (max_dots - len(name)), value)
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--hide_operator_status',
action='store_true',
help='Suppress display of installation and compatibility statuses of DeepSpeed operators. ')
parser.add_argument('--hide_errors_and_warnings', action='store_true', help='Suppress warning and error messages.')
args = parser.parse_args()
return args
def main(hide_operator_status=False, hide_errors_and_warnings=False):
if not hide_operator_status:
op_report(verbose=not hide_errors_and_warnings)
debug_report()
def cli_main():
args = parse_arguments()
main(hide_operator_status=args.hide_operator_status, hide_errors_and_warnings=args.hide_errors_and_warnings)
if __name__ == "__main__":
main()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import json
from .constants import *
class ElasticityError(Exception):
"""
Base exception for all elasticity related errors
"""
class ElasticityConfigError(ElasticityError):
"""
Elasticity configuration error
"""
class ElasticityIncompatibleWorldSize(ElasticityError):
"""
Attempting to run a world size that is incompatible with a given elastic config
"""
class ElasticityConfig:
"""
Elastic config object, constructed from a param dictionary that only contains elastic
config parameters, example below:
If elasticity is enabled, user must specify (at least) max_train_batch_size
and micro_batch_sizes.
{
"enabled": true,
"max_train_batch_size": 2000,
"micro_batch_sizes": [2,4,6],
"min_gpus": 1,
"max_gpus" : 10000
"min_time": 20
"ignore_non_elastic_batch_info": false
"version": 0.1
}
"""
def __init__(self, param_dict):
self.enabled = param_dict.get(ENABLED, ENABLED_DEFAULT)
if self.enabled:
if MAX_ACCEPTABLE_BATCH_SIZE in param_dict:
self.max_acceptable_batch_size = param_dict[MAX_ACCEPTABLE_BATCH_SIZE]
else:
raise ElasticityConfigError(f"Elasticity config missing {MAX_ACCEPTABLE_BATCH_SIZE}")
if MICRO_BATCHES in param_dict:
self.micro_batches = param_dict[MICRO_BATCHES]
else:
raise ElasticityConfigError(f"Elasticity config missing {MICRO_BATCHES}")
else:
self.max_acceptable_batch_size = param_dict.get(MAX_ACCEPTABLE_BATCH_SIZE,
MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT)
self.micro_batches = param_dict.get(MICRO_BATCHES, MICRO_BATCHES_DEFAULT)
if not isinstance(self.micro_batches, list):
raise ElasticityConfigError(
f"Elasticity expected value of {MICRO_BATCHES} to be a "
f"list of micro batches, instead is: {type(self.micro_batches)}, containing: {self.micro_batches}")
if not all(map(lambda m: isinstance(m, int), self.micro_batches)):
raise ElasticityConfigError(f"Elasticity expected {MICRO_BATCHES} to only contain a list of integers, "
f"instead contains: f{self.micro_batches}")
if not all(map(lambda m: m > 0, self.micro_batches)):
raise ElasticityConfigError(f"Elasticity expected {MICRO_BATCHES} to only contain positive integers, "
f"instead contains: f{self.micro_batches}")
self.min_gpus = param_dict.get(MIN_GPUS, MIN_GPUS_DEFAULT)
self.max_gpus = param_dict.get(MAX_GPUS, MAX_GPUS_DEFAULT)
if self.min_gpus < 1 or self.max_gpus < 1:
raise ElasticityConfigError("Elasticity min/max gpus must be > 0, "
f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}")
if self.max_gpus < self.min_gpus:
raise ElasticityConfigError("Elasticity min_gpus cannot be greater than max_gpus, "
f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}")
self.model_parallel_size = param_dict.get(MODEL_PARALLEL_SIZE, MODEL_PARALLEL_SIZE_DEFAULT)
if self.model_parallel_size < 1:
raise ElasticityConfigError("Model-Parallel size cannot be less than 1, "
f"given model-parallel size: {self.model_parallel_size}")
self.num_gpus_per_node = param_dict.get(NUM_GPUS_PER_NODE, NUM_GPUS_PER_NODE_DEFAULT)
if self.num_gpus_per_node < 1:
raise ElasticityConfigError("Number of GPUs per node cannot be less than 1, "
f"given number of GPUs per node: {self.num_gpus_per_node}")
self.min_time = param_dict.get(MIN_TIME, MIN_TIME_DEFAULT)
if self.min_time < 0:
raise ElasticityConfigError(f"Elasticity min time needs to be >= 0: given {self.min_time}")
self.version = param_dict.get(VERSION, VERSION_DEFAULT)
self.prefer_larger_batch_size = param_dict.get(PREFER_LARGER_BATCH, PREFER_LARGER_BATCH_DEFAULT)
self.ignore_non_elastic_batch_info = param_dict.get(IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT)
def repr(self):
return self.__dict__
def __repr__(self):
return json.dumps(self.__dict__, sort_keys=True, indent=4)
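# A minimal usage sketch of ElasticityConfig (illustrative only; the dict keys
# mirror the constants imported above and the values are arbitrary examples,
# not recommendations).
def _example_elasticity_config():
    param_dict = {
        "enabled": True,
        "max_train_batch_size": 2000,
        "micro_batch_sizes": [2, 4, 6],
        "min_gpus": 1,
        "max_gpus": 10000,
        "min_time": 20,
    }
    config = ElasticityConfig(param_dict)
    # __repr__ dumps the parsed fields as pretty-printed JSON.
    return repr(config)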
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#########################################
# Elasticity
#########################################
''' Elasticity Utility in DeepSpeed can be used to create highly elastic jobs compatible
with a large number of GPUs. For elastic jobs, DeepSpeed will provide a batch size that
can support a large number of GPUs based on the user-specified parameters.
'''
FORMAT = '''
Elasticity should be enabled as:
"elasticity": {
"enabled": true,
"max_train_batch_size": 2000,
"micro_batch_sizes": [2,4,6],
"min_gpus": 1,
"max_gpus" : 10000,
"min_time": 20,
"prefer_larger_batch": true,
"ignore_non_elastic_batch_info": false,
"version": 0.1
}
'''
ELASTICITY = 'elasticity'
# Current elasticity version
LATEST_ELASTICITY_VERSION = 0.2
ENABLED = 'enabled'
ENABLED_DEFAULT = False
# Max acceptable train_batch_size
MAX_ACCEPTABLE_BATCH_SIZE = 'max_train_batch_size'
MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT = 2000
# Acceptable micro batch sizes, same as train_micro_batch_size_per_gpu
MICRO_BATCHES = 'micro_batch_sizes'
MICRO_BATCHES_DEFAULT = [2, 4, 6]
# Min/max of GPUs to search over
MIN_GPUS = 'min_gpus'
MIN_GPUS_DEFAULT = 1
MAX_GPUS = 'max_gpus'
MAX_GPUS_DEFAULT = 10000
NUM_GPUS_PER_NODE = 'num_gpus_per_node'
NUM_GPUS_PER_NODE_DEFAULT = 1
MODEL_PARALLEL_SIZE = "model_parallel_size"
MODEL_PARALLEL_SIZE_DEFAULT = 1
# Minimum running time (minutes) before the scheduler will scale us, 0 implies it's unknown
MIN_TIME = "min_time"
MIN_TIME_DEFAULT = 0
# When finding a suitable batch size, attempt to find one that is closest
# to the max train batch size given.
PREFER_LARGER_BATCH = 'prefer_larger_batch'
PREFER_LARGER_BATCH_DEFAULT = True
# In order to reduce confusion, if elastic mode is enabled we
# require (via assert) that no batch info is set outside of the
# elastic config. You can turn off this assert via this config
# but keep in mind that all batch info defined outside the
# elastic mode *will be ignored*.
IGNORE_NON_ELASTIC_BATCH_INFO = 'ignore_non_elastic_batch_info'
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT = False
# Version of elastic logic to use
VERSION = "version"
VERSION_DEFAULT = LATEST_ELASTICITY_VERSION
# Minimum deepspeed version to use elasticity
MINIMUM_DEEPSPEED_VERSION = "0.3.8"
# Environment variable storing elastic config from resource scheduler
DEEPSPEED_ELASTICITY_CONFIG = "DEEPSPEED_ELASTICITY_CONFIG"
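# Illustrative sketch (not used in this module): a resource scheduler is expected
# to serialize the elastic config it saw into the environment variable above so
# the runtime can verify it later, e.g.:
#
#   import json, os
#   os.environ[DEEPSPEED_ELASTICITY_CONFIG] = json.dumps({
#       "enabled": True,
#       "max_train_batch_size": 2000,
#       "micro_batch_sizes": [2, 4, 6],
#   })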
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .elasticity import compute_elastic_config, elasticity_enabled, ensure_immutable_elastic_config
from .utils import is_torch_elastic_compatible
from .constants import ENABLED, ENABLED_DEFAULT, ELASTICITY
if is_torch_elastic_compatible():
from .elastic_agent import DSElasticAgent
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
from typing import Any, Dict, Optional, Tuple
from datetime import datetime
from torch.distributed.elastic.agent.server.api import log, _get_socket_with_port
from torch.distributed.elastic.metrics import put_metric
from torch.distributed.elastic.agent.server.api import (
RunResult,
WorkerGroup,
WorkerSpec,
WorkerState,
)
from torch.distributed import Store
import time
import os
from torch.distributed.elastic.multiprocessing import start_processes
from torch.distributed.elastic.utils import macros
import shutil
import copy
from contextlib import closing
import subprocess
class DSElasticAgent(LocalElasticAgent):
def __init__(
self,
spec: WorkerSpec,
env: Dict,
start_method="spawn",
exit_barrier_timeout: float = 300,
log_dir: Optional[str] = None,
):
super().__init__(spec, start_method, exit_barrier_timeout, log_dir)
self.ds_env = env
@staticmethod
def _set_master_addr_port(store: Store, master_addr: Optional[str], master_port: Optional[int]):
if master_port is None:
sock = _get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
if master_addr is None:
# master_addr = _get_fq_hostname()
result = subprocess.check_output("hostname -I", shell=True)
master_addr = result.decode('utf-8').split()[0]
store.set("MASTER_ADDR", master_addr.encode(encoding="UTF-8"))
store.set("MASTER_PORT", str(master_port).encode(encoding="UTF-8"))
def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
spec = worker_group.spec
store = worker_group.store
assert store is not None
master_addr, master_port = super()._get_master_addr_port(store)
restart_count = spec.max_restarts - self._remaining_restarts
use_agent_store = spec.rdzv_handler.get_backend() == "static"
args: Dict[int, Tuple] = {}
envs: Dict[int, Dict[str, str]] = {}
for worker in worker_group.workers:
local_rank = worker.local_rank
worker_env_ds = copy.deepcopy(self.ds_env)
worker_env_elastic = {
"LOCAL_RANK": str(local_rank),
"RANK": str(worker.global_rank),
"GROUP_RANK": str(worker_group.group_rank),
"ROLE_RANK": str(worker.role_rank),
"ROLE_NAME": spec.role,
"LOCAL_WORLD_SIZE": str(spec.local_world_size),
"WORLD_SIZE": str(worker.world_size),
"GROUP_WORLD_SIZE": str(worker_group.group_world_size),
"ROLE_WORLD_SIZE": str(worker.role_world_size),
"MASTER_ADDR": master_addr,
"MASTER_PORT": str(master_port),
"TORCHELASTIC_RESTART_COUNT": str(restart_count),
"TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
"TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
"TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
"NCCL_ASYNC_ERROR_HANDLING": os.getenv("NCCL_ASYNC_ERROR_HANDLING", str(1)),
}
worker_env_ds.update(worker_env_elastic)
if "OMP_NUM_THREADS" in os.environ:
worker_env_ds["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]
envs[local_rank] = worker_env_ds
worker_args = list(spec.args)
worker_args = macros.substitute(worker_args, str(local_rank))
args[local_rank] = tuple(worker_args)
# scaling events do not count towards restarts (gets same attempt #)
# remove existing log dir if this restart is due to a scaling event
attempt_log_dir = os.path.join(self._log_dir, f"attempt_{restart_count}")
shutil.rmtree(attempt_log_dir, ignore_errors=True)
os.makedirs(attempt_log_dir)
assert spec.entrypoint is not None
self._pcontext = start_processes(
name=spec.role,
entrypoint=spec.entrypoint,
args=args,
envs=envs,
log_dir=attempt_log_dir,
start_method=self._start_method,
redirects=spec.redirects,
tee=spec.tee,
)
return self._pcontext.pids()
def _invoke_run(self, role: str = "default") -> RunResult:
# NOTE: currently only works for a single role
spec = self._worker_group.spec
role = spec.role
log.info(f"[{role}] starting workers for entrypoint: {spec.get_entrypoint_name()}")
self._initialize_workers(self._worker_group)
monitor_interval = spec.monitor_interval
rdzv_handler = spec.rdzv_handler
participants = rdzv_handler._state_holder.state.participants
while True:
assert self._worker_group.state != WorkerState.INIT
time.sleep(monitor_interval)
run_result = self._monitor_workers(self._worker_group)
state = run_result.state
self._worker_group.state = state
expire_time = datetime.utcnow() - (rdzv_handler._settings.keep_alive_interval *
rdzv_handler._settings.keep_alive_max_attempt)
_dead_nodes = [
node for node, last_heartbeat in rdzv_handler._state_holder.state.last_heartbeats.items()
if last_heartbeat < expire_time
]
put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts)
put_metric(f"workers.{role}.{state.name.lower()}", 1)
if state == WorkerState.SUCCEEDED:
log.info(f"[{role}] worker group successfully finished."
f" Waiting {self._exit_barrier_timeout} seconds for other agents to finish.")
self._exit_barrier()
return run_result
elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED
} or len(participants) > len(rdzv_handler._state_holder.state.participants):
if self._remaining_restarts > 0:
log.info(f"[{role}] Worker group {state.name}. "
f"{self._remaining_restarts}/{spec.max_restarts} attempts left;"
f" will restart worker group")
self._remaining_restarts -= 1
# rdzv_handler._state_holder.state.restart = False
self._restart_workers(self._worker_group)
participants = rdzv_handler._state_holder.state.participants
else:
self._stop_workers(self._worker_group)
self._worker_group.state = WorkerState.FAILED
self._exit_barrier()
return run_result
elif state == WorkerState.HEALTHY:
# membership changes do not count as retries
num_nodes_waiting = rdzv_handler.num_nodes_waiting()
group_rank = self._worker_group.group_rank
if num_nodes_waiting > 0:
log.info(f"[{role}] Detected {num_nodes_waiting} "
f"new nodes from group_rank={group_rank}; "
f"will restart worker group")
self._restart_workers(self._worker_group)
participants = rdzv_handler._state_holder.state.participants
else:
raise Exception(f"[{role}] Worker group in {state.name} state")
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
def is_torch_elastic_compatible():
'''
    Helper to check whether the installed torch version supports elastic
    training, which was introduced in torch 1.11.x.
'''
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 1 and TORCH_MINOR >= 11:
return True
else:
return False
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import json
import numpy as np
import math
from packaging import version as pkg_version
from .config import ElasticityConfig, ElasticityConfigError, ElasticityError, \
ElasticityIncompatibleWorldSize
from .constants import ELASTICITY, ENABLED, ENABLED_DEFAULT, LATEST_ELASTICITY_VERSION, \
MINIMUM_DEEPSPEED_VERSION, DEEPSPEED_ELASTICITY_CONFIG
from ..git_version_info import version as __version__
from ..utils import logger
# Thirty eight smallest highly composite numbers. The list should
# be enough to support up to 720K batch size.
HCN_LIST = [
1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680, 2520, 5040, 7560, 10080, 15120, 20160,
25200, 27720, 45360, 50400, 55440, 83160, 110880, 166320, 221760, 277200, 332640, 498960, 554400, 665280, 720720
]
def get_candidate_batch_sizes(base_list, max_acceptable_batch_size):
candidate_batch_size = []
for base in base_list:
if base >= max_acceptable_batch_size:
candidate_batch_size.append(base)
else:
value = max_acceptable_batch_size // base
index = np.argmax(np.asarray(HCN_LIST) > value)
candidate_batch_size.append(HCN_LIST[index - 1] * base)
candidate_batch_size = list(set(candidate_batch_size))
logger.info(f"Candidate batch size: {candidate_batch_size}")
return candidate_batch_size
def get_valid_gpus(batch_size, micro_batches, min_valid_gpus, max_valid_gpus):
valid_gpus = []
for micro_batch in micro_batches:
if batch_size % micro_batch == 0:
max_gpus = batch_size // micro_batch
if max_gpus >= min_valid_gpus and max_gpus <= max_valid_gpus:
valid_gpus.append(max_gpus)
# find all factors less than max_gpus / 2
for i in range(1, max_gpus // 2 + 1):
if i > max_valid_gpus:
break
if i < min_valid_gpus:
continue
if max_gpus % i == 0:
valid_gpus.append(i)
valid_gpus = set(valid_gpus)
valid_gpus = sorted(list(valid_gpus))
return valid_gpus
def get_best_candidates(candidate_batch_sizes, micro_batches, min_gpus, max_gpus, prefer_larger):
max_valid_gpus = 0
valid_gpus = None
final_batch_size = int(min(micro_batches))
for batch_size in candidate_batch_sizes:
current_valid_gpus = get_valid_gpus(batch_size, micro_batches, min_gpus, max_gpus)
if (len(current_valid_gpus) > max_valid_gpus or (len(current_valid_gpus) == max_valid_gpus and
((prefer_larger and batch_size > final_batch_size) or
(not prefer_larger and batch_size < final_batch_size)))):
max_valid_gpus = len(current_valid_gpus)
valid_gpus = current_valid_gpus
final_batch_size = batch_size
return final_batch_size, valid_gpus
def _get_compatible_gpus_v01(micro_batches,
max_acceptable_batch_size,
min_gpus=None,
max_gpus=None,
prefer_larger=True):
    '''We use two heuristics to compute the batch size
    1. We use the Lowest Common Multiple (LCM) of the micro-batches
    as the base batch size and scale it by a HCN such that the result is
    the largest batch size less than the max_acceptable batch size
    2. We use each of the micro batches as a base and scale it
    by a HCN such that the result is the largest batch size less than the
    max_acceptable batch size.
    We then use brute force to count the number of compatible GPU counts for
    each of the aforementioned cases, and return the batch size with the largest
    number of compatible GPU counts in the min-max GPU range if provided, otherwise
    we return the batch size with the largest number of total compatible GPU counts.
    Returns:
        final_batch_size
        valid_gpus
    '''
min_gpus = min_gpus or 1
max_gpus = max_gpus or max_acceptable_batch_size // min(micro_batches)
    if not all(mb <= max_acceptable_batch_size for mb in micro_batches):
        raise ValueError("All micro batches must be less than or equal to "
                         f"max_acceptable_batch_size: {max_acceptable_batch_size}")
lcm = np.lcm.reduce(micro_batches)
base_list = []
base_list.extend(micro_batches)
base_list.append(lcm)
candidate_batch_sizes = get_candidate_batch_sizes(base_list, max_acceptable_batch_size)
final_batch_size, valid_gpus = get_best_candidates(candidate_batch_sizes, micro_batches, min_gpus, max_gpus,
prefer_larger)
return final_batch_size, valid_gpus
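# Brief worked sketch of the v0.1 heuristic above (illustrative; not invoked
# anywhere in this module). With micro_batches=[2, 4, 6] and
# max_acceptable_batch_size=2000, the candidate bases are [2, 4, 6, 12] (the
# micro batches plus their LCM); each base is scaled by the largest HCN that
# keeps the product under 2000 (e.g. 840 * 2 = 1680), and the candidate
# divisible by the most GPU counts in range wins:
#
#   final_batch_size, valid_gpus = _get_compatible_gpus_v01(
#       micro_batches=[2, 4, 6], max_acceptable_batch_size=2000)
#   # expected: final_batch_size == 1680, valid_gpus == all divisors of 840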
def _get_compatible_gpus_v02(micro_batches,
max_acceptable_batch_size,
current_num_gpus,
min_gpus=None,
max_gpus=None,
prefer_larger=True,
num_gpus_per_node=1,
model_parallel_size=1):
'''
Returns:
final_batch_size
valid_gpus
micro-batch size
'''
if num_gpus_per_node % model_parallel_size != 0:
        raise ElasticityError(
            f"In Elasticity v0.2, number of GPUs per node: " \
            f"{num_gpus_per_node} should be divisible by " \
            f"model parallel size {model_parallel_size}")
def get_microbatch(final_batch_size):
candidate_microbatch = None
for micro_batch in micro_batches:
if final_batch_size // current_num_gpus % micro_batch == 0:
                if candidate_microbatch is None:
candidate_microbatch = micro_batch
if prefer_larger and candidate_microbatch < micro_batch:
candidate_microbatch = micro_batch
return candidate_microbatch
dp_size_per_node = num_gpus_per_node // model_parallel_size
final_batch_size, valid_world_size = _get_compatible_gpus_v01(
micro_batches,
int(max_acceptable_batch_size / dp_size_per_node),
int(min_gpus / num_gpus_per_node),
int(max_gpus / num_gpus_per_node), # Passing number of max nodes as Elasticity v2 works at node level
prefer_larger=prefer_larger)
final_batch_size = int(final_batch_size) * dp_size_per_node
valid_dp_world_size = [i * dp_size_per_node for i in valid_world_size]
if current_num_gpus // model_parallel_size in valid_dp_world_size:
candidate_microbatch = get_microbatch(final_batch_size)
return final_batch_size, valid_dp_world_size, candidate_microbatch
current_dp_size = (current_num_gpus / num_gpus_per_node) * dp_size_per_node
candidate_batch_sizes = []
for micro_batch in micro_batches:
min_batch_size = micro_batch * current_dp_size
factor = math.floor(max_acceptable_batch_size / float(min_batch_size))
candidate_batch_sizes.append(factor * min_batch_size)
used_microbatch = None
if prefer_larger:
candidate_batch_size = max(candidate_batch_sizes)
else:
candidate_batch_size = min(candidate_batch_sizes)
candidate_microbatch = get_microbatch(candidate_batch_size)
return candidate_batch_size, [int(current_dp_size)], candidate_microbatch
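# Illustrative call for the v0.2 path above (all values are arbitrary examples).
# v0.2 works at node granularity: with 8 GPUs per node and model_parallel_size=2,
# each node contributes 4 data-parallel ranks, so the v0.1 search runs over node
# counts and the result is scaled back up by dp_size_per_node:
#
#   batch, valid_dp_sizes, micro = _get_compatible_gpus_v02(
#       micro_batches=[2, 4, 6],
#       max_acceptable_batch_size=2000,
#       current_num_gpus=16,
#       min_gpus=1,
#       max_gpus=10000,
#       num_gpus_per_node=8,
#       model_parallel_size=2)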
def _compatible_ds_version_check(target_deepspeed_version: str):
min_version = pkg_version.parse(MINIMUM_DEEPSPEED_VERSION)
target_version = pkg_version.parse(target_deepspeed_version)
err_str = f"Target deepspeed version of {target_deepspeed_version} is not compatible " \
f"with minimum version {MINIMUM_DEEPSPEED_VERSION} supporting elasticity."
if target_version < min_version:
raise ElasticityError(err_str)
return True
def elasticity_enabled(ds_config: dict):
if ELASTICITY not in ds_config:
return False
return ds_config[ELASTICITY].get(ENABLED, ENABLED_DEFAULT)
def ensure_immutable_elastic_config(runtime_elastic_config_dict: dict):
"""
Ensure the resource scheduler saw the same elastic config we are using at runtime
"""
if DEEPSPEED_ELASTICITY_CONFIG in os.environ:
scheduler_elastic_config_dict = json.loads(os.environ[DEEPSPEED_ELASTICITY_CONFIG])
scheduler_elastic_config = ElasticityConfig(scheduler_elastic_config_dict)
runtime_elastic_config = ElasticityConfig(runtime_elastic_config_dict)
err_str = "Elastic config '{}={}' seen by resource scheduler does not match config passed to runtime {}={}"
if runtime_elastic_config.max_acceptable_batch_size != scheduler_elastic_config.max_acceptable_batch_size:
raise ElasticityConfigError(
err_str.format('max_acceptable_batch_size', scheduler_elastic_config.max_acceptable_batch_size,
'max_acceptable_batch_size', runtime_elastic_config.max_acceptable_batch_size))
if runtime_elastic_config.micro_batches != scheduler_elastic_config.micro_batches:
raise ElasticityConfigError(
err_str.format('micro_batches', scheduler_elastic_config.micro_batches, 'micro_batches',
runtime_elastic_config.micro_batches))
if runtime_elastic_config.version != scheduler_elastic_config.version:
raise ElasticityConfigError(
err_str.format('version', scheduler_elastic_config.version, 'version', runtime_elastic_config.version))
else:
logger.warning("Unable to find DEEPSPEED_ELASTICITY_CONFIG environment variable, cannot " \
"guarantee resource scheduler will scale this job using compatible GPU counts.")
def compute_elastic_config(ds_config: dict, target_deepspeed_version: str, world_size=0, return_microbatch=False):
"""Core deepspeed elasticity API. Given an elastic config (similar to the example below)
DeepSpeed will compute a total train batch size corresponding valid GPU count list that
provides a high level of elasticity. Elasticity in this case means we are safe to scale
the training job up/down across the GPU count list *without* any negative impacts on
training convergence. This is achievable primarily due to DeepSpeed's gradient accumulation
feature which allows us to decompose a global training batch size into:
micro-batch-size * gradient-accumulation-steps * world-size.
"elasticity": {
"enabled": true,
"max_train_batch_size": 2000,
"micro_batch_sizes": [2,4,6],
"min_gpus": 1,
"max_gpus" : 10000
"min_time": 20
"version": 0.1
}
Intended to be called both by scheduling infrastructure and deepspeed runtime.
For the same `ds_config` we should return deterministic results.
Args:
ds_config (dict): DeepSpeed config dictionary/json
target_deepspeed_version (str): When called from scheduling
infrastructure we want to ensure that the target deepspeed version is
compatible with the elasticity version used in the backend.
world_size (int, optional): Intended/current DP world size, will do some sanity
checks to ensure world size is actually valid with the config.
return_microbatch (bool, optional): whether to return micro batch size or not.
Raises:
ElasticityConfigError: Missing required elasticity config or elasticity disabled
ElasticityError: If target deepspeed version is not compatible with current version
Returns:
final_batch_size (int): total batch size used for training
valid_gpus (list(int)): list of valid GPU counts with this config
micro_batch_size (int, optional): if world_size is provided will return
specific micro batch size
"""
if not isinstance(ds_config, dict):
raise ValueError("Expected ds_config to be a dictionary but received " \
f"a {type(ds_config)}, containing: {ds_config}")
if ELASTICITY not in ds_config:
raise ElasticityConfigError(f"'{ELASTICITY}' is missing from config json," \
" please add it if running an elastic training job.")
elastic_config_dict = ds_config[ELASTICITY]
if not elastic_config_dict.get(ENABLED, ENABLED_DEFAULT):
raise ElasticityConfigError("Elasticity is disabled, please enable it " \
"('enabled':true) if running an elastic training job.")
elastic_config = ElasticityConfig(elastic_config_dict)
model_parallel_size = elastic_config.model_parallel_size
num_gpus_per_node = elastic_config.num_gpus_per_node
if model_parallel_size > 1 and float(elastic_config.version) != 0.2:
raise ElasticityConfigError(f"Elasticity V{elastic_config.version} " \
f"does not support model-parallel training. Given model-parallel size: " \
f"{model_parallel_size}")
if float(elastic_config.version) > LATEST_ELASTICITY_VERSION:
raise ElasticityConfigError("Attempting to run elasticity version " \
f"{elastic_config.version} but runtime only supports up " \
f"to {LATEST_ELASTICITY_VERSION}")
# Ensure target deepspeed version works with intended elasticity version
if not _compatible_ds_version_check(target_deepspeed_version):
raise ElasticityError("Unable to run elasticity on target deepspeed version of" \
f" {target_deepspeed_version}, currently {__version__}")
if float(elastic_config.version) == 0.1:
final_batch_size, valid_gpus = _get_compatible_gpus_v01(
micro_batches=elastic_config.micro_batches,
max_acceptable_batch_size=elastic_config.max_acceptable_batch_size,
min_gpus=elastic_config.min_gpus,
max_gpus=elastic_config.max_gpus,
prefer_larger=elastic_config.prefer_larger_batch_size)
# ensure batch size is int dtype
final_batch_size = int(final_batch_size)
elif float(elastic_config.version) == 0.2:
if world_size != 0:
current_num_gpus = world_size
else:
if "WORLD_SIZE" in os.environ and \
os.getenv('WORLD_SIZE').isnumeric():
current_num_gpus = int(os.getenv('WORLD_SIZE'))
else:
WORLD_SIZE = os.getenv('WORLD_SIZE')
raise ElasticityConfigError(
'Elasticity V 0.2 needs WORLD_SIZE '\
'to compute valid batch size. '\
'Either give it as argument to function compute_elastic_config '\
'or set it as an environment variable. '\
f'Value of WORLD_SIZE as environment variable is {WORLD_SIZE}')
final_batch_size, valid_gpus, candidate_microbatch_size = _get_compatible_gpus_v02(
micro_batches=elastic_config.micro_batches,
max_acceptable_batch_size=elastic_config.max_acceptable_batch_size,
current_num_gpus=current_num_gpus,
min_gpus=elastic_config.min_gpus,
max_gpus=elastic_config.max_gpus,
prefer_larger=elastic_config.prefer_larger_batch_size,
num_gpus_per_node=num_gpus_per_node,
model_parallel_size=model_parallel_size)
# ensure batch size is int dtype
final_batch_size = int(final_batch_size)
else:
raise NotImplementedError(f"Unable to find elastic logic for version: {elastic_config.version}")
logger.info(f"Valid World Size (GPUs / Model Parallel Size): {valid_gpus}")
if world_size > 0:
if world_size not in valid_gpus:
raise ElasticityIncompatibleWorldSize(f"World size ({world_size}) is not valid " \
f"with the current list of valid GPU counts: {valid_gpus}")
# Pick largest valid micro batch size
micro_batch_size = None
for mbsz in sorted(list(set(elastic_config.micro_batches)), reverse=True):
if final_batch_size // world_size % mbsz == 0:
micro_batch_size = mbsz
break
assert micro_batch_size is not None, "Unable to find divisible micro batch size" \
f" world_size={world_size}, final_batch_size={final_batch_size}, and " \
f" micro_batches={elastic_config.micro_batches}."
return final_batch_size, valid_gpus, micro_batch_size
if return_microbatch:
# Pick a valid micro batch size
if float(elastic_config.version) == 0.2:
return final_batch_size, valid_gpus, candidate_microbatch_size
else:
micro_batch_size = None
for mbsz in sorted(list(set(elastic_config.micro_batches)), reverse=True):
if final_batch_size // world_size % mbsz == 0:
micro_batch_size = mbsz
break
assert micro_batch_size is not None, "Unable to find divisible micro batch size" \
f" world_size={world_size}, final_batch_size={final_batch_size}, and " \
f" micro_batches={elastic_config.micro_batches}."
return final_batch_size, valid_gpus, micro_batch_size
return final_batch_size, valid_gpus
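# A hedged usage sketch of the API above (illustrative; the config keys follow
# the docstring example and __version__ is this package's own version string).
def _example_compute_elastic_config():
    ds_config = {
        "elasticity": {
            "enabled": True,
            "max_train_batch_size": 2000,
            "micro_batch_sizes": [2, 4, 6],
            "min_gpus": 1,
            "max_gpus": 10000,
            "version": 0.1
        }
    }
    # With version 0.1 and no world_size given, only the batch size and the
    # list of compatible GPU counts are returned.
    final_batch_size, valid_gpus = compute_elastic_config(ds_config=ds_config,
                                                          target_deepspeed_version=__version__)
    return final_batch_size, valid_gpus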
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
DeepSpeed runner is the main front-end for launching multi-worker
training jobs with DeepSpeed. By default this uses pdsh to ssh in
parallel into multiple worker nodes and launch all the necessary
processes per rank for training.
"""
import os
import re
import sys
import json
import base64
import argparse
import subprocess
import collections
from copy import deepcopy
import signal
import time
from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner, SlurmRunner, MPICHRunner, IMPIRunner
from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER, SLURM_LAUNCHER, MPICH_LAUNCHER, IMPI_LAUNCHER
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
from ..nebula.constants import NEBULA_EXPORT_ENVS
from ..utils import logger
from ..autotuning import Autotuner
from deepspeed.accelerator import get_accelerator
DLTS_HOSTFILE = "/job/hostfile"
EXPORT_ENVS = ['MLFLOW', 'NCCL', 'PYTHON', 'MV2', 'UCX']
EXPORT_ENVS += NEBULA_EXPORT_ENVS
DEEPSPEED_ENVIRONMENT_NAME = ".deepspeed_env"
DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.']
PDSH_MAX_FAN_OUT = 1024
def parse_args(args=None):
parser = argparse.ArgumentParser(description="DeepSpeed runner to help launch distributed "
"multi-node/multi-gpu training jobs.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-H",
"--hostfile",
type=str,
default=DLTS_HOSTFILE,
help="Hostfile path (in MPI style) that defines the "
"resource pool available to the job (e.g., "
"worker-0 slots=4)")
parser.add_argument("-i",
"--include",
type=str,
default="",
help='''Specify hardware resources to use during execution.
String format is
NODE_SPEC[@NODE_SPEC ...],
where
NODE_SPEC=NAME[:SLOT[,SLOT ...]].
If :SLOT is omitted, include all slots on that host.
Example: -i "worker-0@worker-1:0,2" will use all slots
on worker-0 and slots [0, 2] on worker-1.
''')
parser.add_argument("-e",
"--exclude",
type=str,
default="",
help='''Specify hardware resources to NOT use during execution.
Mutually exclusive with --include. Resource formatting
is the same as --include.
Example: -e "worker-1:0" will use all available
resources except slot 0 on worker-1.
''')
parser.add_argument("--num_nodes",
type=int,
default=-1,
help="Total number of worker nodes to run on, this will use "
"the top N hosts from the given hostfile.")
parser.add_argument("--min_elastic_nodes",
type=int,
default=-1,
help="Minimum number of nodes to run elastic training on. "
"Default is 1 when elastic training is enabled")
parser.add_argument("--max_elastic_nodes",
type=int,
default=-1,
help="Maximum number of nodes to run elastic training on. "
"Default is num_nodes when elastic training is enabled")
parser.add_argument("--num_gpus",
"--num_accelerators",
type=int,
default=-1,
help="Max number of GPUs to use on each node, will use "
"[0:N) GPU ids on each node.")
parser.add_argument("--master_port",
default=TORCH_DISTRIBUTED_DEFAULT_PORT,
type=int,
help="(optional) Port used by PyTorch distributed for "
"communication during training.")
parser.add_argument("--master_addr",
default="",
type=str,
help="(optional) IP address of node 0, will be "
"inferred via 'hostname -I' if not specified.")
parser.add_argument("--launcher",
default=PDSH_LAUNCHER,
type=str,
help="(optional) choose launcher backend for multi-node "
"training. Options currently include PDSH, OpenMPI, MVAPICH, SLURM, MPICH, IMPI.")
parser.add_argument("--launcher_args",
default="",
type=str,
help="(optional) pass launcher specific arguments as a "
"single quoted argument.")
parser.add_argument("--module",
action="store_true",
help="Change each process to interpret the launch "
"script as a Python module, executing with the same "
"behavior as 'python -m'.")
parser.add_argument("--no_python",
action="store_true",
help="Skip prepending the training script with "
"'python' - just execute it directly.")
parser.add_argument("--no_local_rank",
action="store_true",
help="Do not pass local_rank as an argument when calling "
"the user's training script.")
parser.add_argument("--no_ssh_check",
action="store_true",
help="Do not perform ssh check in multi-node launcher model")
parser.add_argument("--force_multi",
action="store_true",
help="Force multi-node launcher mode, helps in cases where user "
"wants to launch on single remote node.")
parser.add_argument("--save_pid",
action="store_true",
help="Save file containing launcher process id (pid) at /tmp/<main-pid>.ds, "
"where <main-pid> is the pid of the first process that invoked `deepspeed`. "
"Useful when launching deepspeed processes programmatically.")
parser.add_argument("--enable_each_rank_log",
default="None",
type=str,
help="redirect the stdout and stderr from each rank into different log files")
parser.add_argument("--autotuning",
default="",
choices=["tune", "run"],
type=str,
help="Run DeepSpeed autotuner to discover optimal configuration parameters "
"before running job.")
parser.add_argument("--elastic_training",
action="store_true",
help="Enable elastic training support in DeepSpeed.")
parser.add_argument("user_script", type=str, help="User script to launch, followed by any required "
"arguments.")
parser.add_argument('user_args', nargs=argparse.REMAINDER)
parser.add_argument("--bind_cores_to_rank",
action="store_true",
help="Bind each rank to different cores of the host")
parser.add_argument("--bind_core_list",
type=str,
default=None,
help="List of cores to bind to with comma separated list of "
"numbers and range. i.e. 1,3-5,7 => [1,3,4,5,7]. When not "
"specified, all cores on system would be used rank binding")
return parser.parse_args(args=args)
def fetch_hostfile(hostfile_path):
if not os.path.isfile(hostfile_path):
logger.warning("Unable to find hostfile, will proceed with training "
"with local resources only.")
return None
# e.g., worker-0 slots=16
with open(hostfile_path, 'r') as fd:
hostfile_text = fd.readlines()
return _parse_hostfile(hostfile_text)
def _parse_hostfile(hostfile_lines):
# Regex matches one or more non-whitespace characters (\S+) at the start of
# the line, followed by one or more whitespace characters (\s+), followed
# by the string "slots=", followed by one or more digits (\d+).
pattern = r'^(\S+)\s+slots=(\d+)'
resource_pool = collections.OrderedDict()
for line in hostfile_lines:
line = line.strip()
match = re.search(pattern, line)
if line.startswith("#") or line == "":
# hostfile comment or empty line, ignore
continue
elif match:
host = match.group(1)
num_slots = int(match.group(2))
if host in resource_pool:
logger.error(f"Bad hostfile text: {hostfile_lines}")
raise ValueError(f"Hostfile contains multiple entries for {host}, unable to proceed with launching")
resource_pool[host] = num_slots
else:
logger.error(f"Bad hostfile text: {hostfile_lines}")
raise ValueError("Hostfile contains a bad entry: {line}, unable to proceed with launching")
if len(resource_pool) == 0:
logger.error(f"Bad hostfile text: {hostfile_lines}")
raise ValueError("Hostfile is empty or not formatted correctly, unable to proceed with launching.")
return resource_pool
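# Illustrative hostfile and its parsed form (hostnames are placeholders):
#
#   worker-0 slots=4
#   worker-1 slots=4
#
#   _parse_hostfile(["worker-0 slots=4", "worker-1 slots=4"])
#   # -> OrderedDict([('worker-0', 4), ('worker-1', 4)])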
def _stable_remove_duplicates(data):
# Create a new list in the same order as original but with duplicates
# removed, should never be more than ~16 elements so simple is best
new_list = []
for x in data:
if x not in new_list:
new_list.append(x)
return new_list
def parse_resource_filter(host_info, include_str="", exclude_str=""):
'''Parse an inclusion or exclusion string and filter a hostfile dictionary.
String format is NODE_SPEC[@NODE_SPEC ...], where
NODE_SPEC = NAME[:SLOT[,SLOT ...]].
If :SLOT is omitted, include/exclude all slots on that host.
Examples:
include_str="worker-0@worker-1:0,2" will use all slots on worker-0 and
slots [0, 2] on worker-1.
exclude_str="worker-1:0" will use all available resources except
slot 0 on worker-1.
'''
# Constants that define our syntax
NODE_SEP = '@'
SLOT_LIST_START = ':'
SLOT_SEP = ','
# Ensure include/exclude are mutually exclusive
if (include_str != "") and (exclude_str != ""):
raise ValueError('include_str and exclude_str are mutually exclusive.')
# no-op
if (include_str == "") and (exclude_str == ""):
return host_info
# Either build from scratch or remove items
filtered_hosts = dict()
if include_str:
parse_str = include_str
if exclude_str != "":
filtered_hosts = deepcopy(host_info)
parse_str = exclude_str
# foreach node in the list
for node_config in parse_str.split(NODE_SEP):
# Node can either be alone or node:slot,slot,slot
if SLOT_LIST_START in node_config:
hostname, slots = node_config.split(SLOT_LIST_START)
slots = [int(x) for x in slots.split(SLOT_SEP)]
# sanity checks
if hostname not in host_info:
raise ValueError(f"Hostname '{hostname}' not found in hostfile")
for slot in slots:
if slot not in host_info[hostname]:
raise ValueError(f"No slot '{slot}' specified on host '{hostname}'")
# If include string, build the list from here
if include_str:
filtered_hosts[hostname] = slots
elif exclude_str:
for slot in slots:
logger.info(f'removing {slot} from {hostname}')
filtered_hosts[hostname].remove(slot)
# User just specified the whole node
else:
hostname = node_config
# sanity check hostname
if hostname not in host_info:
raise ValueError(f"Hostname '{hostname}' not found in hostfile")
if include_str:
filtered_hosts[hostname] = host_info[hostname]
elif exclude_str:
filtered_hosts[hostname] = []
# Post-processing to remove duplicates and empty nodes
del_keys = []
for hostname in filtered_hosts:
# Remove duplicates
filtered_hosts[hostname] = _stable_remove_duplicates(filtered_hosts[hostname])
# Remove empty hosts
if len(filtered_hosts[hostname]) == 0:
del_keys.append(hostname)
for name in del_keys:
del filtered_hosts[name]
# Lastly, go over filtered_hosts and convert to a OrderedDict() to ensure
# we map ranks to nodes correctly by maintaining host_info ordering.
ordered_hosts = collections.OrderedDict()
for host in host_info:
if host in filtered_hosts:
ordered_hosts[host] = filtered_hosts[host]
return ordered_hosts
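# Worked example for the filter above (hostnames and slot counts are arbitrary):
#
#   hosts = {'worker-0': [0, 1, 2, 3], 'worker-1': [0, 1, 2, 3]}
#   parse_resource_filter(hosts, include_str="worker-0@worker-1:0,2")
#   # -> worker-0: [0, 1, 2, 3], worker-1: [0, 2]
#   parse_resource_filter(hosts, exclude_str="worker-1:0")
#   # -> worker-0: [0, 1, 2, 3], worker-1: [1, 2, 3]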
def parse_inclusion_exclusion(resource_pool, inclusion, exclusion):
active_resources = collections.OrderedDict()
for hostname, slots in resource_pool.items():
active_resources[hostname] = list(range(slots))
return parse_resource_filter(active_resources, include_str=inclusion, exclude_str=exclusion)
def encode_world_info(world_info):
world_info_json = json.dumps(world_info).encode('utf-8')
world_info_base64 = base64.urlsafe_b64encode(world_info_json).decode('utf-8')
return world_info_base64
def run_autotuning(args, active_resources):
tuner = Autotuner(args, active_resources)
logger.info("[Start] Running autotuning")
tuner.tune()
tuner.print_tuning_results()
logger.info("[End] Running autotuning")
tuner.write_optimal_config()
if args.autotuning == "run":
tuner.run_after_tuning()
def parse_num_nodes(str_num_nodes: str, elastic_training: bool):
node_list = str_num_nodes.split(":")
if len(node_list) == 1:
min_nodes, max_nodes = int(node_list[0]), -1
elif len(node_list) == 2 and elastic_training:
min_nodes, max_nodes = int(node_list[0]), int(node_list[1])
elif len(node_list) == 2 and not elastic_training:
raise RuntimeError("MIN:MAX format is only supported in elastic training")
else:
raise RuntimeError("num_nodes {} is not in MIN:MAX format".format(str_num_nodes))
return min_nodes, max_nodes
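# Examples (illustrative): parse_num_nodes("4", False) -> (4, -1);
# parse_num_nodes("4:16", True) -> (4, 16); the MIN:MAX form without
# elastic_training raises RuntimeError.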
def main(args=None):
args = parse_args(args)
if args.elastic_training:
assert args.master_addr != "", "Master Addr is required when elastic training is enabled"
resource_pool = fetch_hostfile(args.hostfile)
# respect CUDA_VISIBLE_DEVICES for a single node and no explicit resource filters
cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
if not resource_pool and len(cuda_visible_devices):
detected_str = f"Detected CUDA_VISIBLE_DEVICES={cuda_visible_devices}"
if len(args.include) or len(args.exclude) or args.num_nodes > 1 or args.num_gpus > 0:
print(
f"{detected_str} but ignoring it because one or several of --include/--exclude/--num_gpus/--num_nodes cl args were used. If you want to use CUDA_VISIBLE_DEVICES don't pass any of these arguments to deepspeed."
)
else:
args.include = f"localhost:{cuda_visible_devices}"
print(f"{detected_str}: setting --include={args.include}")
del os.environ["CUDA_VISIBLE_DEVICES"]
if args.num_nodes >= 0 or args.num_gpus >= 0:
if args.include != "" or args.exclude != "":
raise ValueError("Cannot specify num_nodes/gpus with include/exclude")
multi_node_exec = True
if not resource_pool:
resource_pool = {}
device_count = get_accelerator().device_count()
if device_count == 0:
raise RuntimeError("Unable to proceed, no GPU resources available")
resource_pool['localhost'] = device_count
args.master_addr = "127.0.0.1"
multi_node_exec = False
if not multi_node_exec and args.num_nodes > 1:
raise ValueError("Num nodes is >1 but no extra nodes available via hostfile")
active_resources = parse_inclusion_exclusion(resource_pool, args.include, args.exclude)
env = os.environ.copy()
    # validate that passwordless-ssh is working properly with this hostfile
if multi_node_exec and not args.no_ssh_check:
first_host = list(active_resources.keys())[0]
try:
subprocess.check_call(f'ssh -o PasswordAuthentication=no {first_host} hostname',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True)
except subprocess.CalledProcessError:
raise RuntimeError(
f"Using hostfile at {args.hostfile} but host={first_host} was not reachable via ssh. If you are running with a single node please remove {args.hostfile} or setup passwordless ssh."
)
if not args.master_addr:
assert multi_node_exec
first_host = list(active_resources.keys())[0]
hostname_cmd = [f"ssh {first_host} hostname -I"]
try:
result = subprocess.check_output(hostname_cmd, shell=True)
except subprocess.CalledProcessError as err:
logger.error(
"Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr"
)
raise err
args.master_addr = result.decode('utf-8').split()[0]
if not args.master_addr:
raise RuntimeError(
f"Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr"
)
logger.info(f"Using IP address of {args.master_addr} for node {first_host}")
if args.autotuning != "":
run_autotuning(args, active_resources)
return
if args.num_nodes > 0:
updated_active_resources = collections.OrderedDict()
for count, hostname in enumerate(active_resources.keys()):
if args.num_nodes == count:
break
updated_active_resources[hostname] = active_resources[hostname]
active_resources = updated_active_resources
if args.num_gpus > 0:
updated_active_resources = collections.OrderedDict()
for hostname in active_resources.keys():
updated_active_resources[hostname] = list(range(args.num_gpus))
active_resources = updated_active_resources
if args.elastic_training:
assert not args.no_local_rank, "--no_local_rank argument is not supported in Elastic training"
# encode world info as base64 to make it easier to pass via command line
world_info_base64 = encode_world_info(active_resources)
multi_node_exec = args.force_multi or len(active_resources) > 1
if not multi_node_exec:
deepspeed_launch = [
sys.executable, "-u", "-m", "deepspeed.launcher.launch", f"--world_info={world_info_base64}",
f"--master_addr={args.master_addr}", f"--master_port={args.master_port}"
]
if args.no_python:
deepspeed_launch.append("--no_python")
if args.module:
deepspeed_launch.append("--module")
if args.no_local_rank:
deepspeed_launch.append("--no_local_rank")
if args.save_pid:
deepspeed_launch += ["--save_pid", f"{os.getpid()}"]
if args.enable_each_rank_log:
deepspeed_launch.append(f"--enable_each_rank_log={args.enable_each_rank_log}")
if args.elastic_training:
deepspeed_launch.append("--enable_elastic_training")
deepspeed_launch.append(f"--max_elastic_nodes={args.max_elastic_nodes}")
deepspeed_launch.append(f"--min_elastic_nodes={args.min_elastic_nodes}")
if args.bind_cores_to_rank:
deepspeed_launch.append("--bind_cores_to_rank")
        if args.bind_core_list is not None:
deepspeed_launch.append(f"--bind_core_list={args.bind_core_list}")
cmd = deepspeed_launch + [args.user_script] + args.user_args
else:
args.launcher = args.launcher.lower()
if args.launcher == PDSH_LAUNCHER:
runner = PDSHRunner(args, world_info_base64)
elif args.launcher == OPENMPI_LAUNCHER:
runner = OpenMPIRunner(args, world_info_base64, resource_pool)
elif args.launcher == MPICH_LAUNCHER:
runner = MPICHRunner(args, world_info_base64, resource_pool)
elif args.launcher == IMPI_LAUNCHER:
runner = IMPIRunner(args, world_info_base64, resource_pool)
elif args.launcher == MVAPICH_LAUNCHER:
runner = MVAPICHRunner(args, world_info_base64, resource_pool)
elif args.launcher == SLURM_LAUNCHER:
runner = SlurmRunner(args, world_info_base64, resource_pool)
else:
raise NotImplementedError(f"Unknown launcher {args.launcher}")
if not runner.backend_exists():
raise RuntimeError(f"launcher '{args.launcher}' not installed.")
curr_path = os.path.abspath('.')
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = curr_path + ":" + env['PYTHONPATH']
else:
env['PYTHONPATH'] = curr_path
exports = ""
for var in env.keys():
if any([var.startswith(name) for name in EXPORT_ENVS]):
runner.add_export(var, env[var])
for environ_path in DEEPSPEED_ENVIRONMENT_PATHS:
environ_file = os.path.join(environ_path, DEEPSPEED_ENVIRONMENT_NAME)
if os.path.isfile(environ_file):
with open(environ_file, 'r') as fd:
for var in fd.readlines():
key, val = var.split('=', maxsplit=1)
runner.add_export(key, val)
if args.launcher == PDSH_LAUNCHER:
cmd, kill_cmd = runner.get_cmd(env, active_resources)
else:
cmd = runner.get_cmd(env, active_resources)
logger.info(f"cmd = {' '.join(cmd)}")
result = subprocess.Popen(cmd, env=env)
    def sigkill_handler(signum, frame):
        result.send_signal(signal.SIGINT)
        time.sleep(0.1)
        result.send_signal(signal.SIGTERM)
        # kill_cmd is only defined on the multi-node PDSH path; guard against a
        # NameError if the handler fires for a single-node launch.
        if multi_node_exec:
            result_kill = subprocess.Popen(kill_cmd, env=env)
            result_kill.wait()
        time.sleep(1)
        sys.exit(1)
if args.launcher == PDSH_LAUNCHER:
signal.signal(signal.SIGINT, sigkill_handler)
result.wait()
    # In case of failure we must propagate the error condition back to the caller (usually shell). The
    # actual error and traceback should have been printed in the subprocess, so in order to avoid
    # unnecessary noise we just quietly exit here with the same code as the subprocess
if result.returncode > 0:
sys.exit(result.returncode)
if __name__ == "__main__":
main()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import sys
import shutil
import subprocess
import warnings
from shlex import split
from abc import ABC, abstractmethod
from deepspeed.accelerator import get_accelerator
from ..utils import logger, get_numactl_cmd
from .constants import PDSH_MAX_FAN_OUT, MVAPICH_TMP_HOSTFILE
class MultiNodeRunner(ABC):
def __init__(self, args, world_info_base64):
self.args = args
self.validate_args()
self.user_arguments = self.parse_user_args()
self.user_script = args.user_script
self.world_info_base64 = world_info_base64
self.exports = {}
@abstractmethod
def backend_exists(self):
"""Return whether the corresponding backend exists"""
@abstractmethod
def get_cmd(self, environment, active_resources):
"""Return the command to execute on node"""
def add_export(self, key, var):
self.exports[key.strip()] = var.strip()
def parse_user_args(self):
return self.args.user_args
@property
def name(self):
"""Return the name of the backend"""
return self.__class__.__name__
def validate_args(self):
"""Validate self.args"""
class PDSHRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64):
super().__init__(args, world_info_base64)
def backend_exists(self):
return shutil.which('pdsh')
@property
def name(self):
return "pdsh"
def parse_user_args(self):
return list(map(lambda x: x if x.startswith("-") else f"'{x}'", self.args.user_args))
def get_cmd(self, environment, active_resources):
environment['PDSH_RCMD_TYPE'] = 'ssh'
active_workers = ",".join(active_resources.keys())
logger.info("Running on the following workers: %s" % active_workers)
# PDSH flags for max node fan out and specific hosts to launch on
# See https://linux.die.net/man/1/pdsh for flag details
pdsh_cmd_args = ['pdsh', '-S', '-f', str(PDSH_MAX_FAN_OUT), '-w', active_workers] + split(
self.args.launcher_args)
exports = ""
for key, val in self.exports.items():
exports += "export {}={}; ".format(key, val)
# https://linux.die.net/man/1/pdsh
# %n will be replaced by pdsh command
deepspeed_launch = [
exports, f"cd {os.path.abspath('.')};", sys.executable, "-u", "-m", "deepspeed.launcher.launch",
f'--world_info={self.world_info_base64}', "--node_rank=%n", f"--master_addr={self.args.master_addr}",
f"--master_port={self.args.master_port}"
]
if self.args.no_python:
deepspeed_launch.append("--no_python")
if self.args.module:
deepspeed_launch.append("--module")
if self.args.no_local_rank:
deepspeed_launch.append("--no_local_rank")
if self.args.save_pid:
deepspeed_launch += ["--save_pid", f"{os.getpid()}"]
if self.args.elastic_training:
deepspeed_launch.append("--enable_elastic_training")
deepspeed_launch.append(f"--max_elastic_nodes={self.args.max_elastic_nodes}")
deepspeed_launch.append(f"--min_elastic_nodes={self.args.min_elastic_nodes}")
cmd_to_search = [i + "\\" for i in deepspeed_launch[2:6]]
kill_command = pdsh_cmd_args + ["pkill -f ", " ".join(cmd_to_search)[:-2]]
return pdsh_cmd_args + deepspeed_launch + [self.user_script] + self.user_arguments, kill_command
class OpenMPIRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64, resource_pool):
super().__init__(args, world_info_base64)
self.resource_pool = resource_pool
self.add_export('UCX_TLS', 'tcp')
def backend_exists(self):
        #TODO: if IB is available we should suggest mvapich
return shutil.which('ompi_info')
@property
def name(self):
return "openmpi"
def validate_args(self):
super().validate_args()
#TODO: Allow for include/exclude at node-level but not gpu-level
if self.args.include != "" or self.args.exclude != "":
raise ValueError(f"{self.name} backend does not support worker include/exclusion")
if self.args.num_nodes != -1 or self.args.num_gpus != -1:
raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus")
def get_cmd(self, environment, active_resources):
total_process_count = sum(self.resource_pool.values())
mpirun_cmd = [
'mpirun',
'-n',
f'{total_process_count}',
'-hostfile',
f'{self.args.hostfile}',
'--mca',
'btl',
'^openib',
'--mca',
'btl_tcp_if_include',
'eth0',
] + split(self.args.launcher_args)
export_cmd = []
for k, v in self.exports.items():
export_cmd += ['-x', "{}={}".format(k, v)]
python_exec = []
if not self.args.no_python:
python_exec = [sys.executable, "-u"]
if self.args.module:
python_exec.append("-m")
return mpirun_cmd + export_cmd + python_exec + [self.user_script] + self.user_arguments
class MPICHRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64, resource_pool):
super().__init__(args, world_info_base64)
self.resource_pool = resource_pool
def backend_exists(self):
        #TODO: if IB is available we should suggest mpich
return shutil.which('mpirun') #mpich_info
@property
def name(self):
return "mpich"
def validate_args(self):
super().validate_args()
#TODO: Allow for include/exclude at node-level but not gpu-level
if self.args.include != "" or self.args.exclude != "":
raise ValueError(f"{self.name} backend does not support worker include/exclusion")
if self.args.num_nodes != -1 or self.args.num_gpus != -1:
raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus")
def get_cmd(self, environment, active_resources):
devices_per_node = self.resource_pool.values()
total_process_count = sum(devices_per_node)
process_per_node = list(devices_per_node)[0]
if not all([n == process_per_node for n in devices_per_node]):
raise ValueError("MPICH requires same number of devices per node")
mpirun_cmd = [
'mpirun',
] + split(self.args.launcher_args)
export_cmd = []
for k, v in self.exports.items():
export_cmd += ['-genv', "{}={}".format(k, v)]
export_cmd += ['-genv', 'MASTER_ADDR', str(self.args.master_addr)]
export_cmd += ['-genv', 'MASTER_PORT', str(self.args.master_port)]
export_cmd += ['-genv', 'WORLD_SIZE', str(total_process_count)]
export_cmd += ['-genv', 'LOCAL_SIZE', str(process_per_node)]
hosts = list(self.resource_pool.keys())
per_host_cmd = []
host_id = 0
host_count = 0
for i in range(total_process_count):
local_rank = i % process_per_node
python_exec = []
if not self.args.no_python:
python_exec += [sys.executable, "-u"]
if self.args.module:
python_exec.append("-m")
env_mapping = ['-env', 'RANK', str(i)]
env_mapping += ['-env', 'LOCAL_RANK', str(local_rank)]
if i == 0:
per_host_cmd = ['-n', '1', '-host', hosts[host_id]
] + env_mapping + python_exec + [self.user_script] + self.user_arguments
else:
per_host_cmd = per_host_cmd + [':', '-n', '1', '-host', hosts[host_id]
] + env_mapping + python_exec + [self.user_script] + self.user_arguments
host_count = host_count + 1
if host_count == process_per_node:
host_id = host_id + 1
host_count = 0
return mpirun_cmd + export_cmd + per_host_cmd
class IMPIRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64, resource_pool):
super().__init__(args, world_info_base64)
self.resource_pool = resource_pool
def backend_exists(self):
        #TODO: if IB is available we should suggest mpich
return shutil.which('mpirun') #mpich_info
@property
def name(self):
return "impi"
def validate_args(self):
super().validate_args()
#TODO: Allow for include/exclude at node-level but not gpu-level
if self.args.include != "" or self.args.exclude != "":
raise ValueError(f"{self.name} backend does not support worker include/exclusion")
if self.args.num_nodes != -1 or self.args.num_gpus != -1:
raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus")
def get_cmd(self, environment, active_resources):
devices_per_node = self.resource_pool.values()
total_process_count = sum(devices_per_node)
process_per_node = list(devices_per_node)[0]
if not all([n == process_per_node for n in devices_per_node]):
raise ValueError("Intel MPI requires same number of devices per node")
mpirun_cmd = [
'mpirun',
'-ppn',
f'{process_per_node}',
] + split(self.args.launcher_args)
export_cmd = []
for k, v in self.exports.items():
export_cmd += ['-genv', f'{k}', f'{v}']
if self.args.bind_cores_to_rank:
cores_per_rank, _ = get_numactl_cmd(self.args.bind_core_list, process_per_node, 0)
export_cmd += ['-genv', 'OMP_NUM_THREADS', str(cores_per_rank)]
export_cmd += ['-genv', 'MASTER_ADDR', str(self.args.master_addr)]
export_cmd += ['-genv', 'MASTER_PORT', str(self.args.master_port)]
export_cmd += ['-genv', 'WORLD_SIZE', str(total_process_count)]
export_cmd += ['-genv', 'LOCAL_SIZE', str(process_per_node)]
export_cmd += ['-hosts']
hosts = ""
for i, host in enumerate(self.resource_pool.keys()):
if i == 0:
hosts = f"{host}"
else:
hosts += f",{host}"
export_cmd += [hosts]
per_host_cmd = []
for i in range(total_process_count):
local_rank = i % process_per_node
python_exec = []
if self.args.bind_cores_to_rank:
_, numactl_cmd = get_numactl_cmd(self.args.bind_core_list, process_per_node, local_rank)
python_exec += numactl_cmd
if not self.args.no_python:
python_exec += [sys.executable, "-u"]
if self.args.module:
python_exec.append("-m")
env_mapping = ['-env', 'RANK', str(i)]
env_mapping += ['-env', 'LOCAL_RANK', str(local_rank)]
if i == 0:
per_host_cmd = ['-n', '1'] + env_mapping + python_exec + [self.user_script] + self.user_arguments
else:
per_host_cmd = per_host_cmd + [':', '-n', '1'] + env_mapping + python_exec + [self.user_script
] + self.user_arguments
print(mpirun_cmd + export_cmd + per_host_cmd)
return mpirun_cmd + export_cmd + per_host_cmd
class SlurmRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64, resource_pool):
super().__init__(args, world_info_base64)
self.resource_pool = resource_pool
def backend_exists(self):
return shutil.which('sinfo')
@property
def name(self):
return 'slurm'
def get_cmd(self, environment, active_resources):
assert not getattr(self.args, 'detect_nvlink_pairs',
False), "slurm backend does not support remapping visible devices"
total_process_count = sum(self.resource_pool.values())
srun_cmd = [
'srun',
'-n',
f'{total_process_count}',
] + split(self.args.launcher_args)
if getattr(self.args, 'slurm_comment', ''):
srun_cmd += ['--comment', self.args.slurm_comment]
if self.args.include != "":
srun_cmd.append('--include')
srun_cmd.append(f'{self.args.include}')
if self.args.exclude != "":
srun_cmd.append('--exclude')
srun_cmd.append(f'{self.args.exclude}')
if self.args.num_nodes > 0:
srun_cmd.append('--nodes')
srun_cmd.append(f'{self.args.num_nodes}')
if self.args.num_gpus > 0:
srun_cmd.append('--gpus')
srun_cmd.append(f'{self.args.num_gpus}')
exports = '--export=ALL'
for key, val in self.exports.items():
exports += f",{key}={val}"
python_exec = [sys.executable, "-u"]
command = srun_cmd + [exports] + python_exec + [self.user_script] + self.user_arguments
return command
class MVAPICHRunner(MultiNodeRunner):
def __init__(self, args, world_info_base64, resource_pool):
super().__init__(args, world_info_base64)
self.resource_pool = resource_pool
# Disable the CMA kernel module, not available on Ubuntu systems
self.add_export('MV2_SMP_USE_CMA', '0')
# If we fail this will output more verbose logging
self.add_export('MV2_DEBUG_SHOW_BACKTRACE', '1')
        # Enable CUDA-aware communication
if get_accelerator().device_name() == 'cuda':
self.add_export('MV2_USE_CUDA', '1')
# Support deep learning frameworks: http://hidl.cse.ohio-state.edu/userguide/horovod/
self.add_export('MV2_SUPPORT_DL', '1')
# Support MPI_THREAD_MULTIPLE
self.add_export('MV2_ENABLE_AFFINITY', '0')
# Performance tuning flags for allgather
self.add_export('MV2_INTER_ALLGATHER_TUNING', '5')
self.add_export('MV2_CUDA_USE_NAIVE', '0')
def backend_exists(self):
        #TODO: if IB is available we should suggest mvapich
mpiname_exists = shutil.which('mpiname')
exists = False
if not mpiname_exists:
warnings.warn("mpiname does not exist, mvapich is not installed properly")
else:
results = subprocess.check_output('mpiname', shell=True)
mpiname_results = results.decode('utf-8').strip()
if "MVAPICH2-GDR" in mpiname_results:
exists = True
else:
warnings.warn(f"Expected MVAPICH2-GDR as return for mpiname but received {mpiname_results}")
return exists
@property
def name(self):
return "mvapich"
def validate_args(self):
super().validate_args()
#TODO: Allow for include/exclude at node-level but not gpu-level
if self.args.include != "" or self.args.exclude != "":
raise ValueError(f"{self.name} backend does not support worker include/exclusion")
if self.args.num_nodes != -1 or self.args.num_gpus != -1:
raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus")
def get_cmd(self, environment, active_resources):
devices_per_node = self.resource_pool.values()
total_process_count = sum(devices_per_node)
process_per_node = list(devices_per_node)[0]
if not all([n == process_per_node for n in devices_per_node]):
raise ValueError("mvapich requires same number of devices per node")
with open(MVAPICH_TMP_HOSTFILE, 'w') as fd:
for host in self.resource_pool.keys():
fd.write(f'{host}\n')
mpirun_cmd = [
'mpirun',
'-np',
f'{total_process_count}',
'-ppn',
f'{process_per_node}',
'--hostfile',
f'{MVAPICH_TMP_HOSTFILE}',
] + split(self.args.launcher_args)
export_cmd = []
for k, v in self.exports.items():
export_cmd += ['-env', "{}={}".format(k, v)]
python_exec = []
if not self.args.no_python:
python_exec = [sys.executable, "-u"]
if self.args.module:
python_exec.append("-m")
return mpirun_cmd + export_cmd + python_exec + [self.user_script] + self.user_arguments
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
PDSH_LAUNCHER = 'pdsh'
PDSH_MAX_FAN_OUT = 1024
OPENMPI_LAUNCHER = 'openmpi'
MPICH_LAUNCHER = 'mpich'
IMPI_LAUNCHER = 'impi'
SLURM_LAUNCHER = 'slurm'
MVAPICH_LAUNCHER = 'mvapich'
MVAPICH_TMP_HOSTFILE = '/tmp/deepspeed_mvapich_hostfile'
ELASTIC_TRAINING_ID_DEFAULT = "123456789"
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
DeepSpeed launcher. This is similar to torch's distributed.launch, but supports
additional features such as arbitrary GPU exclusion.
deepspeed.launcher.launch is intended to be run on a single worker node and
will spawn several worker sub-processes depending on how many devices/ranks
are on the worker.
"""
import sys
import subprocess
import os
import json
import base64
import time
import signal
import psutil
from collections import defaultdict
from typing import Dict
from argparse import ArgumentParser, REMAINDER
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
from ..nebula.constants import DLTS_POD_ENV_PATH
from ..utils import logger, get_numactl_cmd
from ..elasticity import is_torch_elastic_compatible
from .constants import ELASTIC_TRAINING_ID_DEFAULT
PID_FILE_BASEPATH = "/tmp"
def parse_args():
parser = ArgumentParser(description="DeepSpeed distributed training launch"
" utility that creates multiple distributed"
" processes on a single node")
# Optional arguments for the launch helper
parser.add_argument("--node_rank",
type=int,
default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--master_addr",
default="127.0.0.1",
type=str,
help="Master node (rank 0)'s address, should be either"
" the IP address or the hostname of node 0, for"
" single node multi-proc training, the"
" --master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port",
default=TORCH_DISTRIBUTED_DEFAULT_PORT,
type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communication during distributed "
"training")
parser.add_argument("--world_info", default="None", type=str, help="world info base64 encoded dictionary")
parser.add_argument("--module",
action="store_true",
help="Change each process to interpret the launch "
"script as a Python module, executing with the same "
"behavior as 'python -m'.")
parser.add_argument("--no_python",
action="store_true",
help="Skip prepending the training script with "
"'python' - just execute it directly.")
parser.add_argument("--enable_elastic_training", action="store_true", help="Enable elastic training support.")
parser.add_argument("--min_elastic_nodes", type=int, default=-1, help="Min number of nodes in elastic training.")
parser.add_argument("--max_elastic_nodes", type=int, default=-1, help="Max number of nodes in elastic training.")
parser.add_argument("--no_local_rank",
action="store_true",
help="Do not pass local_rank as an argument when calling "
"the user's training script.")
parser.add_argument("--save_pid",
type=int,
default=0,
help="main launching process pid, for internal pid tracking")
parser.add_argument("--enable_each_rank_log",
default="None",
type=str,
help="redirect the stdout and stderr from each rank into different log files")
parser.add_argument("--bind_cores_to_rank",
action="store_true",
help="Bind each rank to different cores of the host. "
"This improves host efficiency especially for CPU backend")
parser.add_argument("--bind_core_list",
type=str,
default=None,
help="List of cores to bind to with comma separated list of "
"numbers and range. i.e. 1,3-5,7 => [1,3,4,5,7]. When not "
"specified, all cores on system would be used rank binding")
# positional
parser.add_argument("training_script",
type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
# Adapted from https://psutil.readthedocs.io/en/latest/#kill-process-tree
def terminate_process_tree(pid):
process = psutil.Process(pid)
children = process.children(recursive=True)
children.append(process)
for child in children:
try:
child.terminate()
except psutil.NoSuchProcess:
pass
gone, alive = psutil.wait_procs(children, timeout=30)
for p in alive:
p.kill()
def main():
args = parse_args()
current_env = os.environ.copy()
for k in current_env.keys():
if "NCCL" in k:
logger.info(f"{args.node_rank} {k}={current_env[k]}")
if args.world_info == "None":
raise ValueError("world_info can not be None")
world_info = base64.urlsafe_b64decode(args.world_info)
world_info = json.loads(world_info)
logger.info(f"WORLD INFO DICT: {world_info}")
node_list = list(world_info.keys())
args.nnodes = len(node_list)
local_node = node_list[args.node_rank]
local_gpu_ids = world_info[local_node]
num_local_procs = len(local_gpu_ids)
logger.info(f"nnodes={args.nnodes}, num_local_procs={num_local_procs}, node_rank={args.node_rank}")
global_rank_mapping = defaultdict(list)
curr_global_rank = 0
dist_world_size = 0
for node_id in node_list:
gids = world_info[node_id]
dist_world_size += len(gids)
for gid in gids:
global_rank_mapping[node_id].append(curr_global_rank)
curr_global_rank += 1
logger.info(f"global_rank_mapping={global_rank_mapping}")
logger.info(f"dist_world_size={dist_world_size}")
current_env["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, local_gpu_ids))
logger.info(f"Setting CUDA_VISIBLE_DEVICES={current_env['CUDA_VISIBLE_DEVICES']}")
# set PyTorch distributed related environmental variables
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
current_env["CROSS_RANK"] = str(args.node_rank)
current_env["CROSS_SIZE"] = str(args.nnodes)
current_env["LOCAL_SIZE"] = str(num_local_procs)
if args.save_pid:
print(f"launcher pid: {os.getpid()}")
pid_file = None
if args.save_pid:
launcher_pid = os.getpid()
pid_file = os.path.join(PID_FILE_BASEPATH, f"{args.save_pid}.deepspeed")
assert not os.path.isfile(pid_file), "pid file exists but shouldn't"
with open(pid_file, 'w') as fd:
fd.write(f"{launcher_pid}")
if not is_torch_elastic_compatible():
if args.enable_elastic_training:
logger.info(f"Disabling elastic training support as \
PyTorch version should be greater than 1.11.x")
args.enable_elastic_training = False
if os.path.exists(DLTS_POD_ENV_PATH):
with open(DLTS_POD_ENV_PATH) as file:
lines = file.readlines()
lines = [line.rstrip() for line in lines]
for line in lines:
if line.startswith('export FC_TASKROLE_NAME') or line.startswith('export FC_TASK_INDEX'):
key_val = line.split()[1]
key, val = key_val.split('=')
current_env[key] = val
processes = []
cmd = []
if not args.enable_elastic_training:
if args.enable_each_rank_log != "None":
# prepare the log path and the file name prefix
if os.path.isfile(args.enable_each_rank_log):
raise ValueError(f"{args.enable_each_rank_log} should not be a file, it should be a directory.")
if not os.path.exists(args.enable_each_rank_log):
try:
os.makedirs(args.enable_each_rank_log)
except Exception as e:
print(e)
raise ValueError(f"unable to create directory {args.enable_each_rank_log} for each rank log.")
log_name_prefix = time.strftime("%Y%m%d%H%M%S", time.localtime())
for local_rank in range(0, num_local_procs):
# each process's rank
dist_rank = global_rank_mapping[local_node][local_rank]
current_env["RANK"] = str(dist_rank)
current_env["LOCAL_RANK"] = str(local_rank)
# spawn the processes
cmd = []
if args.bind_cores_to_rank:
cores_per_rank, numactl_cmd = get_numactl_cmd(args.bind_core_list, num_local_procs, local_rank)
current_env["OMP_NUM_THREADS"] = f"{cores_per_rank}"
cmd = cmd + numactl_cmd
if not args.no_python:
cmd.append(sys.executable)
cmd.append("-u")
if args.module:
cmd.append("-m")
else:
if args.module:
raise ValueError("Don't use both the '--no_python' flag"
" and the '--module' flag at the same time.")
cmd.append(args.training_script)
# A user may not want to pass local_rank as a keyword arg so we make this optional.
if not args.no_local_rank:
cmd.append(f"--local_rank={local_rank}")
cmd += args.training_script_args
if args.enable_each_rank_log != "None":
log_file = os.path.join(args.enable_each_rank_log, f"{log_name_prefix}_rank{dist_rank}.log")
log_fd = open(log_file, 'w')
process = subprocess.Popen(cmd, env=current_env, stdout=log_fd, stderr=log_fd)
else:
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
else:
from ..elasticity import DSElasticAgent
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.agent.server.api import WorkerSpec
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
from torch.distributed.elastic.multiprocessing import Std
if args.min_elastic_nodes == -1:
args.min_elastic_nodes = 1
if args.max_elastic_nodes == -1:
args.max_elastic_nodes = args.nnodes
assert args.max_elastic_nodes > 0 and args.min_elastic_nodes > 0, "Max and Min nodes should be positive"
current_env["NCCL_ASYNC_ERROR_HANDLING"] = str(1)
# Get config and arguments
cmd = []
if not args.no_python:
cmd = [sys.executable, "-u"]
if args.module:
cmd.append("-m")
else:
if args.module:
raise ValueError("Don't use both the '--no_python' flag"
" and the '--module' flag at the same time.")
cmd.append(args.training_script)
cmd += args.training_script_args
cmd_args = cmd[1:]
rdzv_configs: Dict[str, str] = {'timeout': 100}
run_id = os.environ.get("ELASTIC_RUN_ID", ELASTIC_TRAINING_ID_DEFAULT)
# Creating config for rendezvous class
rdzv_parameters = RendezvousParameters(backend='c10d',
endpoint=args.master_addr + ":" + str(args.master_port),
run_id=run_id,
min_nodes=args.min_elastic_nodes,
max_nodes=args.max_elastic_nodes,
**rdzv_configs)
spec = WorkerSpec(
role='trainer',
local_world_size=num_local_procs,
entrypoint=cmd[0],
args=cmd[1:],
rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters),
max_restarts=100,
monitor_interval=5,
redirects=Std.from_str("0"),
tee=Std.from_str("0"),
master_addr=None,
master_port=None,
)
agent = DSElasticAgent(spec, current_env)
agent.run()
sig_names = {2: "SIGINT", 15: "SIGTERM"}
last_return_code = None
def sigkill_handler(signum, frame):
for process in processes:
logger.info(f"Killing subprocess {process.pid}")
try:
terminate_process_tree(process.pid)
except Exception:
pass
if last_return_code is not None:
logger.error(f"{cmd} exits with return code = {last_return_code}")
sys.exit(last_return_code)
if signum in sig_names:
logger.info(f"Main process received {sig_names[signum]}, exiting")
if args.save_pid:
if os.path.isfile(pid_file):
os.remove(pid_file)
sys.exit(1)
# pass SIGINT/SIGTERM to children if the parent is being terminated
signal.signal(signal.SIGINT, sigkill_handler)
signal.signal(signal.SIGTERM, sigkill_handler)
alive_processes = set(processes)
while len(alive_processes):
finished_processes = []
for process in alive_processes:
if process.poll() is None:
# the process is still running
continue
else:
if process.returncode != 0:
last_return_code = process.returncode # for sigkill_handler
sigkill_handler(signal.SIGTERM, None) # not coming back
else:
# exited cleanly
logger.info(f"Process {process.pid} exits successfully.")
finished_processes.append(process)
alive_processes = set(alive_processes) - set(finished_processes)
time.sleep(1)
if __name__ == "__main__":
main()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .transformers.ds_transformer import DeepSpeedTransformerInference
from .transformers.clip_encoder import DSClipEncoder
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..features.cuda_graph import CUDAGraph
class DSUNet(CUDAGraph, torch.nn.Module):
def __init__(self, unet, enable_cuda_graph=True):
super().__init__(enable_cuda_graph=enable_cuda_graph)
self.unet = unet
# SD pipeline accesses this attribute
self.in_channels = unet.in_channels
self.device = self.unet.device
self.dtype = self.unet.dtype
self.config = self.unet.config
self.fwd_count = 0
self.unet.requires_grad_(requires_grad=False)
self.unet.to(memory_format=torch.channels_last)
self.cuda_graph_created = False
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[k].copy_(kwargs[k])
self._cuda_graphs.replay()
return self.static_output
def forward(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.cuda_graph_created:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
return outputs
else:
return self._forward(*inputs, **kwargs)
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._forward(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._cuda_graphs = torch.cuda.CUDAGraph()
self.static_inputs = inputs
self.static_kwargs = kwargs
with torch.cuda.graph(self._cuda_graphs):
self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)
self.cuda_graph_created = True
def _forward(self, sample, timestamp, encoder_hidden_states, return_dict=True, cross_attention_kwargs=None):
if cross_attention_kwargs:
return self.unet(sample,
timestamp,
encoder_hidden_states,
return_dict,
cross_attention_kwargs=cross_attention_kwargs)
else:
return self.unet(sample, timestamp, encoder_hidden_states, return_dict)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..features.cuda_graph import CUDAGraph
class DSVAE(CUDAGraph, torch.nn.Module):
def __init__(self, vae, enable_cuda_graph=True):
super().__init__(enable_cuda_graph=enable_cuda_graph)
self.vae = vae
self.config = vae.config
self.device = self.vae.device
self.dtype = self.vae.dtype
self.vae.requires_grad_(requires_grad=False)
self.decoder_cuda_graph_created = False
self.encoder_cuda_graph_created = False
self.all_cuda_graph_created = False
def _graph_replay_decoder(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_decoder_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_decoder_kwargs[k].copy_(kwargs[k])
self._decoder_cuda_graph.replay()
return self.static_decoder_output
def _decode(self, x, return_dict=True):
return self.vae.decode(x, return_dict=return_dict)
def _create_cuda_graph_decoder(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._decode(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._decoder_cuda_graph = torch.cuda.CUDAGraph()
self.static_decoder_inputs = inputs
self.static_decoder_kwargs = kwargs
with torch.cuda.graph(self._decoder_cuda_graph):
self.static_decoder_output = self._decode(*self.static_decoder_inputs, **self.static_decoder_kwargs)
self.decoder_cuda_graph_created = True
def decode(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.decoder_cuda_graph_created:
outputs = self._graph_replay_decoder(*inputs, **kwargs)
else:
self._create_cuda_graph_decoder(*inputs, **kwargs)
outputs = self._graph_replay_decoder(*inputs, **kwargs)
return outputs
else:
return self._decode(*inputs, **kwargs)
def _graph_replay_encoder(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_encoder_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_encoder_kwargs[k].copy_(kwargs[k])
self._encoder_cuda_graph.replay()
return self.static_encoder_output
def _encode(self, x, return_dict=True):
return self.vae.encode(x, return_dict=return_dict)
def _create_cuda_graph_encoder(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._encode(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._encoder_cuda_graph = torch.cuda.CUDAGraph()
self.static_encoder_inputs = inputs
self.static_encoder_kwargs = kwargs
with torch.cuda.graph(self._encoder_cuda_graph):
self.static_encoder_output = self._encode(*self.static_encoder_inputs, **self.static_encoder_kwargs)
self.encoder_cuda_graph_created = True
def encode(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.encoder_cuda_graph_created:
outputs = self._graph_replay_encoder(*inputs, **kwargs)
else:
self._create_cuda_graph_encoder(*inputs, **kwargs)
outputs = self._graph_replay_encoder(*inputs, **kwargs)
return outputs
else:
return self._encode(*inputs, **kwargs)
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[k].copy_(kwargs[k])
self._all_cuda_graph.replay()
return self.static_output
def forward(self, *inputs, **kwargs):
if self.enable_cuda_graph:
            if self.all_cuda_graph_created:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
return outputs
else:
return self._forward(*inputs, **kwargs)
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._forward(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._all_cuda_graph = torch.cuda.CUDAGraph()
self.static_inputs = inputs
self.static_kwargs = kwargs
with torch.cuda.graph(self._all_cuda_graph):
self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)
self.all_cuda_graph_created = True
def _forward(self, sample, timestamp, encoder_hidden_states, return_dict=True):
return self.vae(sample, timestamp, encoder_hidden_states, return_dict)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import ABC, abstractmethod
class CUDAGraph(ABC):
def __init__(self, enable_cuda_graph=False):
super().__init__()
self.enable_cuda_graph = enable_cuda_graph
@abstractmethod
def _create_cuda_graph(self):
"""
Create CUDA graph(s)
"""
raise NotImplementedError
@abstractmethod
def _graph_replay(self):
"""
Replay CUDA graph(s)
"""
raise NotImplementedError
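# Minimal usage sketch (illustrative only, assumes a CUDA-capable accelerator): concrete
# subclasses such as DSUNet, DSVAE, and DSClipEncoder capture the graph once and replay it
# on later calls, roughly along these lines:
#
#   class MyGraphedModule(CUDAGraph, torch.nn.Module):
#       def _create_cuda_graph(self, *inputs):
#           self._graph = torch.cuda.CUDAGraph()
#           self.static_inputs = inputs
#           with torch.cuda.graph(self._graph):
#               self.static_output = self._forward(*self.static_inputs)
#       def _graph_replay(self, *inputs):
#           for static, live in zip(self.static_inputs, inputs):
#               static.copy_(live)
#           self._graph.replay()
#           return self.static_output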
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
class DeepSpeedBERTInference(DeepSpeedTransformerInference):
"""Initialize the DeepSpeed BERT Transformer Layer.
"""
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch.nn as nn
class DeepSpeedTransformerBase(nn.Module):
    def __init__(self):
        super().__init__()
# This will be the new, clean base class that replaces DeepSpeedTransformerInference.
# We don't yet know exactly what it will look like, so it is kept here as a placeholder.
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
class DeepSpeedOPTInference(DeepSpeedTransformerInference):
"""Initialize the DeepSpeed OPT Transformer Layer.
"""
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
class DeepSpeedBloomInference(DeepSpeedTransformerInference):
"""Initialize the DeepSpeed Bloom Transformer Layer.
"""
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.accelerator import get_accelerator
from ..features.cuda_graph import CUDAGraph
class DSClipEncoder(CUDAGraph, torch.nn.Module):
def __init__(self, enc, enable_cuda_graph=False):
super().__init__(enable_cuda_graph=enable_cuda_graph)
enc.text_model._build_causal_attention_mask = self._build_causal_attention_mask
self.enc = enc
self.device = self.enc.device
self.dtype = self.enc.dtype
self.cuda_graph_created = [False, False]
self.static_inputs = [None, None]
self.static_kwargs = [None, None]
self.static_output = [None, None]
self._cuda_graphs = [None, None]
self.iter = 0
self.config = self.enc.config
def _build_causal_attention_mask(self, bsz, seq_len, dtype):
mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype, device=get_accelerator().current_device_name())
mask.fill_(torch.tensor(torch.finfo(dtype).min))
mask.triu_(1)
mask = mask.unsqueeze(1)
return mask
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[self.iter][i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[self.iter][k].copy_(kwargs[k])
self._cuda_graphs[self.iter].replay()
return self.static_output[self.iter]
def forward(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.cuda_graph_created[self.iter]:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
self.iter = (self.iter + 1) % 2
return outputs
else:
return self.enc(*inputs, **kwargs)
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._forward(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._cuda_graphs[self.iter] = torch.cuda.CUDAGraph()
self.static_inputs[self.iter] = inputs
self.static_kwargs[self.iter] = kwargs
with torch.cuda.graph(self._cuda_graphs[self.iter]):
self.static_output[self.iter] = self._forward(*self.static_inputs[self.iter],
**self.static_kwargs[self.iter])
self.cuda_graph_created[self.iter] = True
def _forward(self, *inputs, **kwargs):
return self.enc(*inputs, **kwargs)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
class DeepSpeedGPTInference(DeepSpeedTransformerInference):
"""Initialize the DeepSpeed GPT Transformer Layer.
"""
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
class DeepSpeedMegatronGPTInference(DeepSpeedTransformerInference):
"""Initialize the DeepSpeed Megatron GPT Transformer Layer.
"""
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.utils.logging import log_dist
from deepspeed.ops.transformer.inference.ds_mlp import DeepSpeedMLP
from deepspeed.ops.transformer.inference.ds_attention import DeepSpeedSelfAttention, BloomSelfAttention
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
inference_module = None
class DeepSpeedTransformerInference(nn.Module):
"""Initialize the DeepSpeed Transformer Layer.
Arguments:
layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers,
layer_id will be 0,1,2...23 when each layer object is instantiated
config: An object of DeepSpeedInferenceConfig
mp_group: Model parallelism group initialized on the modeling side.
quantize_scales: This argument groups all the layers' scales used for quantization
quantize_groups: Number of groups used for quantizing the model
merge_count: Shows the number of model-parallel checkpoints merged before running inference.
We use this argument to control the quantization scale for the model parameters if a bigger
quantize-grouping than 1 is used.
            mlp_extra_grouping: This flag indicates whether to use twice as many quantization groups for the
                MLP part of a Transformer layer. We use this feature to reduce the convergence impact of
                quantization for specific downstream tasks.
"""
layer_id = 0
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super(DeepSpeedTransformerInference, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedTransformerInference.layer_id
DeepSpeedTransformerInference.layer_id += 1
data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype
global inference_module
if inference_module is None:
builder = InferenceBuilder()
inference_module = builder.load()
if DeepSpeedTransformerInference.layer_id == 1:
log_dist(f"DeepSpeed-Inference config: {self.config.__dict__}", [0])
if self.config.bigscience_bloom:
self.attention = BloomSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, merge_count)
else:
self.attention = DeepSpeedSelfAttention(self.config, mp_group, quantize_scales, quantize_groups,
merge_count)
self.mlp = DeepSpeedMLP(self.config, mp_group, quantize_scales, quantize_groups, merge_count,
mlp_extra_grouping)
device = get_accelerator().current_device_name() # if config.bigscience_bloom else 'cpu'
if self.config.set_empty_params:
self.norm_w = None
self.norm_b = None
else:
self.norm_w = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device),
requires_grad=False)
self.norm_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device),
requires_grad=False)
self.layer_past = None
try:
if config.dtype == torch.float32:
self.allocate_workspace = inference_module.allocate_workspace_fp32
elif config.dtype == torch.bfloat16:
self.allocate_workspace = inference_module.allocate_workspace_bf16
else:
                self.allocate_workspace = inference_module.allocate_workspace_fp16
self._alloc_workspace = True
except AttributeError:
self.allocate_workspace = None
self._alloc_workspace = False
@classmethod
def reset_cache(cls):
if inference_module is not None:
inference_module.reset_cache()
def forward(
self,
input=None,
input_mask=None,
attention_mask=None,
attn_mask=None,
head_mask=None,
layer_past=None,
get_key_value=False,
get_present=False,
encoder_output=None,
enc_dec_attn_mask=None,
x=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
alibi=None,
output_attentions=False,
# TODO(arashb): 'layer_head_mask' and 'past_key_value' are only added to satisfy the OPT models API.
# This needs to be redesigned later!
layer_head_mask=None,
past_key_value=None,
**kwargs):
if x is not None:
input = x
if "hidden_states" in kwargs:
input = kwargs["hidden_states"]
input_mask = (input_mask if attn_mask is None else attn_mask) if attention_mask is None else attention_mask
# Allocate memory only on first layer forward
if self.config.layer_id == 0 and self._alloc_workspace:
self.allocate_workspace(self.config.hidden_size, self.config.heads,
input.size()[1],
input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size,
self.config.bigscience_bloom,
dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens,
self.config.min_out_tokens)
self._alloc_workspace = False
get_present = (get_present or get_key_value or use_cache)
input_mask = input_mask if attention_mask is None else attention_mask
# We set the prev key/value to None when there is a prompt
if input.shape[1] > 1:
self.layer_past = None
layer_past = layer_past if layer_past is not None else self.layer_past
head_mask = layer_head_mask if layer_head_mask is not None else head_mask
attn_mask = None
if isinstance(input, tuple):
attn_mask = input[1]
input = input[0]
input_type = input.dtype
if (self.config.dtype in [torch.float16, torch.bfloat16, torch.int8]) \
and input.dtype == torch.float:
            target_dtype = torch.half if self.config.dtype == torch.int8 else self.config.dtype
input = input.to(target_dtype)
with torch.no_grad():
attention_output, key, value, context_outputtn_ctx, inp_norm = \
self.attention(input,
input_mask,
head_mask,
layer_past,
get_present,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
self.norm_w,
self.norm_b,
alibi)
presents = (key, value)
self.layer_past = presents if layer_past is None else None
output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob)
if not self.config.pre_layer_norm:
output = inference_module.layer_norm(output, self.norm_w, self.norm_b, self.config.epsilon)
output = output.to(input_type)
if get_present:
output = (output, presents)
if self.config.return_single_tuple:
return (output, )
elif self.config.return_tuple:
return output if type(output) is tuple else (output, attn_mask)
else:
return output
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from ..runtime.pipe import PipelineModule, LayerSpec, TiedLayerSpec
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .containers import HFGPT2LayerPolicy
from .containers import HFBertLayerPolicy
from .containers import BLOOMLayerPolicy
from .containers import HFGPTJLayerPolicy
from .containers import HFGPTNEOLayerPolicy
from .containers import GPTNEOXLayerPolicy
from .containers import HFOPTLayerPolicy
from .containers import MegatronLayerPolicy
from .containers import HFDistilBertLayerPolicy
from .containers import HFCLIPLayerPolicy
from .containers import LLAMALayerPolicy
from .containers import UNetPolicy
from .containers import VAEPolicy
# transformer-based policies
replace_policies = [
HFBertLayerPolicy, HFGPTNEOLayerPolicy, GPTNEOXLayerPolicy, HFGPTJLayerPolicy, MegatronLayerPolicy,
HFGPT2LayerPolicy, BLOOMLayerPolicy, HFOPTLayerPolicy, HFCLIPLayerPolicy, HFDistilBertLayerPolicy, LLAMALayerPolicy
]
# non-transformer-based policies
generic_policies = [UNetPolicy, VAEPolicy]
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Automatic Tensor Parallelism
import re
from torch import nn
from .replace_policy import replace_policies
class AutoTP():
def in_module_list(module, module_list):
for item in module_list:
if type(item).__name__ == type(module).__name__:
return True
return False
def get_module_list(model):
mlist = []
for child in model.children():
if isinstance(child, nn.ModuleList):
for module in child.children():
if not mlist:
mlist = [module]
elif not AutoTP.in_module_list(module, mlist):
mlist = mlist + [module]
else:
mlist = mlist + AutoTP.get_module_list(child)
return mlist
def supported(model):
unsupported = ['codegen', 'deberta', 'flaubert', 'fsmt', 'gpt2', 'led', 'longformer', 'xlm', 'xlnet']
model = str(model)
key = re.search(r": (.*?)Model", model)
if key is None:
key = re.search(r": (.*?)Stack", model)
if key is None:
key = re.match(r"(.*?)Model", model)
assert key is not None, "Not able to determine model policy automatically. Please provide policy."
if key.group(1).lower() in unsupported:
return False
return True
def get_layers(parent, module):
layer_list = []
for key, submodule in module._modules.items():
if isinstance(submodule, nn.Linear):
layer_list = layer_list + [parent + "." + key]
elif isinstance(submodule, nn.LayerNorm) or key == 'LayerNorm' or key == 'layer_norm':
layer_list = layer_list + ["ln"]
else:
layer_list = layer_list + AutoTP.get_layers(key, submodule)
return layer_list
def update_policy_list(policy_list, new_module, new_gems):
if len(policy_list):
for i, policy in enumerate(policy_list):
# if module already exists in policy, combine gems and remove duplicates
if policy[0] == type(new_module):
new_gems = set(new_gems + policy[1])
policy_list[i] = tuple([type(new_module), new_gems])
return policy_list
policy_list.append(tuple([type(new_module), new_gems]))
return policy_list
def kernel_supported(module_list):
policy = []
for plcy in replace_policies:
# instantiate a throw-away policy in order to populate the _orig_layer_class
_ = plcy(None)
if isinstance(plcy._orig_layer_class, list):
for orig_layer_class in plcy._orig_layer_class:
policy.append(orig_layer_class)
elif plcy._orig_layer_class is not None:
policy.append(plcy._orig_layer_class)
for child in module_list:
if child.__class__ in policy:
return True
return False
def tp_parser(model):
policy_list = []
module_list = []
layer_list = []
gem_list = []
module_list = AutoTP.get_module_list(model)
assert AutoTP.supported(model), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \
if AutoTP.kernel_supported(module_list) else "AutoTP not supported for model. Please provide policy."
for module in module_list:
for key, submodule in module._modules.items():
if isinstance(submodule, nn.Linear):
layer_list = layer_list + ["." + key]
elif isinstance(submodule, nn.LayerNorm) or key == 'LayerNorm' or key == 'layer_norm':
layer_list = layer_list + ["ln"]
else:
layer_list = layer_list + AutoTP.get_layers(key, submodule)
for i, layer in enumerate(layer_list):
if layer == 'ln':
if layer_list[i - 1] != 'ln':
gem_list = gem_list + [layer_list[i - 1]]
elif 'out_proj' in layer:
gem_list = gem_list + [layer]
elif 'o_proj' in layer:
gem_list = gem_list + [layer]
elif 'down_proj' in layer:
gem_list = gem_list + [layer]
layer_list = []
if gem_list != []:
gem_list = list(set(gem_list))
policy_list = AutoTP.update_policy_list(policy_list, module, gem_list)
gem_list = []
assert len(policy_list), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \
if AutoTP.kernel_supported(module_list) else "Not able to determine model policy automatically. Please provide policy."
return policy_list
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import ABC, abstractmethod
from deepspeed.utils.types import ActivationFuncType, NormType
import torch
from deepspeed.accelerator import get_accelerator
transformer_param_names = (
'attn_qkvw', \
'attn_qkvb', \
'attn_ow' , \
'attn_ob', \
'inter_w', \
'inter_b', \
'output_w', \
'output_b', \
'attn_nw', \
'attn_nb', \
'norm_w', \
'norm_b')
class DSPolicy(ABC):
_orig_layer_class = None
def __init__(self):
self.cuda_graph_supported = False
@abstractmethod
def attention(self):
"""
Returns attention qkv and dense parameters
weight: (3*hidden, hidden) and (hidden, hidden)
bias: (3*hidden) and (hidden)
"""
raise NotImplementedError
class TransformerPolicy(DSPolicy):
# a static class variable containing the HuggingFace model configuration.
# see e.g., transformers.models.opt.configuration_opt.OPTConfig
hf_model_config = None
def __init__(
self,
inference=True,
linear_layer=True,
scale_attention=True,
megatron_v2=False,
use_mup=False,
# the type of activation function used in MLP
mlp_act_func_type=ActivationFuncType.GELU,
# applies layer norm before attention if `pre_attn_norm` is set to True
pre_attn_norm=True,
# this flag shows whether or not using prefix in loading the checkpoint
use_load_prefix=False,
# whether or not the qkv is stored in the split-format
split_qkv=True,
# Type of normalization to perform
norm_type=NormType.LayerNorm):
super().__init__()
self.cuda_graph_supported = False
self.inference = inference
self.linear_layer = linear_layer
self.scale_attention = scale_attention
self.is_megatron_v2 = megatron_v2
self.use_mup = use_mup
self.mlp_act_func_type = mlp_act_func_type
self.pre_attn_norm = pre_attn_norm
self.use_load_prefix = use_load_prefix
self.split_qkv = split_qkv
self.norm_type = norm_type
@abstractmethod
def attention(self):
"""
Returns attention qkv and dense parameters
weight: (3*hidden, hidden) and (hidden, hidden)
bias: (3*hidden) and (hidden)
"""
raise NotImplementedError
@abstractmethod
def get_hidden_heads(self):
"""
return hidden_size and number of heads
"""
raise NotImplementedError
@abstractmethod
def mlp(self):
"""
Returns mlp intermediate and output
weight: (intermediate, hidden) and (hidden, intermediate)
bias: (intermediate) and (hidden)
"""
raise NotImplementedError
@abstractmethod
def layernorm(self):
"""
Returns LayerNorms used in transformer layer
Post-Attention and pre/post layer norm
gamma and beta with shape: (hidden)
"""
raise NotImplementedError
# TODO (lekurile): This function exists in the base container as well, consolidate at some point
def transpose(data):
with torch.no_grad():
data = data.contiguous()
data1 = data.transpose(-1, -2).reshape(-1)
data.reshape(-1).copy_(data1)
data1 = None
return data.reshape(data.shape[-1], data.shape[-2])
# TODO (lekurile): This function exists in the megatron feature container as well, consolidate at some point
def _transpose(x, heads=1, mp_replace=None):
heads = heads // mp_replace.mp_size # type: ignore
outer_dim = -1
attention_head_size = x.shape[outer_dim] // heads
new_x_shape = x.size()[:outer_dim] + (heads, attention_head_size)
x_1 = x.view(*new_x_shape)
(q, k, v) = torch.split(x_1, (x_1.shape[-1] // 3), dim=-1)
if len(q.shape) > 2:
new_shape = (q.shape[0], ) + (-1, )
return torch.cat((q.reshape(new_shape), k.reshape(new_shape), v.reshape(new_shape)),
dim=outer_dim).reshape(x.shape)
else:
return torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape)
# This checks if the parameter exists in the checkpoint file and maybe copies it into the corresponding destination tensor.
# Note that not all parameters are saved in one checkpoint, that's why we always need to check if they exist!
# (An illustrative call sketch follows the function below.)
def maybe_copy(module,
sd,
weight_quantizer,
mp_replace,
dst_name,
src_name,
qkv=False,
megatron_v2=False,
split_qkv=False,
heads=1):
if src_name in sd:
dst = getattr(module, dst_name)
tmp = sd[src_name]
if len(dst.shape) == 1:
if split_qkv:
dst = mp_replace.strided_copy(dst, tmp, num_splits=3)
else:
dst = mp_replace.copy(dst, tmp)
if qkv and megatron_v2:
dst = torch.nn.parameter.Parameter(_transpose(dst, heads=heads, mp_replace=mp_replace).contiguous())
else:
if split_qkv:
dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(tmp if weight_quantizer.q_int8 else \
(transpose(tmp).contiguous())), num_splits=3, int8=weight_quantizer.q_int8)
else:
if qkv and megatron_v2:
tmp = _transpose(transpose(tmp), heads=heads, mp_replace=mp_replace).contiguous()
if weight_quantizer.q_int8:
tmp = transpose(tmp)
dst = mp_replace.copy(dst, weight_quantizer.quantize(tmp if weight_quantizer.q_int8 else \
transpose(tmp)), int8=weight_quantizer.q_int8)
setattr(module, dst_name, dst)
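# Illustrative call sketch (the source key and policy attributes below are assumptions):
# a model container would typically use maybe_copy to pull one fused parameter out of a
# HF-style state dict into the injected module, e.g.
#
#   maybe_copy(module.attention, sd, weight_quantizer, mp_replace,
#              dst_name='attn_qkvw', src_name=prefix + 'attn.c_attn.weight',
#              qkv=True, split_qkv=policy.split_qkv)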
# Extending the maybe_copy function for when the q, k, and v are in separate parameters!
def maybe_copy_qkv(module, sd, weight_quantizer, mp_replace, dst_name, src_names, split_qkv=False):
if src_names[0] in sd:
q = sd[src_names[0]]
k = sd[src_names[1]]
v = sd[src_names[2]]
qkv_data = torch.cat((q, k, v), dim=0)
dst = getattr(module, dst_name)
if len(dst.shape) == 1:
if split_qkv:
dst = mp_replace.strided_copy(dst, qkv_data.contiguous(), num_splits=3)
else:
dst = mp_replace.copy(dst, qkv_data)
else:
if split_qkv:
dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(qkv_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \
((transpose(qkv_data)).contiguous())), num_splits=3, int8=weight_quantizer.q_int8)
else:
dst = mp_replace.copy(dst, weight_quantizer.quantize(qkv_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \
transpose(qkv_data)), int8=weight_quantizer.q_int8)
setattr(module, dst_name, dst)
# Extending the `maybe_copy` function for when mlp1 is in separate parameters for GeGLU
def maybe_copy_geglu(module, sd, weight_quantizer, mp_replace, dst_name, src_names):
if src_names[0] in sd:
reg_proj = sd[src_names[0]]
gate_proj = sd[src_names[1]]
mlp1_data = torch.cat((reg_proj, gate_proj), dim=0)
dst = getattr(module, dst_name)
dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(mlp1_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \
transpose(mlp1_data)), num_splits=2, int8=weight_quantizer.q_int8)
setattr(module, dst_name, dst)
def pack_lora_weights(p):
return [
p.lora_right_weight, \
p.lora_left_weight, \
p.lora_scaling
]
def maybe_get_lora(p):
if hasattr(p, 'lora_right_weight'):
lora_param = pack_lora_weights(p)
else:
lora_param = []
return lora_param
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .replace_module import replace_transformer_layer, revert_transformer_layer, ReplaceWithTensorSlicing, GroupQuantizer, generic_injection
from .module_quantize import quantize_transformer_layer
from .replace_policy import HFBertLayerPolicy
from .layers import LinearAllreduce, LinearLayer, EmbeddingLayer, Normalize
from .policy import DSPolicy
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import copy
import torch
from deepspeed.ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
def module_inject(layer_obj, model, config, micro_batch_size, max_seq_length, seed, preln, fp16=True):
for name, child in model.named_children():
if isinstance(child, layer_obj):
print('REPLACING BertLayer')
cuda_config = DeepSpeedTransformerConfig(batch_size=micro_batch_size,
max_seq_length=max_seq_length,
hidden_size=config.hidden_size,
heads=config.num_attention_heads,
attn_dropout_ratio=config.attention_probs_dropout_prob,
hidden_dropout_ratio=config.hidden_dropout_prob,
num_hidden_layers=config.num_hidden_layers,
initializer_range=config.initializer_range,
seed=seed,
fp16=fp16,
pre_layer_norm=preln)
new_module = DeepSpeedTransformerLayer(cuda_config)
# copy relevant state from child -> new module
qw = child.attention.self.query.weight
qb = child.attention.self.query.bias
kw = child.attention.self.key.weight
kb = child.attention.self.key.bias
vw = child.attention.self.value.weight
vb = child.attention.self.value.bias
qkvw = torch.cat((qw, kw, vw), 0)
qkvb = torch.cat((qb, kb, vb), 0)
new_module.attn_qkvw.data = qkvw
new_module.attn_qkvb.data = qkvb
new_module.attn_ow.data = child.attention.output.dense.weight
new_module.attn_ob.data = child.attention.output.dense.bias
if preln:
attention_layerNorm = child.PostAttentionLayerNorm
else:
attention_layerNorm = child.attention.output.LayerNorm
new_module.attn_nw.data = attention_layerNorm.weight
new_module.attn_nb.data = attention_layerNorm.bias
if preln:
intermediate_FF = child.intermediate.dense_act
else:
intermediate_FF = child.intermediate.dense
new_module.inter_w.data = intermediate_FF.weight
new_module.inter_b.data = intermediate_FF.bias
new_module.output_w.data = child.output.dense.weight
new_module.output_b.data = child.output.dense.bias
if preln:
transformer_LayerNorm = child.PreAttentionLayerNorm
else:
transformer_LayerNorm = child.output.LayerNorm
new_module.norm_w.data = transformer_LayerNorm.weight
new_module.norm_b.data = transformer_LayerNorm.bias
setattr(model, name, copy.deepcopy(new_module))
else:
module_inject(layer_obj, child, config, micro_batch_size, max_seq_length, seed, preln, fp16)
return model
def test_hi():
from turing.nvidia_modelingpreln import BertConfig as BertConfigPreLN
from turing.nvidia_modelingpreln import BertForQuestionAnswering as BertForQuestionAnsweringPreLN
from turing.nvidia_modelingpreln import BertLayer
bert_model_config = {
"vocab_size_or_config_json_file": 119547,
"hidden_size": 1024,
"num_hidden_layers": 1,
"num_attention_heads": 16,
"intermediate_size": 4096,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02
}
bert_config = BertConfigPreLN(**bert_model_config)
base_model = BertForQuestionAnsweringPreLN(bert_config, args=None)
#base_model = LinearStack()
test_model = copy.deepcopy(base_model)
    # module_inject requires the preln argument; the test model above is the pre-layernorm variant
    test_model = module_inject(BertLayer, test_model, bert_config, 4, 384, 1234, preln=True)
print('BASE', base_model)
print('TEST', test_model)
#base_model.eval()
#test_model.eval()
#test_input = torch.rand(1, base_model.input_dim)
#base_output = base_model(test_input)
#test_output = test_model(test_input)
#
#assert torch.allclose(base_output, test_output, atol=3e-8)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from typing import Optional
import torch
import tqdm
import deepspeed
import deepspeed.ops.transformer as transformer_inference
from deepspeed.ops.transformer.inference.diffusers_attention import DeepSpeedDiffusersAttention
from deepspeed.ops.transformer.inference.diffusers_transformer_block import DeepSpeedDiffusersTransformerBlock
from deepspeed.ops.transformer.inference.diffusers_2d_transformer import Diffusers2DTransformerConfig
from deepspeed.accelerator import get_accelerator
from .replace_policy import HFGPT2LayerPolicy
from .replace_policy import replace_policies, generic_policies
from deepspeed import comm as dist
from torch import nn
from .layers import LinearAllreduce, LinearLayer
from .load_checkpoint import load_model_with_checkpoint
import time
from .utils import policy_to_ds_container
import gc
class ReplaceWithTensorSlicing:
def __init__(self, mp_group=None, mp_size=1, out_dim=1, in_dim=0):
if mp_group is not None:
self.gpu_index = dist.get_rank(group=mp_group)
else:
self.gpu_index = 0
self.out_dim = out_dim
self.in_dim = in_dim
self.mp_size = mp_size
def merge_assert(self, dim1, dim2):
assert dim1 > dim2, \
            'Merging tensors is not allowed here! Please use deepspeed load_checkpoint ' \
            'for merging your checkpoints before replacing the transformer layer with ' \
            'inference-kernels'
def strided_copy(self,
dst: Optional[torch.Tensor],
src: Optional[torch.Tensor],
num_splits: int,
int8: bool = False,
allocate_tensor: bool = False):
if src is None:
return src
src_shape = src.shape
dst_shape = dst.shape
outer_dim = 0 if int8 else -1
if allocate_tensor:
dst = torch.empty_like(dst)
src_split = torch.split(src.data, src.shape[outer_dim] // num_splits, dim=outer_dim)
if (len(src_shape) == 2 and len(dst_shape) == 2):
if src_shape[outer_dim] == dst_shape[self.out_dim]:
dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape)
dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
if hasattr(src, 'scale'):
dst.scale = src.scale
return dst
self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim])
qkv_size = dst_shape[self.out_dim] // num_splits
qkv_split = [torch.split(src_s, qkv_size, dim=outer_dim) for src_s in src_split]
weight_split = [
torch.cat([qkv_s[i] for qkv_s in qkv_split], axis=outer_dim) for i in range(len(qkv_split[0]))
]
dst = dst.reshape(-1).data.copy_(weight_split[self.gpu_index].contiguous().reshape(-1)).reshape(
weight_split[self.gpu_index].shape)
else:
if src_shape[0] == dst_shape[0]:
return torch.nn.parameter.Parameter(src)
qkv_size = dst_shape[0] // num_splits
qkv_split = [torch.split(src_s, qkv_size, dim=0) for src_s in src_split]
bias_split = [torch.cat([qkv_s[i] for qkv_s in qkv_split], axis=0) for i in range(len(qkv_split[0]))]
dst.data.copy_(bias_split[self.gpu_index].contiguous())
dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
if hasattr(src, 'scale'):
dst.scale = src.scale
return dst
def copy(self, dst, src, int8=False, allocate_tensor=False):
if src is None:
return src
assert not dst.data.is_meta # the torch.Tensor.copy_ method used below will silently fail on meta tensors
if allocate_tensor:
dst = torch.empty_like(dst)
outer_dim = 0 if int8 else 1
inner_dim = 1 if int8 else 0
src_shape = src.shape
dst_shape = dst.shape
if (len(src_shape) == 2 and len(dst_shape) == 2):
if src_shape[inner_dim] == dst_shape[self.in_dim] and src_shape[outer_dim] == dst_shape[self.out_dim]:
dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape)
else:
if src_shape[inner_dim] != dst_shape[self.in_dim]:
self.merge_assert(src_shape[inner_dim], dst_shape[self.in_dim])
dst.data.copy_(src[:, self.gpu_index * dst_shape[self.in_dim]: (self.gpu_index + 1) * dst_shape[self.in_dim]] if inner_dim == 1 else \
src[self.gpu_index * dst_shape[self.in_dim]: (self.gpu_index + 1) * dst_shape[self.in_dim], :])
else:
self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim])
dst.data.copy_(src[:, self.gpu_index * dst_shape[self.out_dim]: (self.gpu_index + 1) * dst_shape[self.out_dim]] if outer_dim == 1 else \
src[self.gpu_index * dst_shape[self.out_dim]: (self.gpu_index + 1) * dst_shape[self.out_dim], :])
else:
if src_shape[0] == dst_shape[0]:
dst = src if src.dtype == dst.dtype else dst.data.copy_(src)
else:
dst.data.copy_(src[self.gpu_index * dst_shape[-1]:(self.gpu_index + 1) * dst_shape[-1]])
dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
if hasattr(src, 'scale'):
dst.scale = src.scale
return dst
def get_transformer_name(replaced_module):
from .containers import supported_models
from torch.nn import ModuleList
transformer_name = ''
for n, c in replaced_module.named_children():
if c.__class__ in supported_models:
transformer_name += n + '.'
for name, child in c.named_children():
if child.__class__ is ModuleList:
transformer_name += name
break
break
return transformer_name
class GroupQuantizer:
def __init__(self, q_int8=True, group_size=1, num_bits=8, num_groups=0):
self.group_size = group_size
self.num_bits = num_bits
self.q_int8 = q_int8
self.num_groups = num_groups
def quantize(self, inputs, qkv=True, count=1, parallel_dim=0):
if not self.q_int8 or not qkv:
inputs = torch.nn.Parameter(inputs, requires_grad=False)
inputs.scale = torch.empty(1)
return inputs
q_range = 2**self.num_bits
num_groups = self.num_groups if self.num_groups > 0 else inputs.shape[0] // self.group_size
inputs = inputs.to(get_accelerator().current_device_name())
input_flat = inputs.reshape(num_groups, -1).contiguous()
input_min = torch.min(input_flat, dim=1, keepdim=True)[0].float()
input_max = torch.max(input_flat, dim=1, keepdim=True)[0].float()
scale = torch.max(input_min.abs(), input_max.abs()) * 2.0 / (q_range)
input_flat = (input_flat / scale).round().clamp(-q_range // 2, q_range // 2 - 1)
inputs_q = input_flat.reshape(inputs.shape).to(torch.int8).contiguous()
out = torch.nn.Parameter(inputs_q, requires_grad=False)
inputs_split = inputs.split(inputs.shape[parallel_dim] // 2, dim=parallel_dim)
input_flat = [inputs_split[i].reshape(num_groups, -1).contiguous() for i in range(2)]
input_min = [torch.min(input_flat[i], dim=1, keepdim=True)[0].float() for i in range(2)]
input_max = [torch.max(input_flat[i], dim=1, keepdim=True)[0].float() for i in range(2)]
scale1 = [(torch.max(input_min[i].abs(), input_max[i].abs()) * 2.0 / (q_range)).squeeze().unsqueeze(0)
for i in range(2)]
out.scale = torch.cat([scale.squeeze().unsqueeze(0), scale1[0], scale1[1]], dim=0).reshape(num_groups,
-1).contiguous()
return out
def _module_match(module):
for policy in generic_policies:
policy = policy()
if policy.match(module):
return policy
return None
def generic_injection(module, fp16=False, bf16=False, enable_cuda_graph=True):
def replace_attn(child, policy):
policy_attn = policy.attention(child)
if policy_attn is None:
return child
if len(policy_attn) == 5:
qkvw, attn_ow, attn_ob, hidden_size, heads = policy_attn
else:
qw, kw, vw, attn_ow, attn_ob, hidden_size, heads = policy_attn
config = transformer_inference.DeepSpeedInferenceConfig(
hidden_size=hidden_size,
heads=heads,
fp16=fp16,
bf16=bf16,
triangular_masking=False,
max_out_tokens=4096,
)
attn_module = DeepSpeedDiffusersAttention(config)
def transpose(data):
data = data.contiguous()
data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
data = data.reshape(data.shape[-1], data.shape[-2])
            data = data.to(get_accelerator().current_device_name())
return data
if len(policy_attn) == 5:
attn_module.attn_qkvw.data = transpose(qkvw.data)
else:
attn_module.attn_qkvw = None
attn_module.attn_qw.data = transpose(qw.data)
attn_module.attn_kw.data = transpose(kw.data)
attn_module.attn_vw.data = transpose(vw.data)
attn_module.attn_qkvb = None
attn_module.attn_ow.data = transpose(attn_ow.data)
attn_module.attn_ob.data.copy_(attn_ob.data.to(get_accelerator().current_device_name()))
return attn_module
def replace_attn_block(child, policy):
config = Diffusers2DTransformerConfig()
return DeepSpeedDiffusersTransformerBlock(child, config)
if isinstance(module, torch.nn.Module):
pass
else:
if fp16 is False and bf16 is False:
raise ValueError("Generic injection only supported with FP16 or BF16")
try:
import diffusers
if hasattr(diffusers.models.attention, 'CrossAttention'):
cross_attention = diffusers.models.attention.CrossAttention
else:
cross_attention = diffusers.models.attention_processor.Attention
attention_block = diffusers.models.attention.BasicTransformerBlock
new_policies = {
cross_attention: replace_attn,
attention_block: replace_attn_block,
}
except ImportError:
new_policies = {}
#replace_transformer_layer(None,
# module.text_encoder,
# training=False,
# replace_with_kernel_inject=True,
# triangular_masking=True,
# max_out_tokens=8192)
from ..model_implementations.transformers.clip_encoder import DSClipEncoder
cg_encoder = DSClipEncoder(module.text_encoder, enable_cuda_graph=enable_cuda_graph)
setattr(module, 'text_encoder', cg_encoder)
for name in module.__dict__.keys():
sub_module = getattr(module, name)
policy = _module_match(sub_module)
if policy is not None:
def _replace_module(module, policy):
for name, child in module.named_children():
_replace_module(child, policy)
if child.__class__ in new_policies:
replaced_module = new_policies[child.__class__](child, policy)
setattr(module, name, replaced_module)
_replace_module(sub_module, policy)
new_module = policy.apply(sub_module, enable_cuda_graph=enable_cuda_graph)
print(f"**** found and replaced {name} w. {type(new_module)}")
setattr(module, name, new_module)
container_g = None
def replace_transformer_layer(orig_layer_impl, model, checkpoint_dict, config, model_config):
""" Replace bert-style transformer layers with DeepSpeed's transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
checkpoint_dict: Dictionary for checkpoint passed from the Inference Engine
config: top-level DS Inference config defined in inference/config.py
model_config: HuggingFace model config passed from the inference/engine.py
Returns:
Updated nn.module with replaced transformer layers
"""
# define globals here since the internally defined functions inherit them everywhere
quantize = (config.dtype == torch.int8)
# TODO: refactor later; minimize the module-level globals used above and read settings from config.* instead
linear_layer_setting = None
'''
linear_layer_setting (tuple of modules) [Optional]: shows which two classes are used for linear layers and embedding layers
'''
micro_batch_size = -1
seed = -1
local_rank = -1
mp_replace = ReplaceWithTensorSlicing(mp_group=config.tensor_parallel.tp_group,
mp_size=config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1)
def replace_with_policy(child, policy_cls, triangular_masking, inference=False, layer_id=0):
policy = policy_cls(child, inference=inference)
if not policy.cuda_graph_supported:
# the policy says cuda graph is not supported; raise an error if it was requested in the config
assert not config.enable_cuda_graph, "cuda graph is not supported with this model, please disable"
from deepspeed.moe.layer import MoE
moe = False
if hasattr(child, 'mlp') and isinstance(child.mlp, MoE):
num_experts = child.mlp.num_experts
moe = True
# 1. Create a model-specific container object using the policy object.
_container = policy_to_ds_container(policy=policy,
config=config,
model_config=model_config,
layer_id=layer_id,
child=child)
_container.set_moe(moe)
# 2. Set the tensor parallelism config
_container.set_tensor_parallel_config(config.tensor_parallel.tp_size, config.tensor_parallel.tp_group)
# 3. Initialize tensors
_container.initialize_tensors()
# 4. deal with data types -- needs refactor to use dtype instead of fp16
if config.dtype in [torch.float16, torch.bfloat16, torch.int8]:
_container.convert_to_required_dtype()
# 5. Set the quantization config
quantizer = GroupQuantizer(q_int8=quantize)
_container.set_quantization_config(quantizer)
# 6. create a DS Inference config object
_container.create_ds_model_config()
# 7. use the config and create the module
_container.create_module()
# 8. transpose the weights and bias if needed
_container.transpose()
# 9. deal with tensor parallelism.
_container.apply_tensor_parallelism(mp_replace)
# 10. copy the tensors from the model-specific container to the new module
_container.copy_data_to_new_module()
# 11. set global for generic checkpoint loading
global container_g
if container_g is None:
container_g = _container
return _container.module
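# Illustrative note: the numbered steps above take one framework layer (e.g. a HuggingFace
# transformer block), pull its weights into a model-specific DS container via the policy,
# convert/quantize/shard them as configured, and return the fused DeepSpeed inference module
# that replaces the original child in the model tree.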
def replace_wo_policy(module, all_reduce_linears, prefix="", state_dict=None):
mp_size = config.tensor_parallel.tp_size
mp_group = config.tensor_parallel.tp_group
def _replace(child, name, conv_linear_layer):
if getattr(child, "replaced", False) == True:
return
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
weight_shape = child.weight.shape
if name in all_reduce_linears:
new_weight = torch.empty((
weight_shape[1] if conv_linear_layer else weight_shape[0],
(weight_shape[0] if conv_linear_layer else weight_shape[1]) // mp_size,
),
device=child.weight.device,
dtype=child.weight.dtype)
if conv_linear_layer:
child.weight.data = child.weight.data.transpose(-1, -2).contiguous()
data = mp_replace.copy(new_weight, child.weight.data)
new_bias = torch.empty((weight_shape[0]), device=child.weight.device, dtype=child.weight.dtype)
if child.bias is not None:
new_bias.data.copy_(child.bias.data)
setattr(child, "replaced", True)
return LinearAllreduce(data, child.bias if child.bias is None else \
torch.nn.parameter.Parameter(new_bias.to(get_accelerator().current_device_name())), mp_group)
else:
new_weight = torch.empty((
(weight_shape[1] if conv_linear_layer else weight_shape[0]) // mp_size,
weight_shape[0] // mp_size if conv_linear_layer else weight_shape[1],
),
device=child.weight.device,
dtype=child.weight.dtype)
if conv_linear_layer:
child.weight.data = child.weight.data.transpose(-1, -2).contiguous()
data = mp_replace.copy(new_weight, child.weight.data)
new_bias = torch.empty((weight_shape[0] // mp_size),
device=child.weight.device,
dtype=child.weight.dtype)
bias_data = None if child.bias is None else mp_replace.copy(new_bias, child.bias.data).to(
get_accelerator().current_device_name())
setattr(child, "replaced", True)
return LinearLayer(weight=data.to(get_accelerator().current_device_name()), bias=bias_data)
def _slice_embedding(child, name, conv_linear_layer):
if getattr(child, "replaced", False) == True:
return
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
new_weight = torch.empty((child.weight.shape[0], child.weight.shape[1] // mp_size),
device=child.weight.device,
dtype=child.weight.dtype)
data = mp_replace.copy(new_weight,
child.weight.ds_tensor.data if hasattr(child.weight, 'ds_tensor') else \
child.weight.data)
new_embedding = nn.Embedding(child.weight.shape[0], child.weight.shape[1] // mp_size)
new_embedding.weight.data.copy_(data)
setattr(child, "replaced", True)
return new_embedding
def update_mp_params(child):
if getattr(child, "replaced", False) == True:
return
if hasattr(child, 'n_heads'):
assert child.n_heads % mp_size == 0, "n_heads ({}) must be divisible by mp_size ({})".format(
child.n_heads, mp_size)
child.n_heads = child.n_heads // mp_size
if hasattr(child, 'inner_dim'):
assert child.inner_dim % mp_size == 0, "inner_dim ({}) must be divisible by mp_size ({})".format(
child.inner_dim, mp_size)
child.inner_dim = child.inner_dim // mp_size
if hasattr(child, 'num_heads'):
assert child.num_heads % mp_size == 0, "num_heads ({}) must be divisible by mp_size ({})".format(
child.num_heads, mp_size)
child.num_heads = child.num_heads // mp_size
if hasattr(child, 'num_attention_heads'):
assert child.num_attention_heads % mp_size == 0, "num_attention_heads ({}) must be divisible by mp_size ({})".format(
child.num_attention_heads, mp_size)
child.num_attention_heads = child.num_attention_heads // mp_size
if hasattr(child, 'num_attn_heads'):
assert child.num_attn_heads % mp_size == 0, "num_attn_heads ({}) must be divisible by mp_size ({})".format(
child.num_attn_heads, mp_size)
child.num_attn_heads = child.num_attn_heads // mp_size
if hasattr(child, 'all_head_size'):
assert child.all_head_size % mp_size == 0, "all_head_size ({}) must be divisible by mp_size ({})".format(
child.all_head_size, mp_size)
child.all_head_size = child.all_head_size // mp_size
if hasattr(child, 'embed_dim'):
assert child.embed_dim % mp_size == 0, "embed_dim ({}) must be divisible by mp_size ({})".format(
child.embed_dim, mp_size)
child.embed_dim = child.embed_dim // mp_size
if hasattr(child, 'hidden_size'):
assert child.hidden_size % mp_size == 0, "hidden_size ({}) must be divisible by mp_size ({})".format(
child.hidden_size, mp_size)
child.hidden_size = child.hidden_size // mp_size
setattr(child, "replaced", True)
conv_linear_layer = False
if linear_layer_setting is not None:
linear_policies = {linear_layer_setting[0]: _replace}
if len(linear_layer_setting) == 2:
linear_policies.update({linear_layer_setting[1]: _slice_embedding})
else:
if orig_layer_impl is HFGPT2LayerPolicy._orig_layer_class:
try:
import transformers
conv_linear_layer = True
linear_policies = {transformers.pytorch_utils.Conv1D: _replace}
except (ImportError, AttributeError):
conv_linear_layer = False
linear_policies = {nn.Linear: _replace}
else:
linear_policies = {nn.Linear: _replace, nn.Embedding: _slice_embedding}
def _replace_module(r_module, prev_name='', prev_class_name=''):
for name, child in r_module.named_children():
if prev_class_name == "":
class_name = prev_name
elif prev_name == "":
class_name = prev_class_name
else:
class_name = prev_class_name + '.' + prev_name
checking_key = prefix + '.' + class_name + '.' + name + '.' if class_name != "" else prefix + '.' + name + '.'
if child.__class__ in [nn.Linear, nn.Embedding, nn.LayerNorm] and state_dict != None:
if any(checking_key in item for item in state_dict):
load(child, state_dict, checking_key, mp_group)
else:
continue
if len(child._buffers) != 0 and state_dict != None:
load_buffer(child, state_dict, checking_key)
if child.__class__ in linear_policies:
setattr(r_module, name, linear_policies[child.__class__](child, prev_name + '.' + name,
conv_linear_layer))
else:
update_mp_params(child)
_replace_module(child, name, class_name)
return r_module
return _replace_module(module)
def replace_fn(child, _policy, layer_id=0, prefix="", state_dict=None):
training = False # todo: refactor this part to go in the config
if training:
# copy relevant state from child -> new module
new_module = replace_with_policy(child, _policy, config.triangular_masking)
else:
# copy relevant state from child -> new module
if config.replace_with_kernel_inject:
new_module = replace_with_policy(child,
_policy,
config.triangular_masking,
inference=True,
layer_id=layer_id)
else:
new_module = replace_wo_policy(child, _policy, prefix=prefix, state_dict=state_dict)
return new_module
if checkpoint_dict != None and not config.replace_with_kernel_inject:
# AutoTP shard loading
checkpoint = checkpoint_dict["checkpoints"]
pbar = tqdm.tqdm(total=len(checkpoint), desc=f"Loading {len(checkpoint)} checkpoint shards")
for i in range(len(checkpoint)):
replaced_module = replace_module(model=model,
orig_class=orig_layer_impl,
replace_fn=replace_fn,
_replace_policy=config.injection_policy_tuple,
checkpoint=checkpoint[i])
pbar.update(1)
gc.collect()
else:
replaced_module = replace_module(model=model,
orig_class=orig_layer_impl,
replace_fn=replace_fn,
_replace_policy=config.injection_policy_tuple)
quantizer = GroupQuantizer(q_int8=quantize)
world_size = dist.get_world_size() if dist.is_initialized() else 1
rank = dist.get_rank() if dist.is_initialized() else 0
if checkpoint_dict is not None and config.replace_with_kernel_inject:
assert container_g.ckpt_load_enabled, \
f"Meta Tensor checkpoint loading not supported in {container_g.__class__.__name__} container"
start_time = time.time()
checkpoint = checkpoint_dict['checkpoints']
ckpt_list = checkpoint["tp"] if type(checkpoint) is dict else checkpoint
ckpt_type = checkpoint_dict.get('parallelization', 'pp')
ckpt_mp_size = checkpoint_dict.get('tp_size', len(ckpt_list))
ckpt_mp_size = checkpoint_dict.get('mp_size', ckpt_mp_size)
base_dir1 = checkpoint_dict.get('base_dir', config.base_dir)
if ckpt_type == 'pp' and type(checkpoint) is list:
pbar = tqdm.tqdm(total=len(checkpoint), desc=f"Loading {len(checkpoint)} checkpoint shards")
for i in range(len(checkpoint)):
sd = [torch.load(os.path.join(base_dir1, checkpoint[i]), map_location='cpu')]
load_model_with_checkpoint(replaced_module,
sd,
mp_replace,
ckpt_type,
ckpt_mp_size,
quantizer,
container=container_g)
pbar.update(1)
else:
num_checkpoints = len(ckpt_list) // ckpt_mp_size
tp_split_size = (world_size / ckpt_mp_size)
sd_offset = int(rank / tp_split_size)
sd_count = int((rank + max(1, tp_split_size)) / tp_split_size) - sd_offset
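# Worked example (illustrative): with world_size=4 and a checkpoint written with
# ckpt_mp_size=2, tp_split_size is 2.0, so ranks 0-1 read shard 0 and ranks 2-3 read
# shard 1 (sd_offset in {0, 1}, sd_count=1); int(rank % tp_split_size) below then tells
# load_model_with_checkpoint which half of each shard a rank keeps.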
pbar = tqdm.tqdm(total=num_checkpoints, desc=f"Loading {num_checkpoints} checkpoint shards")
for i in range(num_checkpoints):
pbar.update(1)
ckpt_index = i * ckpt_mp_size + sd_offset
ckpt_files = [
os.path.join(base_dir1, ckpt_list[ckpt_index + j]) if base_dir1 else ckpt_list[ckpt_index + j]
for j in range(sd_count)
]
sds = [torch.load(ckpt_file, map_location='cpu') for ckpt_file in ckpt_files]
load_model_with_checkpoint(replaced_module,
sds,
mp_replace,
ckpt_type,
ckpt_mp_size,
quantizer,
int(rank % tp_split_size),
container=container_g)
sds = [None for _ in sds]
gc.collect()
if "non_tp" in checkpoint:
pbar = tqdm.tqdm(total=len(checkpoint["non_tp"]),
desc=f"Loading {len(checkpoint['non_tp'])} checkpoint shards")
for i in range(len(checkpoint["non_tp"])):
pbar.update(1)
ckpt_file = os.path.join(base_dir1,
checkpoint["non_tp"][i]) if base_dir1 else checkpoint["non_tp"][i]
sds = [torch.load(ckpt_file, map_location='cpu')]
load_model_with_checkpoint(replaced_module,
sds,
mp_replace,
ckpt_type,
ckpt_mp_size,
quantizer,
int(rank % tp_split_size),
container=container_g)
sds = [None for _ in sds]
gc.collect()
print(f"checkpoint loading time at rank {rank}: {time.time()-start_time} sec")
if config.save_mp_checkpoint_path is not None:
from collections import OrderedDict
import json
num_partitions = 8
if checkpoint_dict is None:
ckpt_name = "ds_model"
try:
from transformers.models.bloom.modeling_bloom import BloomForCausalLM
if isinstance(model, BloomForCausalLM):
ckpt_name = "bloom"
except ImportError:
ckpt_name = "ds_model"
else:
ckpt_name = checkpoint_dict['type']
if dist.is_initialized():
dist.barrier()
transformer_name = get_transformer_name(replaced_module)
non_tp_ckpt_name = 'non-tp.pt'
ckpt_files = [non_tp_ckpt_name]
os.makedirs(config.save_mp_checkpoint_path, exist_ok=True)
if not dist.is_initialized() or dist.get_rank() == 0:
print("Saving tp-sharded checkpoints")
torch.save(
OrderedDict({k: v
for k, v in dict(replaced_module.state_dict()).items()
if transformer_name not in k}), f'{config.save_mp_checkpoint_path}/{non_tp_ckpt_name}')
dtype_reprs = {
torch.float32: 'float32',
torch.float16: 'float16',
torch.int8: 'int8',
torch.bfloat16: 'bfloat16'
}
ckpt_config = json.dumps({
'type': ckpt_name,
'base_dir': f'{config.save_mp_checkpoint_path}',
'checkpoints': {
"non_tp": ckpt_files,
"tp": [f'tp_{r:0>2d}_{m:0>2d}.pt' for m in range(num_partitions) for r in range(world_size)]
},
'version': 1.0,
'parallelization': 'tp',
'tp_size': world_size,
'dtype': dtype_reprs[config.dtype]
})
with open(f"{config.save_mp_checkpoint_path}/ds_inference_config.json", "w") as cfg:
cfg.write(ckpt_config)
rep_sd = replaced_module.state_dict()
for n, p in replaced_module.named_parameters():
if hasattr(p, 'scale'):
rep_sd[n] = [p, p.scale]
keys = list(rep_sd.keys())
partition_size = (len(keys) // num_partitions + 1)
for m in range(num_partitions):
torch.save(
OrderedDict({
k: [rep_sd[k], rep_sd[k].scale] if hasattr(rep_sd[k], 'scale') else rep_sd[k]
for k in keys[m * partition_size:(m + 1) * partition_size] if transformer_name in k
}), f'{config.save_mp_checkpoint_path}/tp_{rank:0>2d}_{m:0>2d}.pt')
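# Worked example (illustrative): with num_partitions=8 and world_size=2, the loop above
# writes sixteen transformer shards, tp_00_00.pt through tp_01_07.pt, next to non-tp.pt,
# matching the file layout advertised in ds_inference_config.json for later reloading.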
return replaced_module
def revert_transformer_layer(orig_layer_impl, model, config, preln=False):
""" Revert DeepSpeed's transformer layer back to original bert-style transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation that was replaced,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
config (dict): model config containing hidden size, attention heads, etc.
Returns:
Updated nn.module with original bert-style transformer layers
"""
def replace_fn(child, _replace_policy, layer_id):
#from turing.nvidia_modelingpreln import BertLayer
orig_module = orig_layer_impl(config)
# copy relevant state from child -> original module
qkvw = child.attn_qkvw.data
qkvb = child.attn_qkvb.data
qw, kw, vw = torch.chunk(qkvw, 3, dim=0)
qb, kb, vb = torch.chunk(qkvb, 3, dim=0)
orig_module.attention.self.query.weight.data = qw
orig_module.attention.self.query.bias.data = qb
orig_module.attention.self.key.weight.data = kw
orig_module.attention.self.key.bias.data = kb
orig_module.attention.self.value.weight.data = vw
orig_module.attention.self.value.bias.data = vb
orig_module.attention.output.dense.weight.data = child.attn_ow.data
orig_module.attention.output.dense.bias.data = child.attn_ob.data
attn_ln_w = child.attn_nw.data
attn_ln_b = child.attn_nb.data
if preln:
orig_module.PostAttentionLayerNorm.weight.data = attn_ln_w
orig_module.PostAttentionLayerNorm.bias.data = attn_ln_b
else:
orig_module.attention.output.LayerNorm.weight.data = attn_ln_w
orig_module.attention.output.LayerNorm.bias.data = attn_ln_b
inter_ff_w = child.inter_w.data
inter_ff_b = child.inter_b.data
if preln:
orig_module.intermediate.dense_act.weight.data = inter_ff_w
orig_module.intermediate.dense_act.bias.data = inter_ff_b
else:
orig_module.intermediate.dense.weight.data = inter_ff_w
orig_module.intermediate.dense.bias.data = inter_ff_b
orig_module.output.dense.weight.data = child.output_w.data
orig_module.output.dense.bias.data = child.output_b.data
transformer_ln_w = child.norm_w.data
transformer_ln_b = child.norm_b.data
if preln:
orig_module.PreAttentionLayerNorm.weight.data = transformer_ln_w
orig_module.PreAttentionLayerNorm.bias.data = transformer_ln_b
else:
orig_module.output.LayerNorm.weight.data = transformer_ln_w
orig_module.output.LayerNorm.bias.data = transformer_ln_b
return orig_module
return replace_module(model=model,
orig_class=deepspeed.DeepSpeedTransformerLayer,
replace_fn=replace_fn,
_replace_policy=None)
def replace_module(model, orig_class, replace_fn, _replace_policy, checkpoint=None):
""" Scan the model for instances of ``orig_clas:`` to replace using ``replace_fn``.
Arguments:
model (torch.nn.Module): the model to augment
orig_class (torch.nn.Module): the module to search for
replace_fn (method): a method to convert instances of ``orig_class`` to the
desired type and return a new instance.
Returns:
A modified ``model``.
"""
sd = None
if checkpoint != None:
sd = torch.load(checkpoint, map_location='cpu')
policy = {}
if orig_class is not None:
policy.update({orig_class: (replace_fn, _replace_policy)})
else:
for plcy in replace_policies:
# instantiate a throw-away policy in order to populate the _orig_layer_class
_ = plcy(None)
if isinstance(plcy._orig_layer_class, list):
for orig_layer_class in plcy._orig_layer_class:
policy.update({orig_layer_class: (replace_fn, plcy)})
elif plcy._orig_layer_class is not None:
policy.update({plcy._orig_layer_class: (replace_fn, plcy)})
assert len(policy.items()) > 0,\
"No default policy found! Please specify your policy injection_policy (like {BertLayer:HFBEertLayerPolicy})." +\
"You can find some samples here: https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py"
replaced_module, _ = _replace_module(model, policy, state_dict=sd)
if checkpoint != None:
embedding_weight = None
for n, p in replaced_module.named_parameters():
if "word_embeddings." in n or "embed_tokens." in n or "wte." in n:
embedding_weight = p
if embedding_weight is not None and hasattr(replaced_module, "lm_head") and hasattr(
replaced_module.lm_head, "weight") and replaced_module.lm_head.weight.is_meta:
replaced_module.lm_head.weight = embedding_weight
return replaced_module
from ..pipe import PipelineModule
import re
def skip_level_0_prefix(model, name):
model = str(model)
key = re.search(r": (.*?)Model", model)
if key is None:
key = re.search(r": (.*?)Stack", model)
if key is None:
key = re.match(r"(.*?)Model", model)
if key is not None and key.group(1).lower() in "bloom":
# if keys start with 'model.', don't skip level 0 prefix
if not re.match("^model[.]", name):
return True
return False
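# Example (illustrative): a BLOOM model repr contains "(transformer): BloomModel(", so the
# regex captures "Bloom" and, for parameter names that do not already start with "model.",
# the level-0 prefix is skipped when matching checkpoint keys during AutoTP shard loading.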
def load_buffer(module, state_dict, prefix):
for name in module._buffers.keys():
if module._buffers[name].data.is_meta:
module._buffers[name] = torch.nn.parameter.Parameter(
data=torch.empty_like(module._buffers[name].data, device="cpu"),
requires_grad=module._buffers[name].data.requires_grad)
if prefix + name in state_dict.keys():
module._buffers[name].data.copy_(state_dict[prefix + name])
def _replace_module(model, policies, prefix='', layer_id=0, level_id=0, state_dict=None):
""" Traverse model's children recursively and apply any transformations in ``policies``.
Arguments:
model (torch.nn.Module): model to augment
policies (dict): Mapping of source class to replacement function.
Returns:
Modified ``model``.
"""
try:
import transformers
OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding
except:
OPTLearnedPositionalEmbedding = None
load_layers = [nn.Linear, nn.Embedding, nn.LayerNorm, OPTLearnedPositionalEmbedding]
for name, child in model.named_children():
if child.__class__ in policies:
replaced_module = policies[child.__class__][0](child,
policies[child.__class__][-1],
layer_id,
prefix=prefix + name,
state_dict=state_dict)
setattr(model, name, replaced_module)
if isinstance(model, PipelineModule):
assert hasattr(model, 'forward_funcs'),\
"we require pipe-module to have the list of fwd_functions"
model.forward_funcs[model.fwd_map[name]] = replaced_module
layer_id += 1
else:
checking_key = prefix + name + '.'
if child.__class__ in load_layers and state_dict != None:
if any(checking_key in item for item in state_dict):
load(
child,
state_dict,
checking_key,
)
else:
continue
if len(child._buffers) != 0 and state_dict != None:
load_buffer(child, state_dict, checking_key)
_, layer_id = _replace_module(child,
policies,
prefix if level_id == 0 and skip_level_0_prefix(model, name) else \
prefix + name + '.',
layer_id=layer_id,
level_id=level_id + 1,
state_dict=state_dict)
# Add the reset_cache func to the model, so that it can be called in the beginning of text-generation.
model.reset_cache = transformer_inference.DeepSpeedTransformerInference.reset_cache
return model, layer_id
def load(module, state_dict, prefix, mp_group=None):
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
if hasattr(module, 'weight'):
if module.weight.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.weight.data, device="cpu"),
requires_grad=module.weight.data.requires_grad)
if 'query_key_value' in prefix:
module.weight = mp_replace.strided_copy(module.weight.data,
state_dict[prefix + 'weight'],
num_splits=3)
else:
module.weight = mp_replace.copy(module.weight.data, state_dict[prefix + 'weight'])
else:
if hasattr(module, 'norm') and hasattr(module.norm, 'weight'):
if module.norm.weight.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.norm.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.norm.weight.data,
device="cpu"),
requires_grad=module.norm.weight.data.requires_grad)
module.norm.weight = mp_replace.copy(module.norm.weight.data, state_dict[prefix + 'weight'])
if prefix + 'bias' in state_dict.keys():
if hasattr(module, 'bias'):
if module.bias.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data, device="cpu"),
requires_grad=module.bias.data.requires_grad)
module.bias = mp_replace.copy(module.bias, state_dict[prefix + 'bias'])
else:
if hasattr(module, 'norm') and hasattr(module.norm, 'bias'):
if module.norm.bias.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.norm.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.norm.bias.data,
device="cpu"),
requires_grad=module.norm.bias.data.requires_grad)
module.norm.bias = mp_replace.copy(module.norm.bias, state_dict[prefix + 'bias'])
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.utils import log_dist
# helper function to map between DS policies and DS containers
def policy_to_ds_container(**kwargs):
from .containers import HFGPT2LayerPolicy, DS_GPT2Container
from .containers import HFBertLayerPolicy, DS_BERTContainer
from .containers import BLOOMLayerPolicy, DS_BloomContainer
from .containers import HFGPTJLayerPolicy, DS_GPTJContainer
from .containers import HFGPTNEOLayerPolicy, DS_GPTNEOContainer
from .containers import GPTNEOXLayerPolicy, DS_GPTNEOXContainer
from .containers import HFOPTLayerPolicy, DS_OPTContainer
from .containers import MegatronLayerPolicy, DS_MegatronGPTContainer
from .containers import HFDistilBertLayerPolicy, DS_DistilBERTContainer
from .containers import LLAMALayerPolicy, DS_LLAMAContainer
policy_to_container = {
HFGPT2LayerPolicy: DS_GPT2Container,
HFBertLayerPolicy: DS_BERTContainer,
BLOOMLayerPolicy: DS_BloomContainer,
HFGPTJLayerPolicy: DS_GPTJContainer,
HFGPTNEOLayerPolicy: DS_GPTNEOContainer,
GPTNEOXLayerPolicy: DS_GPTNEOXContainer,
HFOPTLayerPolicy: DS_OPTContainer,
MegatronLayerPolicy: DS_MegatronGPTContainer,
HFDistilBertLayerPolicy: DS_DistilBERTContainer,
LLAMALayerPolicy: DS_LLAMAContainer,
}
container = None
policy = kwargs['policy']
assert policy is not None, "Policy cannot be None"
policy_type = type(policy)
if policy_type not in policy_to_container:
log_dist(f"Policy type {policy_type} not supported", [0])
else:
container = policy_to_container[policy_type](**kwargs)
return container
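# Illustrative usage sketch, mirroring the call made from replace_with_policy():
#
#   container = policy_to_ds_container(policy=policy, config=config,
#                                      model_config=model_config, layer_id=layer_id,
#                                      child=child)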
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from torch import nn
from deepspeed.model_implementations.transformers.ds_bloom import DeepSpeedBloomInference
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference
from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference
from deepspeed.model_implementations.transformers.ds_opt import DeepSpeedOPTInference
import deepspeed.ops.transformer as transformer_inference
from .layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding, RMSNormalize
import torch
import gc
from deepspeed.accelerator import get_accelerator
import re
def load_model_with_checkpoint(r_module,
sd,
mp_replace,
ckpt_type,
ckpt_mp_size,
weight_quantizer=None,
rank=0,
container=None):
error_msgs = []
def prefix_check():
# if keys start with 'model.', don't skip level 0 prefix
for key in sd[0].keys():
if re.match("^model[.]", key):
return False
return True
skip_level_0_prefix = prefix_check() and container.policy.use_load_prefix
def transpose(data):
with torch.no_grad():
data = data.contiguous()
data1 = data.transpose(-1, -2).reshape(-1)
data.reshape(-1).copy_(data1)
data1 = None
return data.reshape(data.shape[-1], data.shape[-2])
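# Note on the transpose helper above: it copies the transposed values back into the
# parameter's existing storage and then views that storage with the dimensions swapped,
# so only a temporary flattened copy is allocated (and released) instead of a second
# full-sized weight.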
def load(module, prefix):
args = (sd[0], prefix, {}, True, [], [], error_msgs)
if hasattr(module, 'weight'):
module.weight = mp_replace.copy(module.weight.data, sd[0][prefix + 'weight'])
if prefix + 'bias' in sd[0].keys():
if module.bias.data.is_meta:
# meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data, device="cpu"),
requires_grad=module.bias.data.requires_grad)
module.bias = mp_replace.copy(module.bias.data, sd[0][prefix + 'bias'])
args = None
gc.collect()
def load_transformer_layer(module, prefix):
if ckpt_type == "tp":
def load_parameters(module, prefix):
for n, p in module.named_parameters():
if prefix + n in sd[0] and len(n.split('.')) == 1:
if type(sd[0][prefix + n]) is list:
tmp_data, scale = sd[0][prefix + n]
scale = scale.to(get_accelerator().current_device_name())
# set the quantizer number of groups using the checkpoint scale shape
weight_quantizer.num_groups = scale.shape[0]
else:
tmp_data = sd[0][prefix + n].to(get_accelerator().current_device_name())
scale = None
src_shape = tmp_data.shape
dst_shape = p.shape
inner_dim = 1 if tmp_data.dtype == torch.int8 else 0
outer_dim = 0 if tmp_data.dtype == torch.int8 else 1
if (len(src_shape) == 2 and len(dst_shape) == 2):
if (src_shape[inner_dim] == dst_shape[0] and src_shape[outer_dim] == dst_shape[1]):
if tmp_data.dtype != torch.int8:
p = weight_quantizer.quantize(
transpose(tmp_data) if weight_quantizer.q_int8 else tmp_data)
else:
p = torch.nn.parameter.Parameter(tmp_data, requires_grad=False)
p.scale = scale
setattr(module, n, p)
else:
dim = inner_dim if src_shape[inner_dim] != dst_shape[0] else outer_dim
dim1 = 0 if src_shape[inner_dim] != dst_shape[0] else 1
if src_shape[dim] > dst_shape[dim1]:
weight_partition = torch.split(tmp_data, dst_shape[dim1], dim=dim)[rank].to(
get_accelerator().current_device_name())
assert tmp_data.dtype != torch.int8 or scale.numel() > weight_quantizer.num_groups * (rank+1), \
'''ERROR: Quantization scales for the larger TP size are required when loading an INT8 checkpoint!\
Please use the FP16 checkpoint to generate an INT8 checkpoint with the desired sharding parameters!'''
scale = scale.view(-1)[weight_quantizer.num_groups * (rank + 1):].reshape(
weight_quantizer.num_groups, -1).contiguous()
else:
assert tmp_data.dtype != torch.int8, \
'''Merging of checkpoints is not supported when using an INT8 checkpoint! \
Please use as many GPUs as the TP-size of the checkpoint.'''
all_data = [
sd[j][prefix + n] if type(sd[j][prefix + n]) is list else sd[j][prefix + n].to(
get_accelerator().current_device_name()) for j in range(len(sd))
]
# Check if the weight tensor is for the QKV parameter
if src_shape[1] == (3 * src_shape[0]) // ckpt_mp_size:
qkv_size = src_shape[outer_dim] // 3
src_split = [
torch.split(src[0].data, qkv_size, dim=outer_dim) for src in all_data
]
weight_partition = torch.cat([
torch.cat([qkv_s[i] for qkv_s in src_split], axis=outer_dim)
for i in range(len(src_split[0]))
],
dim=dim)
else:
weight_partition = torch.cat([
ad[0].to(get_accelerator().current_device_name())
if type(ad) is list else ad for ad in all_data
],
dim=dim)
if tmp_data.dtype == torch.int8:
scale = torch.cat(
[ad[1].to(get_accelerator().current_device_name()) for ad in all_data],
dim=dim)
if tmp_data.dtype != torch.int8:
weight_partition = weight_quantizer.quantize(
transpose(weight_partition), \
parallel_dim=(0 if dim == 1 else 1)) if weight_quantizer.q_int8 else \
weight_quantizer.quantize(weight_partition)
else:
weight_partition = torch.nn.parameter.Parameter(weight_partition,
requires_grad=False)
weight_partition.scale = scale
setattr(module, n, weight_partition)
else:
if src_shape[0] == dst_shape[0]:
p.data.copy_(tmp_data)
else:
if src_shape[0] > dst_shape[0]:
bias_split = torch.split(tmp_data, dst_shape[-1])[rank].to(
get_accelerator().current_device_name()).contiguous()
p.data.copy_(bias_split)
else:
# Check if the weight tensor is for the QKV parameter
if src_shape[0] == (3 * r_module.config.hidden_size) // ckpt_mp_size:
qkv_size = src_shape[0] // 3
src_split = [
torch.split(sd[j][prefix + n], qkv_size, dim=0) for j in range(len(sd))
]
p.data.copy_(
torch.cat([
torch.cat([qkv_s[i] for qkv_s in src_split], axis=0)
for i in range(len(src_split[0]))
],
dim=0).to(get_accelerator().current_device_name()).contiguous())
else:
p.data.copy_(
torch.cat([sd[j][prefix + n] for j in range(len(sd))],
dim=0).to(get_accelerator().current_device_name()).contiguous())
load_parameters(module, prefix)
for n, child in module.named_children():
load_parameters(child, prefix + n + '.')
else:
container.load_params(module, sd[0], weight_quantizer, mp_replace, prefix)
try:
import transformers
OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding
if hasattr(transformers.models, "llama"):
LlamaRMSNorm = transformers.models.llama.modeling_llama.LlamaRMSNorm
else:
LlamaRMSNorm = None
except:
OPTLearnedPositionalEmbedding = None
LlamaRMSNorm = None
layer_policies = {
nn.Linear: load,
nn.Embedding: load,
nn.LayerNorm: load,
EmbeddingLayer: load,
LinearLayer: load,
Normalize: load,
transformer_inference.DeepSpeedTransformerInference: load_transformer_layer,
DeepSpeedBloomInference: load_transformer_layer,
DeepSpeedGPTInference: load_transformer_layer,
DeepSpeedBERTInference: load_transformer_layer,
DeepSpeedMegatronGPTInference: load_transformer_layer,
DeepSpeedOPTInference: load_transformer_layer,
OPTLearnedPositionalEmbedding: load,
OPTEmbedding: load,
LlamaRMSNorm: load,
RMSNormalize: load
}
all_ds_ids = {}
def load_module_recursive(module, prefix='', level=0):
for name, child in module.named_children():
if child.__class__ in layer_policies:
checking_key = prefix + name + '.'
if not any(checking_key in item for item in sd[0].keys()):
if hasattr(child, 'weight') and \
(hasattr(child.weight, 'ds_id') and \
child.weight.ds_id in all_ds_ids):
prefix1 = all_ds_ids[child.weight.ds_id]
if child.__class__ is nn.Linear:
child = LinearLayer(weight=all_ds_ids[child.weight.ds_id])
setattr(module, name, child)
continue
child_params = list(child.parameters())
if len(child_params) > 0 and (child_params[0].numel() == 0 or child_params[0].is_meta):
if child.weight.is_meta:
ds_shape = child.weight.shape
else:
ds_shape = child.weight.ds_shape
if child.__class__ is nn.LayerNorm:
child = Normalize(dim=ds_shape[-1], dtype=child.weight.dtype, eps=child.eps)
setattr(module, name, child)
elif child.__class__ is nn.Linear:
child = LinearLayer(weight_shape=child.weight.shape, bias=child.bias)
setattr(module, name, child)
elif child.__class__ is OPTLearnedPositionalEmbedding:
child = OPTEmbedding(weight_shape=ds_shape)
setattr(module, name, child)
elif child.__class__ is LlamaRMSNorm:
child = RMSNormalize(dim=ds_shape[-1], dtype=child.weight.dtype, eps=child.variance_epsilon)
setattr(module, name, child)
else:
ds_id = None
if hasattr(child.weight, 'ds_id'):
ds_id = child.weight.ds_id
child = EmbeddingLayer(weight_shape=ds_shape, dtype=child.weight.dtype)
if ds_id is not None:
all_ds_ids[ds_id] = child.weight
setattr(module, name, child)
layer_policies[child.__class__](child, prefix + name + '.')
else:
load_module_recursive(
child,
prefix if (level == 0 and ckpt_type == 'pp') and skip_level_0_prefix else \
prefix + name + '.',
level + 1)
load_module_recursive(r_module)
embedding_weight = None
for n, p in r_module.named_parameters():
if "word_embeddings." in n or "embed_tokens." in n or "wte." in n:
embedding_weight = p
if embedding_weight is not None and r_module.lm_head.weight.is_meta:
r_module.lm_head.weight = embedding_weight
for sd_ in sd:
del sd_
sd = None
gc.collect()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed import comm as dist
from torch import nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from deepspeed.accelerator import get_accelerator
class LinearAllreduce(nn.Module):
def __init__(self, weight, bias=None, mp_group=None):
super(LinearAllreduce, self).__init__()
self.weight = weight
self.bias = bias
self.mp_group = mp_group
def forward(self, input):
output = torch.matmul(input, self.weight.transpose(-1, -2))
if self.mp_group is not None:
dist.all_reduce(output, group=self.mp_group)
if self.bias is not None:
output += self.bias
return output
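# Illustrative note: LinearAllreduce is the row-parallel half of tensor parallelism. Each
# rank holds an [out_features, in_features // mp_size] weight shard, computes a partial
# matmul against its slice of the input features, and the all-reduce sums the partial
# outputs across the tensor-parallel group before the replicated bias is added.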
class LinearLayer(nn.Module):
def __init__(self, weight_shape=None, dtype=torch.half, weight=None, bias=None):
super(LinearLayer, self).__init__()
if weight is not None:
self.weight = weight
self.bias = bias
else:
self.weight = Parameter(
torch.empty(weight_shape, dtype=dtype, device=get_accelerator().current_device_name()))
self.bias = Parameter(
torch.empty(weight_shape[0],
dtype=dtype,
device=get_accelerator().current_device_name())) \
if bias is not None else None
def forward(self, input):
output = torch.matmul(input, self.weight.transpose(-1, -2))
if self.bias is not None:
output += self.bias
return output
class Normalize(nn.Module):
def __init__(self, dim=None, dtype=torch.float, eps=1e-5, weight=None, bias=None):
super(Normalize, self).__init__()
if weight is not None:
self.weight = weight
self.bias = bias
else:
self.norm = nn.LayerNorm(dim, eps=eps).to(dtype).to(get_accelerator().current_device_name())
self.weight = self.norm.weight
self.bias = self.norm.bias
self.eps = eps
def forward(self, input):
return nn.functional.layer_norm(input, input.shape[-1:], self.weight, self.bias, eps=self.eps)
class EmbeddingLayer(nn.Module):
def __init__(self, weight_shape=None, dtype=torch.half, weight=None, bias=None):
super(EmbeddingLayer, self).__init__()
if weight is None:
self.weight = Parameter(
torch.empty(weight_shape[0],
weight_shape[1],
dtype=dtype,
device=get_accelerator().current_device_name()))
else:
self.weight = weight
def forward(self, input):
return F.embedding(input, self.weight)
class OPTEmbedding(EmbeddingLayer):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, weight_shape=None, weight=None, bias=None):
# OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(weight_shape, weight=weight)
def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
attention_mask = attention_mask.long()
# create positions depending on attention_mask
positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1
# cut positions if `past_key_values_length` is > 0
positions = positions[:, past_key_values_length:]
return super().forward(positions + self.offset)
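# Worked example (illustrative): for attention_mask [[1, 1, 1, 0]] the cumulative-sum trick
# yields positions [[0, 1, 2, -1]]; self.offset (= 2) is then added before the embedding
# lookup, mirroring HuggingFace's OPTLearnedPositionalEmbedding.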
class RMSNormalize(nn.Module):
def __init__(self, dim=None, dtype=torch.float, eps=1e-5, weight=None):
super(RMSNormalize, self).__init__()
if weight is not None:
self.weight = weight
else:
self.weight = nn.Parameter(torch.ones(dim, dtype=dtype, device=get_accelerator().current_device_name()))
self.eps = eps
def forward(self, hidden_states):
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return hidden_states * self.weight
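# Note (illustrative): the forward pass above computes RMSNorm,
#   y = x / sqrt(mean(x**2, dim=-1) + eps) * weight,
# accumulating the variance in float32 and casting back to the weight's half or bfloat16
# dtype when applicable.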
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
def quantize_transformer_layer(orig_layer_impl, model, megatron=False, preln=False):
""" Quantize bert-style transformer layers with DeepSpeed's transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
megatron (bool): megatron model-parallel implementation (this is supported for inference only)
preln (bool): does the original layer implementation do pre or post layer norm?
Note: For BERT-style models, injection is based on the DeepSpeed-Examples models unless the huggingface flag is set.
Returns:
Updated nn.module with quantized transformer layers
"""
def quantize_weight(weight):
return weight.to(torch.int8)
def megatron_layer_quantize(layer):
layer.attention.query_key_value.weight.data = quantize_weight(layer.attention.query_key_value.weight.data)
layer.attention.dense.weight.data = quantize_weight(layer.attention.dense.weight.data)
layer.mlp.dense_h_to_4h.weight.data = quantize_weight(layer.mlp.dense_h_to_4h.weight.data)
layer.mlp.dense_4h_to_h.weight.data = quantize_weight(layer.mlp.dense_4h_to_h.weight.data)
def bert_layer_quantize(layer):
layer.attention.self.query.weight.data = quantize_weight(layer.attention.self.query.weight.data)
layer.attention.self.key.weight.data = quantize_weight(layer.attention.self.key.weight.data)
layer.attention.self.value.weight.data = quantize_weight(layer.attention.self.value.weight.data)
layer.attention.output.dense.weight.data = quantize_weight(layer.attention.output.dense.weight.data)
if preln:
layer.intermediate.dense_act.weight.data = quantize_weight(layer.intermediate.dense_act.weight.data)
else:
layer.intermediate.dense.weight.data = quantize_weight(layer.intermediate.dense.weight.data)
layer.output.dense.weight.data = quantize_weight(layer.output.dense.weight.data)
def quantize_fn(child):
if megatron:
# Quantize megatron GPT2 / GPT3 trained model
megatron_layer_quantize(child)
else:
# Quantize either DeepSpeed or HuggingFace trained model
bert_layer_quantize(child)
return child
return quantize_module(model=model, orig_class=orig_layer_impl, quantize_fn=quantize_fn)
def quantize_module(model, orig_class, quantize_fn):
policy = {orig_class: quantize_fn}
return _quantize_module(model, policy)
def _quantize_module(model, policies):
for name, child in model.named_children():
if child.__class__ in policies:
orig = repr(child)
setattr(model, name, policies[child.__class__](child))
new = getattr(model, name)
else:
_quantize_module(child, policies)
return model
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
from ..policy import TransformerPolicy
class DS_GPT2Container(BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
class HFGPT2LayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=True):
# HuggingFace GPT2 uses convolutional layer instead of linear layer
super().__init__(inference, linear_layer=False)
self.client_module = client_module
try:
import transformers
HFGPT2LayerPolicy._orig_layer_class = transformers.models.gpt2.modeling_gpt2.GPT2Block
except:
HFGPT2LayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attn.embed_dim, \
self.client_module.attn.num_heads, \
self.client_module.ln_1.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
return self.client_module.attn.c_attn.weight, \
self.client_module.attn.c_attn.bias, \
self.client_module.attn.c_proj.weight, \
self.client_module.attn.c_proj.bias
def mlp(self, enable_training=False):
return self.client_module.mlp.c_fc.weight, \
self.client_module.mlp.c_fc.bias, \
self.client_module.mlp.c_proj.weight, \
self.client_module.mlp.c_proj.bias
def layernorm(self):
return self.client_module.ln_2.weight, \
self.client_module.ln_2.bias, \
self.client_module.ln_1.weight, \
self.client_module.ln_1.bias
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from torch.nn.parameter import Parameter
from ..policy import DSPolicy
from ...model_implementations.diffusers.unet import DSUNet
class UNetPolicy(DSPolicy):
def __init__(self):
super().__init__()
try:
import diffusers
self._orig_layer_class = diffusers.models.unet_2d_condition.UNet2DConditionModel
except ImportError:
self._orig_layer_class = None
def match(self, module):
return isinstance(module, self._orig_layer_class)
def match_replaced(self, module):
return isinstance(module, DSUNet)
def apply(self, module, enable_cuda_graph=True):
# TODO(cmikeh2): Enable cuda graph should be an inference configuration
return DSUNet(module, enable_cuda_graph=enable_cuda_graph)
def attention(self, client_module):
qw = client_module.to_q.weight
kw = client_module.to_k.weight
vw = client_module.to_v.weight
if qw.shape[1] == kw.shape[1]:
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False)
return qkvw, \
client_module.to_out[0].weight, \
client_module.to_out[0].bias, \
qw.shape[-1], \
client_module.heads
else:
#return None
#kvw = Parameter(torch.cat((kw, vw), dim=0), requires_grad=False)
return qw, \
kw, vw, \
client_module.to_out[0].weight, \
client_module.to_out[0].bias, \
qw.shape[-1], \
client_module.heads
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.meta_tensor import MetaTensorContainer
from .features.split_qkv import HybridSplitQKVContainer
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_copy_qkv
from ..policy import maybe_get_lora
class DS_GPTJContainer(MetaTensorContainer, HybridSplitQKVContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.fc_in, self.policy.client_module.mlp.fc_out,
self.policy.client_module.attn.q_proj, self.policy.client_module.attn.k_proj,
self.policy.client_module.attn.v_proj, self.policy.client_module.attn.out_proj
]
]
def set_q_k_v(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.attn.q_proj.weight
self.qb = None
self.kw = self.policy.client_module.attn.k_proj.weight
self.kb = None
self.vw = self.policy.client_module.attn.v_proj.weight
self.vb = None
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'attn.q_proj.weight', \
'attn.k_proj.weight', \
'attn.v_proj.weight', \
'attn.out_proj.weight', \
'mlp.fc_in.weight', \
'mlp.fc_in.bias', \
'mlp.fc_out.weight', \
'mlp.fc_out.bias', \
'ln_1.weight', \
'ln_1.bias'
)
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]],
split_qkv=self.policy.split_qkv)
for i in range(3, 4):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
for i in range(4, 8):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(8, 10):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i + 2],
prefix + param_names[i])
class HFGPTJLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=True):
super().__init__(inference, scale_attention=True)
self.client_module = client_module
try:
import transformers
HFGPTJLayerPolicy._orig_layer_class = transformers.models.gptj.modeling_gptj.GPTJBlock
except:
HFGPTJLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attn.embed_dim, \
self.client_module.attn.num_attention_heads, \
self.client_module.ln_1.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.attn.q_proj.weight
kw = self.client_module.attn.k_proj.weight
vw = self.client_module.attn.v_proj.weight
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
return qkvw, \
None, \
self.client_module.attn.out_proj.weight, \
None,
def mlp(self, enable_training=False):
return self.client_module.mlp.fc_in.weight, \
self.client_module.mlp.fc_in.bias, \
self.client_module.mlp.fc_out.weight, \
self.client_module.mlp.fc_out.bias
def layernorm(self):
return None, \
None, \
self.client_module.ln_1.weight, \
self.client_module.ln_1.bias
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .bert import DS_BERTContainer, HFBertLayerPolicy
from .bloom import DS_BloomContainer, BLOOMLayerPolicy, supported_models
from .distil_bert import DS_DistilBERTContainer, HFDistilBertLayerPolicy
from .gpt2 import DS_GPT2Container, HFGPT2LayerPolicy
from .gptj import DS_GPTJContainer, HFGPTJLayerPolicy
from .gptneo import DS_GPTNEOContainer, HFGPTNEOLayerPolicy
from .gptneox import DS_GPTNEOXContainer, GPTNEOXLayerPolicy
from .llama import DS_LLAMAContainer, LLAMALayerPolicy
from .megatron_gpt import DS_MegatronGPTContainer, MegatronLayerPolicy
from .megatron_gpt_moe import DS_MegatronGPTMoEContainer, MegatronMoELayerPolicy
from .opt import DS_OPTContainer, HFOPTLayerPolicy
from .clip import DS_CLIPContainer, HFCLIPLayerPolicy
from .unet import UNetPolicy
from .vae import VAEPolicy
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .base_moe import *
from .features.megatron import MegatronContainer
from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference
import torch
from .megatron_gpt import MegatronLayerPolicy
from packaging import version as pkg_version
class DS_MegatronGPTMoEContainer(MegatronContainer, BaseTransformerMoEContainer):
def __init__(self, policy, config, model_config, layer_id):
super().__init__(policy, config, model_config, layer_id)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedMegatronGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
if self.megatron_v2:
self.module.config.rotate_half = True
self.module.config.rotate_every_two = False
return self.module
# TODO: Megatron GPT MoE inherits from Megatron policy and replaces mlp
# TODO: Generalize MoE overall goal, expand beyond Megatron
class MegatronMoELayerPolicy(MegatronLayerPolicy):
_orig_layer_class = None
version = 0
moe_type = 'standard'
num_experts = 1
def __init__(self, client_module, inference=True):
super().__init__(inference)
self.client_module = client_module
# we use megatron version to differentiate between the old and new
# megatron-lm source code
if MegatronMoELayerPolicy._orig_layer_class is None:
if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"):
MegatronMoELayerPolicy._orig_layer_class = None
else:
try:
from megatron.model.transformer import ParallelTransformerLayer
MegatronMoELayerPolicy._orig_layer_class = ParallelTransformerLayer
except ImportError:
MegatronMoELayerPolicy._orig_layer_class = None
def get_num_experts(self):
return self.num_experts
def mlp(self, moe_type='standard', enable_training=False):
# for now, all of this is tightly coupled to megatron-deepspeed moe implementation
# todo: think and refactor this to be more general
#from deepspeed.moe.utils import has_moe_layers
#moe, _ = has_moe_layers(self.client_module)
moe_experts = self.client_module.mlp.deepspeed_moe.experts.deepspeed_experts if moe_type == 'standard' else \
self.client_module.mlp.moe.deepspeed_moe.experts.deepspeed_experts
num_experts = len(moe_experts)
self.num_experts = num_experts
if moe_type == 'standard':
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)]
else:
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)], \
self.client_module.mlp.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.mlp.dense_4h_to_h.bias, \
self.client_module.mlp.coefficient.weight
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features import MetaTensorContainer, HybridSplitQKVContainer
from deepspeed.model_implementations.transformers.ds_opt import DeepSpeedOPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_copy_qkv
from ..policy import maybe_get_lora
from deepspeed.utils.types import ActivationFuncType
class DS_OPTContainer(MetaTensorContainer, HybridSplitQKVContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedOPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.fc1,
self.policy.client_module.fc2,
self.policy.client_module.self_attn.q_proj,
self.policy.client_module.self_attn.k_proj,
self.policy.client_module.self_attn.v_proj,
self.policy.client_module.self_attn.out_proj,
]
]
def set_q_k_v(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.self_attn.q_proj.weight
self.qb = self.policy.client_module.self_attn.q_proj.bias
self.kw = self.policy.client_module.self_attn.k_proj.weight
self.kb = self.policy.client_module.self_attn.k_proj.bias
self.vw = self.policy.client_module.self_attn.v_proj.weight
self.vb = self.policy.client_module.self_attn.v_proj.bias
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'self_attn.q_proj.weight', \
'self_attn.k_proj.weight', \
'self_attn.v_proj.weight', \
'self_attn.q_proj.bias', \
'self_attn.k_proj.bias', \
'self_attn.v_proj.bias', \
'self_attn.out_proj.weight', \
'self_attn.out_proj.bias', \
'fc1.weight', \
'fc1.bias', \
'fc2.weight', \
'fc2.bias', \
'final_layer_norm.weight', \
'final_layer_norm.bias', \
'self_attn_layer_norm.weight', \
'self_attn_layer_norm.bias'
)
for i in range(0, 6, 3):
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
transformer_param_names[i // 3],
[prefix + param_names[i], prefix + param_names[i + 1], prefix + param_names[i + 2]],
split_qkv=self.policy.split_qkv)
for i in range(6, 8):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4],
prefix + param_names[i])
for i in range(8, 14):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4],
prefix + param_names[i])
for i in range(14, 16):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4],
prefix + param_names[i])
class HFOPTLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=True, use_load_prefix=True):
super().__init__(inference, linear_layer=True, pre_attn_norm=True, use_load_prefix=use_load_prefix)
self.client_module = client_module
try:
import transformers
HFOPTLayerPolicy._orig_layer_class = transformers.models.opt.modeling_opt.OPTDecoderLayer
except:
HFOPTLayerPolicy._orig_layer_class = None
if hasattr(TransformerPolicy, "hf_model_config") and hasattr(TransformerPolicy.hf_model_config,
"activation_function"):
if TransformerPolicy.hf_model_config.activation_function == "relu":
self.mlp_act_func_type = ActivationFuncType.ReLU
elif TransformerPolicy.hf_model_config.activation_function in ["gelu", "gelu_new"]:
self.mlp_act_func_type = ActivationFuncType.GELU
else:
raise ValueError("Unsupported activation function: {}".format(
TransformerPolicy.hf_model_config.activation_function))
else:
self.mlp_act_func_type = ActivationFuncType.ReLU # default
def get_hidden_heads(self):
return self.client_module.self_attn.embed_dim, \
self.client_module.self_attn.num_heads, \
self.client_module.self_attn_layer_norm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.self_attn.q_proj.weight
qb = self.client_module.self_attn.q_proj.bias
kw = self.client_module.self_attn.k_proj.weight
kb = self.client_module.self_attn.k_proj.bias
vw = self.client_module.self_attn.v_proj.weight
vb = self.client_module.self_attn.v_proj.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.self_attn.out_proj.weight, \
self.client_module.self_attn.out_proj.bias
def mlp(self, enable_training=False):
return self.client_module.fc1.weight, \
self.client_module.fc1.bias, \
self.client_module.fc2.weight, \
self.client_module.fc2.bias
def layernorm(self):
return self.client_module.final_layer_norm.weight, \
self.client_module.final_layer_norm.bias, \
self.client_module.self_attn_layer_norm.weight, \
self.client_module.self_attn_layer_norm.bias
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Create a container object to save model-specific tensors using the policy file above.
from .base import *
from deepspeed import comm as dist
import deepspeed.ops.transformer as transformer_inference
from deepspeed.accelerator import get_accelerator
class BaseTransformerMoEContainer(BaseTransformerContainer):
def __init__(self, **kwargs):
# Call the init function of the parent class to initialize the tensors and configs from parent class
super().__init__(**kwargs)
self.num_experts = self.policy.get_num_experts()
self.ep_world_size = dist.get_world_size()
self.local_ep_size = 1 if self.num_experts < self.ep_world_size else self.num_experts // self.ep_world_size
        self.layer_norm_eps = self.config.layer_norm_eps if hasattr(self.config, 'layer_norm_eps') else 1e-12
# MoE models will have a list of mlp related tensors
self._h4h_w = []
self._h4h_b = []
self._4hh_w = []
self._4hh_b = []
# Residual MoE needs extra parameters
self._res_h4h_w = None
self._res_h4h_b = None
self._res_4hh_w = None
self._res_4hh_b = None
self._res_coef = None
def create_ds_model_config(self):
self.set_hidden_heads(*self.policy.get_hidden_heads())
        assert self.num_attention_heads % self.mp_size == 0,\
            "To run the model parallel across the GPUs, the number of attention heads must be divisible by the world size! " +\
            "This is because the attention computation is partitioned evenly among the parallel GPUs."
self.ds_model_config = transformer_inference.DeepSpeedMoEInferenceConfig(
hidden_size=self.hidden_size,
heads=self.num_attention_heads,
layer_norm_eps=self.layer_norm_eps,
fp16=self.fp16,
pre_layer_norm=self.pre_layer_norm,
mp_size=self.mp_size,
q_int8=self.quantize,
moe_experts=self.local_ep_size,
global_experts=self.num_experts,
mlp_type=self.config.moe.type,
scale_attn_by_inverse_layer_idx=self.scale_attn_by_inverse_layer_idx,
)
return self.ds_model_config
def initialize_tensors(self):
# Set the tensors from policy (user module) to container (DS module)
self.set_attention(*self.policy.attention())
self.set_mlp(self.config.moe.type)
self.set_layernorm(*self.policy.layernorm())
def set_mlp(self, config_moe_type):
if config_moe_type == 'standard':
self._h4h_w, self._h4h_b, \
self._4hh_w, self._4hh_b = self.policy.mlp()
else:
self._h4h_w, self._h4h_b, self._4hh_w, \
self._4hh_b, self._res_h4h_w, self._res_h4h_b, \
self._res_4hh_w, self._res_4hh_b, \
self._res_coef = self.policy.mlp(config_moe_type)
def transpose(self):
self.transpose_attention()
self.transpose_mlp()
if self.config.moe.type == 'residual':
self.transpose_residual()
def transpose_mlp(self):
self._h4h_w = [self.transpose_impl(moe_w1.data) for moe_w1 in self._h4h_w]
self._4hh_w = [self.transpose_impl(moe_w1.data) for moe_w1 in self._4hh_w]
def transpose_residual(self):
self._res_h4h_w.data = self.transpose_impl(self._res_h4h_w.data)
self._res_4hh_w.data = self.transpose_impl(self._res_4hh_w.data)
self._res_coef.data = self.transpose_impl(self._res_coef.data)
def apply_tensor_parallelism(self, mp_replace):
# setup the new Attention module
self.attention_qkv_mp(mp_replace)
self.attention_o_mp(mp_replace)
# quantize attention weights
self.attention_quantization()
# setup the new MLP module
self.mlp_mp()
def mlp_mp(self):
gpu_index = dist.get_rank()
for ep_index in range(self.local_ep_size):
# mlp inter
self.module.mlp[ep_index].inter_w.data = self._h4h_w[gpu_index * self.local_ep_size + ep_index].to(
get_accelerator().current_device_name())
self.module.mlp[ep_index].inter_b.data = self._h4h_b[gpu_index * self.local_ep_size + ep_index].to(
get_accelerator().current_device_name())
# mlp output
self.module.mlp[ep_index].output_w.data = self._4hh_w[gpu_index * self.local_ep_size + ep_index].to(
get_accelerator().current_device_name())
self.module.mlp[ep_index].output_b.data = self._4hh_b[gpu_index * self.local_ep_size + ep_index].to(
get_accelerator().current_device_name())
def copy_data_to_new_module(self):
self.module.attn_nw.data = self.attn_nw.to(get_accelerator().current_device_name())
self.module.attn_nb.data = self.attn_nb.to(get_accelerator().current_device_name())
self.module.norm_w.data.copy_(self.input_nw.to(get_accelerator().current_device_name()))
self.module.norm_b.data.copy_(self.input_nb.to(get_accelerator().current_device_name()))
if self.config.moe.type == 'residual':
self.module.res_mlp.inter_w.data = self._res_h4h_w.to(get_accelerator().current_device_name())
self.module.res_mlp.inter_b.data = self._res_h4h_b.to(get_accelerator().current_device_name())
self.module.res_mlp.output_w.data = self._res_4hh_w.to(get_accelerator().current_device_name())
self.module.res_mlp.output_b.data = self._res_4hh_b.to(get_accelerator().current_device_name())
self.module.res_coef.data = self._res_coef.to(get_accelerator().current_device_name())
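# Illustrative sketch (not part of DeepSpeed): the expert-to-rank index arithmetic used
# by mlp_mp() above. The helper name (_demo_expert_partition) is hypothetical; it only
# mirrors how `gpu_index * local_ep_size + ep_index` walks the global expert list when
# the experts divide evenly across the expert-parallel world size.
def _demo_expert_partition(num_experts, ep_world_size):
    local_ep_size = 1 if num_experts < ep_world_size else num_experts // ep_world_size
    return {rank: [rank * local_ep_size + ep for ep in range(local_ep_size)] for rank in range(ep_world_size)}
# e.g. _demo_expert_partition(8, 4) -> {0: [0, 1], 1: [2, 3], 2: [4, 5], 3: [6, 7]}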
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features import HybridSplitQKVContainer, HybridGatedMLPContainer
from deepspeed.utils.types import ActivationFuncType, NormType
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import (
TransformerPolicy,
transformer_param_names,
maybe_copy,
maybe_copy_qkv,
maybe_copy_geglu,
maybe_get_lora,
)
class DS_LLAMAContainer(HybridGatedMLPContainer, HybridSplitQKVContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
_config.rotate_half = True
_config.rotate_every_two = False
_config.rotary_dim = self.hidden_size // self.num_attention_heads
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.up_proj.weight, self.policy.client_module.mlp.gate_proj.weight,
self.policy.client_module.mlp.down_proj.weight, self.policy.client_module.self_attn.q_proj.weight,
self.policy.client_module.self_attn.k_proj.weight, self.policy.client_module.self_attn.v_proj.weight,
self.policy.client_module.self_attn.o_proj.weight
]
]
def set_q_k_v(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.self_attn.q_proj.weight
self.qb = None
self.kw = self.policy.client_module.self_attn.k_proj.weight
self.kb = None
self.vw = self.policy.client_module.self_attn.v_proj.weight
self.vb = None
def set_mlp_gate(self):
"""
Necessary to implement for `HybridGatedMLPContainer`
"""
self.inter_up_w = self.policy.client_module.mlp.up_proj.weight
self.inter_up_b = None
self.inter_gate_w = self.policy.client_module.mlp.gate_proj.weight
self.inter_gate_b = None
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'self_attn.q_proj.weight', \
'self_attn.k_proj.weight', \
'self_attn.v_proj.weight', \
'self_attn.o_proj.weight', \
'mlp.up_proj.weight', \
'mlp.gate_proj.weight', \
'mlp.down_proj.weight', \
'input_layernorm.weight', \
'post_attention_layernorm.weight'
)
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]],
split_qkv=self.policy.split_qkv)
for i in range(3, 4):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
maybe_copy_geglu(module.mlp, sd, weight_quantizer, mp_replace, 'inter_w',
[prefix + param_names[4], prefix + param_names[5]])
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, 'output_w', prefix + param_names[6])
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[8], prefix + param_names[7])
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[10], prefix + param_names[8])
class LLAMALayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=True):
super().__init__(
inference,
mlp_act_func_type=ActivationFuncType.GATED_SILU,
norm_type=NormType.RMSNorm,
)
self.client_module = client_module
try:
import transformers
LLAMALayerPolicy._orig_layer_class = transformers.models.llama.modeling_llama.LlamaDecoderLayer # type: ignore
except:
LLAMALayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.self_attn.q_proj.weight.shape[1], \
self.client_module.self_attn.num_heads, \
self.client_module.input_layernorm.variance_epsilon, \
self.client_module.mlp.gate_proj.weight.shape[0]
def attention(self, enable_training=False):
qw = self.client_module.self_attn.q_proj.weight
kw = self.client_module.self_attn.k_proj.weight
vw = self.client_module.self_attn.v_proj.weight
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
return qkvw, \
None, \
self.client_module.self_attn.o_proj.weight, \
None
def mlp(self, enable_training=False):
mlp1_up = self.client_module.mlp.up_proj.weight
mlp1_gate = self.client_module.mlp.gate_proj.weight
mlp2 = self.client_module.mlp.down_proj.weight
mlp1 = Parameter(torch.cat((mlp1_up, mlp1_gate), dim=0), requires_grad=enable_training)
return mlp1, None, mlp2, None
def layernorm(self):
return self.client_module.post_attention_layernorm.weight, \
None, \
self.client_module.input_layernorm.weight, \
None
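# Illustrative sketch (not part of DeepSpeed): shape bookkeeping behind the container and
# policy above. mlp() stacks up_proj and gate_proj along dim 0, and create_module() sets
# rotary_dim to the per-head width. The sizes below are hypothetical (7B-like).
def _demo_llama_shapes(hidden_size=4096, num_heads=32, intermediate=11008):
    up = torch.empty(intermediate, hidden_size)
    gate = torch.empty(intermediate, hidden_size)
    fused_inter = torch.cat((up, gate), dim=0)   # (2 * intermediate, hidden_size)
    rotary_dim = hidden_size // num_heads        # 128 per attention head for these sizes
    return fused_inter.shape, rotary_dim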
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.meta_tensor import MetaTensorContainer
from .features.hybrid_engine import HybridEngineContainer
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_copy_qkv
from ..policy import maybe_get_lora
class DS_GPTNEOContainer(MetaTensorContainer, HybridEngineContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.c_fc, self.policy.client_module.mlp.c_proj,
self.policy.client_module.attn.attention.q_proj, self.policy.client_module.attn.attention.k_proj,
self.policy.client_module.attn.attention.v_proj, self.policy.client_module.attn.attention.out_proj
]
]
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'attn.attention.q_proj.weight', \
'attn.attention.k_proj.weight', \
'attn.attention.v_proj.weight', \
'attn.attention.out_proj.weight', \
'attn.attention.out_proj.bias', \
'mlp.c_fc.weight', \
'mlp.c_fc.bias', \
'mlp.c_proj.weight', \
'mlp.c_proj.bias', \
'ln_2.weight', \
'ln_2.bias', \
'ln_1.weight', \
'ln_1.bias'
)
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]],
split_qkv=self.policy.split_qkv)
for i in range(3, 5):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
for i in range(5, 11):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
for i in range(11, 13):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
class HFGPTNEOLayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=True):
super().__init__(inference, scale_attention=False)
self.client_module = client_module
try:
import transformers
HFGPTNEOLayerPolicy._orig_layer_class = transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoBlock
except:
HFGPTNEOLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attn.attention.embed_dim, \
self.client_module.attn.attention.num_heads, \
self.client_module.ln_1.eps, \
DEFAULT_INTERMEDIATE_SIZE
def get_q_k_v(self):
return self.client_module.attn.attention.q_proj.weight, \
None, \
self.client_module.attn.attention.k_proj.weight, \
None, \
self.client_module.attn.attention.v_proj.weight, \
None
def attention(self, enable_training=False):
qw = self.client_module.attn.attention.q_proj.weight
kw = self.client_module.attn.attention.k_proj.weight
vw = self.client_module.attn.attention.v_proj.weight
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
return qkvw, \
None, \
self.client_module.attn.attention.out_proj.weight, \
self.client_module.attn.attention.out_proj.bias
def mlp(self, enable_training=False):
return self.client_module.mlp.c_fc.weight, \
self.client_module.mlp.c_fc.bias, \
self.client_module.mlp.c_proj.weight, \
self.client_module.mlp.c_proj.bias
def layernorm(self):
return self.client_module.ln_2.weight, \
self.client_module.ln_2.bias, \
self.client_module.ln_1.weight, \
self.client_module.ln_1.bias
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.megatron import MegatronContainer
from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference
import torch
from ..policy import TransformerPolicy
from packaging import version as pkg_version
class DS_MegatronGPTContainer(MegatronContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedMegatronGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
if self.megatron_v2:
self.module.config.rotate_half = True
self.module.config.rotate_every_two = False
return self.module
# TODO: Megatron GPT MoE inherits from Megatron policy and replaces mlp
# TODO: Generalize MoE overall goal, expand beyond Megatron
class MegatronLayerPolicy(TransformerPolicy):
_orig_layer_class = None
version = 0
moe_type = 'standard'
megatron_v2 = True
use_mup = False
def __init__(self, client_module, inference=True):
super().__init__(inference, megatron_v2=MegatronLayerPolicy.megatron_v2, use_mup=MegatronLayerPolicy.use_mup)
self.client_module = client_module
# we use megatron version to differentiate between the old and new
# megatron-lm source code
if MegatronLayerPolicy._orig_layer_class is None:
if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"):
MegatronLayerPolicy._orig_layer_class = None
else:
try:
from megatron.model.transformer import ParallelTransformerLayer
MegatronLayerPolicy._orig_layer_class = ParallelTransformerLayer
except ImportError:
MegatronLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attention.query_key_value.weight.shape[1], \
self.client_module.attention.num_attention_heads, \
self.client_module.input_layernorm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
if self.inference:
if MegatronLayerPolicy.version == 0:
attention = self.client_module.attention
else:
attention = self.client_module.self_attention
return attention.query_key_value.weight, \
attention.query_key_value.bias, \
attention.dense.weight, \
attention.dense.bias
def mlp(self, moe_type='standard', enable_training=False):
from deepspeed.moe.utils import has_moe_layers
moe, _ = has_moe_layers(self.client_module)
if moe:
moe_experts = self.client_module.mlp.deepspeed_moe.experts.deepspeed_experts if moe_type == 'standard' else \
self.client_module.mlp.moe.deepspeed_moe.experts.deepspeed_experts
num_experts = len(moe_experts)
if moe_type == 'standard':
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)]
else:
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)], \
self.client_module.mlp.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.mlp.dense_4h_to_h.bias, \
self.client_module.mlp.coefficient.weight
else:
return self.client_module.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.dense_4h_to_h.bias
def layernorm(self):
return self.client_module.post_attention_layernorm.weight, \
self.client_module.post_attention_layernorm.bias, \
self.client_module.input_layernorm.weight, \
self.client_module.input_layernorm.bias
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
class DS_BERTContainer(BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
self.return_tuple = True
self.triangular_masking = False
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
class HFBertLayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=False):
super().__init__(inference, pre_attn_norm=False)
self.client_module = client_module
self.cuda_graph_supported = True
if HFBertLayerPolicy._orig_layer_class is None:
try:
import transformers
HFBertLayerPolicy._orig_layer_class = [
transformers.models.bert.modeling_bert.BertLayer,
transformers.models.roberta.modeling_roberta.RobertaLayer
]
except:
HFBertLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
if self.pre_attn_norm:
attention_layernorm = self.client_module.PostAttentionLayerNorm
else:
attention_layernorm = self.client_module.attention.output.LayerNorm
return self.client_module.attention.self.query.weight.shape[1], \
self.client_module.attention.self.num_attention_heads, \
attention_layernorm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.attention.self.query.weight
qb = self.client_module.attention.self.query.bias
kw = self.client_module.attention.self.key.weight
kb = self.client_module.attention.self.key.bias
vw = self.client_module.attention.self.value.weight
vb = self.client_module.attention.self.value.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.attention.output.dense.weight, \
               self.client_module.attention.output.dense.bias
def mlp(self, enable_training=False):
if self.pre_attn_norm:
intermediate_ff = self.client_module.intermediate.dense_act
else:
intermediate_ff = self.client_module.intermediate.dense
return intermediate_ff.weight, intermediate_ff.bias, \
self.client_module.output.dense.weight, \
self.client_module.output.dense.bias
def layernorm(self):
if self.pre_attn_norm:
attention_layernorm = self.client_module.PostAttentionLayerNorm
transformer_layernorm = self.client_module.PreAttentionLayerNorm
else:
attention_layernorm = self.client_module.attention.output.LayerNorm
transformer_layernorm = self.client_module.output.LayerNorm
return attention_layernorm.weight, \
attention_layernorm.bias, \
transformer_layernorm.weight, \
transformer_layernorm.bias
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
class DS_CLIPContainer(BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
class HFCLIPLayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=False):
super().__init__(inference, pre_attn_norm=True, scale_attention=True)
self.client_module = client_module
self.cuda_graph_supported = True
if HFCLIPLayerPolicy._orig_layer_class is None:
try:
import transformers
HFCLIPLayerPolicy._orig_layer_class = transformers.models.clip.modeling_clip.CLIPEncoderLayer
except:
HFCLIPLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.self_attn.q_proj.weight.shape[1], \
self.client_module.self_attn.num_heads, \
self.client_module.layer_norm1.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.self_attn.q_proj.weight
qb = self.client_module.self_attn.q_proj.bias
kw = self.client_module.self_attn.k_proj.weight
kb = self.client_module.self_attn.k_proj.bias
vw = self.client_module.self_attn.v_proj.weight
vb = self.client_module.self_attn.v_proj.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.self_attn.out_proj.weight, \
self.client_module.self_attn.out_proj.bias
def mlp(self, enable_training=False):
return self.client_module.mlp.fc1.weight, \
self.client_module.mlp.fc1.bias, \
self.client_module.mlp.fc2.weight, \
self.client_module.mlp.fc2.bias
def layernorm(self):
return self.client_module.layer_norm2.weight, \
self.client_module.layer_norm2.bias, \
self.client_module.layer_norm1.weight, \
self.client_module.layer_norm1.bias
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.meta_tensor import MetaTensorContainer
from .features.hybrid_megatron import HybridMegatronContainer
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from packaging import version as pkg_version
from ..policy import maybe_get_lora
class DS_GPTNEOXContainer(MetaTensorContainer, HybridMegatronContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
if self.megatron_v2:
self.module.config.rotate_half = True
self.module.config.rotate_every_two = False
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
if GPTNEOXLayerPolicy.version == 0:
attention = self.policy.client_module.attention
else:
attention = self.policy.client_module.self_attention
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.dense_h_to_4h, self.policy.client_module.mlp.dense_4h_to_h,
attention.query_key_value, attention.dense
]
]
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'attention.query_key_value.weight', \
'attention.query_key_value.bias', \
'attention.dense.weight', \
'attention.dense.bias', \
'mlp.dense_h_to_4h.weight', \
'mlp.dense_h_to_4h.bias', \
'mlp.dense_4h_to_h.weight', \
'mlp.dense_4h_to_h.bias', \
'post_attention_layernorm.weight', \
'post_attention_layernorm.bias', \
'input_layernorm.weight', \
'input_layernorm.bias'
)
for i in range(0, 2):
maybe_copy(module.attention,
sd,
weight_quantizer,
mp_replace,
transformer_param_names[i],
prefix + param_names[i],
qkv=True,
megatron_v2=self.policy.is_megatron_v2,
split_qkv=self.policy.split_qkv,
heads=self.policy.client_module.attention.num_attention_heads)
for i in range(2, 4):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(4, 10):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(10, 12):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i], prefix + param_names[i])
class GPTNEOXLayerPolicy(TransformerPolicy):
_orig_layer_class = None
version = 0
def __init__(self, client_module, inference=True, megatron_v2=True, split_qkv=False):
super().__init__(inference, megatron_v2=megatron_v2, split_qkv=split_qkv)
self.client_module = client_module
if GPTNEOXLayerPolicy._orig_layer_class is None:
if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"):
GPTNEOXLayerPolicy._orig_layer_class = None
else:
try:
from transformers import GPTNeoXLayer
GPTNEOXLayerPolicy._orig_layer_class = GPTNeoXLayer
except ImportError:
GPTNEOXLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
if GPTNEOXLayerPolicy.version == 0:
attention = self.client_module.attention
else:
attention = self.client_module.self_attention
        return attention.hidden_size, \
               attention.num_attention_heads, \
self.client_module.input_layernorm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
if GPTNEOXLayerPolicy.version == 0:
attention = self.client_module.attention
else:
attention = self.client_module.self_attention
return attention.query_key_value.weight, \
attention.query_key_value.bias, \
attention.dense.weight, \
attention.dense.bias
def mlp(self, enable_training=False):
return self.client_module.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.dense_4h_to_h.bias
def layernorm(self):
return self.client_module.post_attention_layernorm.weight, \
self.client_module.post_attention_layernorm.bias, \
self.client_module.input_layernorm.weight, \
self.client_module.input_layernorm.bias
def get_lora_params(self):
if GPTNEOXLayerPolicy.version == 0:
attention = self.client_module.attention
else:
attention = self.client_module.self_attention
all_lora_params = []
for p in [
self.client_module.mlp.dense_h_to_4h, \
self.client_module.mlp.dense_4h_to_h, \
attention.query_key_value, \
attention.dense
]:
all_lora_params.append(maybe_get_lora(p))
return all_lora_params
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Create a container object to save model-specific tensors using the policy file above.
from abc import ABC
import torch
from deepspeed.ops.transformer.inference.config import DeepSpeedInferenceConfig
from deepspeed.accelerator import get_accelerator
# If the intermediate size attribute is set to DEFAULT_INTERMEDIATE_SIZE,
# it is assumed that the intermediate size is 4x the embedding dimension.
DEFAULT_INTERMEDIATE_SIZE = -1
class BaseConvolutionContainer(ABC):
# not implemented
def __init__(self):
pass
class BaseTransformerContainer(ABC):
def __init__(self, policy, config, model_config, layer_id, child):
self.policy = policy
self.config = config
self.model_config = model_config
self.layer_id = layer_id
self.child = child
self.megatron_v2 = self.policy.is_megatron_v2
self.scale_attention = self.policy.scale_attention
self.ckpt_load_enabled = False
# configuration for models. todo: can this be moved to a pydantic model config?
self.hidden_size = None
self.intermediate_size = None
self.num_attention_heads = None
self.mp_size = self.config.tensor_parallel.tp_size
self.pre_layer_norm = self.model_config.do_layer_norm_before if \
hasattr(self.model_config, 'do_layer_norm_before') else self.policy.pre_attn_norm
self.dtype = self.config.dtype
self.attn_linear_layer = self.policy.linear_layer
self.mlp_linear_layer = self.policy.linear_layer
self.return_tuple = self.config.return_tuple
self.triangular_masking = True
self.local_attention = ((self.model_config.attention_layers[self.layer_id] == "local") if hasattr(
self.model_config, 'attention_layers') else False)
self.window_size = getattr(self.model_config, "window_size", 1)
self.mlp_act_func_type = self.policy.mlp_act_func_type
self.norm_type = self.policy.norm_type
self.training_mp_size = self.config.training_mp_size
self.bigscience_bloom = False
self.max_out_tokens = self.config.max_out_tokens
self.min_out_tokens = self.config.min_out_tokens
self.scale_attn_by_inverse_layer_idx = getattr(self.config, "scale_attn_by_inverse_layer_idx", False)
self.use_mup = self.policy.use_mup
self.return_single_tuple = False
self.rotary_dim = self.get_rotary_dim()
self.mlp_after_attn = (self.rotary_dim is None or self.rotary_dim < 0)
# Attention tensors
self.qkvw = None
self.qkvb = None
self.dense_w = None
self.dense_b = None
# MLP tensors
self._h4h_w = None
self._h4h_b = None
self._4hh_w = None
self._4hh_b = None
# LayerNorm tensors
self.attn_nw = None
self.attn_nb = None
self.input_nw = None
self.input_nb = None
self.mp_group = None
def create_ds_model_config(self):
self.set_hidden_heads(*self.policy.get_hidden_heads())
        assert self.num_attention_heads % self.mp_size == 0,\
            "To run the model parallel across the GPUs, the number of attention heads must be divisible by the world size! " +\
            "This is because the attention computation is partitioned evenly among the parallel GPUs."
self.ds_model_config = DeepSpeedInferenceConfig(
hidden_size=self.hidden_size,
intermediate_size=self.intermediate_size,
heads=self.num_attention_heads,
layer_norm_eps=self.layernorm_epsilon,
dtype=self.dtype,
pre_layer_norm=self.pre_layer_norm,
norm_type=self.norm_type,
mp_size=self.mp_size,
return_tuple=self.return_tuple,
triangular_masking=self.triangular_masking,
local_attention=self.local_attention,
window_size=self.window_size,
rotary_dim=self.rotary_dim,
mlp_after_attn=self.mlp_after_attn,
mlp_act_func_type=self.mlp_act_func_type,
training_mp_size=self.training_mp_size,
bigscience_bloom=self.bigscience_bloom,
max_out_tokens=self.max_out_tokens,
min_out_tokens=self.min_out_tokens,
scale_attn_by_inverse_layer_idx=self.scale_attn_by_inverse_layer_idx,
use_mup=self.use_mup,
return_single_tuple=self.return_single_tuple,
set_empty_params=self.config.set_empty_params,
transposed_mode=self.config.transposed_mode)
return self.ds_model_config
def check_meta_tensor_support(self):
if hasattr(self.qkvw, 'is_meta'):
if self.qkvw.is_meta:
assert self.ckpt_load_enabled, "Meta tensors are not supported for this model currently."
else:
raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+")
def initialize_tensors(self, enable_training=False):
# Set the tensors from policy (user module) to container (DS module)
self.set_attention(*self.policy.attention(enable_training=enable_training))
self.set_mlp(*self.policy.mlp(enable_training=enable_training))
self.set_layernorm(*self.policy.layernorm())
self.check_meta_tensor_support()
def convert_to_required_dtype(self):
# Note: converting tensors to fp16 requires that we do it in-place using self.__dict__ and not make a list/dict copy
if self.dtype in [torch.half, torch.bfloat16]:
for k, v in self.__dict__.items():
# The list comprehension is used for MoE tensor lists
if isinstance(v, list) and all((isinstance(tensor, torch.Tensor) \
or isinstance(tensor, torch.nn.Parameter)) for tensor in v):
self.__dict__[k] = [moe_tensor.to(self.dtype) for moe_tensor in v]
if isinstance(v, torch.Tensor) or isinstance(v, torch.nn.Parameter):
self.__dict__[k] = v.to(self.dtype)
def get_rotary_dim(self):
if hasattr(self.model_config, 'rotary_dim'):
return self.model_config.rotary_dim
if hasattr(self.child, 'attention') and hasattr(self.child.attention, 'rotary_ndims'):
return self.child.attention.rotary_ndims
return -1
def set_moe(self, moe=False):
self.moe = moe
def set_tensor_parallel_config(self, mp_size, mp_group):
self.mp_size = mp_size
self.mp_group = mp_group
def set_quantization_config(self, quantizer):
self.quantizer = quantizer
def set_hidden_heads(self, hidden_size, num_attention_heads, epsilon, intermediate_size):
"""
Args:
hidden_size: embedding dimension of the model
num_attention_heads: number of attention heads in the model
epsilon: epsilon value for layer norm (same value used for all norms)
intermediate_size: Size of MLP projection. If `DEFAULT_INTERMEDIATE_SIZE` is passed
it is assumed to be `4 * hidden_size`
"""
self.hidden_size = hidden_size
if intermediate_size == DEFAULT_INTERMEDIATE_SIZE:
self.intermediate_size = 4 * hidden_size
else:
self.intermediate_size = intermediate_size
self.num_attention_heads = num_attention_heads
self.layernorm_epsilon = epsilon
def set_attention(self, qkvw, qkvb, dense_w, dense_b):
self.qkvw = qkvw
self.qkvb = qkvb
self.dense_w = dense_w
self.dense_b = dense_b
def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b):
self._h4h_w = _h4h_w
self._h4h_b = _h4h_b
self._4hh_w = _4hh_w
self._4hh_b = _4hh_b
def set_layernorm(self, attn_nw, attn_nb, input_nw, input_nb):
self.attn_nw = attn_nw
self.attn_nb = attn_nb
self.input_nw = input_nw
self.input_nb = input_nb
def apply_weight_quantization(self):
# quantize attention weights
self.attention_quantization()
# quantize mlp weights
self.mlp_quantization()
def attention_quantization(self):
self.module.attention.attn_qkvw = self.quantizer.quantize(self.module.attention.attn_qkvw)
self.module.attention.attn_ow = self.quantizer.quantize(self.module.attention.attn_ow)
def mlp_quantization(self):
self.module.mlp.inter_w = self.quantizer.quantize(self.module.mlp.inter_w)
self.module.mlp.output_w = self.quantizer.quantize(self.module.mlp.output_w)
def apply_tensor_parallelism(self, mp_replace):
# setup the new Attention module
self.attention_qkv_mp(mp_replace)
self.attention_o_mp(mp_replace)
# setup the new MLP module
self.mlp_inter_mp(mp_replace)
self.mlp_output_mp(mp_replace)
# Apply weight quantization
# TODO(cmikeh2): Re-enable this once verified
#self.apply_weight_quantization()
def attention_qkv_mp(self, mp_replace, reversed_dim=False):
self.module.attention.attn_qkvw = mp_replace.strided_copy(self.module.attention.attn_qkvw,
self.qkvw,
num_splits=3,
int8=reversed_dim)
self.module.attention.attn_qkvb = mp_replace.strided_copy(self.module.attention.attn_qkvb,
self.qkvb,
num_splits=3,
int8=reversed_dim)
def attention_o_mp(self, mp_replace, reversed_dim=False):
self.module.attention.attn_ow = mp_replace.copy(self.module.attention.attn_ow, self.dense_w, int8=reversed_dim)
self.module.attention.attn_ob = mp_replace.copy(self.module.attention.attn_ob,
self.dense_b,
int8=reversed_dim,
allocate_tensor=reversed_dim)
def mlp_inter_mp(self, mp_replace, reversed_dim=False):
self.module.mlp.inter_w = mp_replace.copy(self.module.mlp.inter_w, self._h4h_w, int8=reversed_dim)
self.module.mlp.inter_b = mp_replace.copy(self.module.mlp.inter_b, self._h4h_b, int8=reversed_dim)
def mlp_output_mp(self, mp_replace, reversed_dim=False):
self.module.mlp.output_w = mp_replace.copy(self.module.mlp.output_w, self._4hh_w, int8=reversed_dim)
self.module.mlp.output_b = mp_replace.copy(self.module.mlp.output_b,
self._4hh_b,
int8=reversed_dim,
allocate_tensor=reversed_dim)
def copy_data_to_new_module(self):
params = {
self.module.mlp.attn_nw: self.attn_nw,
self.module.mlp.attn_nb: self.attn_nb,
self.module.norm_w: self.input_nw,
self.module.norm_b: self.input_nb
}
for dst, src in params.items():
if src is None:
dst = src
else:
dst.data.copy_(src.to(get_accelerator().current_device_name()))
def transpose(self):
self.transpose_attention()
self.transpose_mlp()
def transpose_attention(self):
if self.attn_linear_layer:
self.qkvw = self.transpose_impl(self.qkvw.data)
self.dense_w = self.transpose_impl(self.dense_w.data)
def transpose_mlp(self):
if self.mlp_linear_layer:
self._h4h_w = self.transpose_impl(self._h4h_w.data)
self._4hh_w = self.transpose_impl(self._4hh_w.data)
def transpose_impl(self, data):
data = data.contiguous()
data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
data = data.reshape(data.shape[-1], data.shape[-2])
        data = data.to(get_accelerator().current_device_name())
return data
def get_all_params(self):
params = [
self.attn_nw,
self.attn_nb,
self.input_nw,
self.input_nb,
]
params.extend(self.get_attn_params())
params.extend(self.get_mlp_params())
return params
def get_attn_params(self):
return [self.qkvw, self.qkvb, self.dense_w, self.dense_b]
def get_mlp_params(self):
return [self._h4h_w, self._h4h_b, self._4hh_w, self._4hh_b]
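# Illustrative sketch (not part of DeepSpeed): what set_hidden_heads() above does with the
# intermediate size reported by a policy. The helper name (_demo_intermediate_size) is
# hypothetical and only restates the rule for clarity.
def _demo_intermediate_size(hidden_size, intermediate_size):
    # A policy returning DEFAULT_INTERMEDIATE_SIZE (-1) gets the conventional 4x expansion;
    # an explicit value (e.g. a gated-MLP width) is kept as-is.
    return 4 * hidden_size if intermediate_size == DEFAULT_INTERMEDIATE_SIZE else intermediate_size
# _demo_intermediate_size(1024, DEFAULT_INTERMEDIATE_SIZE) -> 4096
# _demo_intermediate_size(4096, 11008) -> 11008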
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from ..policy import DSPolicy
from ...model_implementations.diffusers.vae import DSVAE
class VAEPolicy(DSPolicy):
def __init__(self):
super().__init__()
try:
import diffusers
if hasattr(diffusers.models.vae, "AutoencoderKL"):
self._orig_layer_class = diffusers.models.vae.AutoencoderKL
else:
# Diffusers >= 0.12.0 changes location of AutoencoderKL
self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL
except ImportError:
self._orig_layer_class = None
def match(self, module):
return isinstance(module, self._orig_layer_class)
def match_replaced(self, module):
return isinstance(module, DSVAE)
def apply(self, module, enable_cuda_graph=True):
# TODO(cmikeh2): Enable cuda graph should be an inference configuration
return DSVAE(module, enable_cuda_graph=enable_cuda_graph)
# NOTE (lekurile): Should we have a diffusers policy class?
def attention(self):
pass
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
class DS_DistilBERTContainer(BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
self.triangular_masking = False
self.return_single_tuple = True
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
class HFDistilBertLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=False, preln=False):
super().__init__(inference)
self.client_module = client_module
self.preln = preln
self.cuda_graph_supported = True
if HFDistilBertLayerPolicy._orig_layer_class is None:
try:
import transformers
HFDistilBertLayerPolicy._orig_layer_class = [
transformers.models.distilbert.modeling_distilbert.TransformerBlock,
]
except:
HFDistilBertLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attention.q_lin.weight.shape[1], \
self.client_module.attention.n_heads, \
self.client_module.sa_layer_norm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.attention.q_lin.weight
qb = self.client_module.attention.q_lin.bias
kw = self.client_module.attention.k_lin.weight
kb = self.client_module.attention.k_lin.bias
vw = self.client_module.attention.v_lin.weight
vb = self.client_module.attention.v_lin.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.attention.out_lin.weight, \
self.client_module.attention.out_lin.bias
def mlp(self, enable_training=False):
intermediate_ff = self.client_module.ffn.lin1
return intermediate_ff.weight, intermediate_ff.bias, \
self.client_module.ffn.lin2.weight, \
self.client_module.ffn.lin2.bias
def layernorm(self):
attention_layernorm = self.client_module.sa_layer_norm
transformer_layernorm = self.client_module.output_layer_norm
return attention_layernorm.weight, \
attention_layernorm.bias, \
transformer_layernorm.weight, \
transformer_layernorm.bias
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.meta_tensor import MetaTensorContainer
from deepspeed.model_implementations.transformers.ds_bloom import DeepSpeedBloomInference
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_get_lora
supported_models = {None}
class DS_BloomContainer(MetaTensorContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
self.bigscience_bloom = True
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedBloomInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
def attention_qkv_mp(self, mp_replace, reversed_dim=False):
self.module.attention.attn_qkvw = mp_replace.copy(self.module.attention.attn_qkvw, self.qkvw)
self.module.attention.attn_qkvb = mp_replace.copy(self.module.attention.attn_qkvb, self.qkvb)
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'self_attention.query_key_value.weight', \
'self_attention.query_key_value.bias', \
'self_attention.dense.weight', \
'self_attention.dense.bias', \
'mlp.dense_h_to_4h.weight', \
'mlp.dense_h_to_4h.bias', \
'mlp.dense_4h_to_h.weight', \
'mlp.dense_4h_to_h.bias', \
'post_attention_layernorm.weight', \
'post_attention_layernorm.bias', \
'input_layernorm.weight', \
'input_layernorm.bias'
)
for i in range(0, 2):
maybe_copy(module.attention,
sd,
weight_quantizer,
mp_replace,
transformer_param_names[i],
prefix + param_names[i],
qkv=True,
megatron_v2=self.policy.is_megatron_v2,
split_qkv=self.policy.split_qkv)
for i in range(2, 4):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(4, 10):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(10, 12):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i], prefix + param_names[i])
class BLOOMLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=True, use_load_prefix=True, split_qkv=False):
super().__init__(inference, linear_layer=True, use_load_prefix=use_load_prefix, split_qkv=split_qkv)
self.client_module = client_module
try:
import transformers
BLOOMLayerPolicy._orig_layer_class = transformers.models.bloom.modeling_bloom.BloomBlock
global supported_models
supported_models.update({transformers.models.bloom.modeling_bloom.BloomModel})
except Exception as e:
print(f"WARNING! Setting BLOOMLayerPolicy._orig_layer_class to None due to Exception: {e}")
BLOOMLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.self_attention.hidden_size, \
self.client_module.self_attention.num_heads, \
self.client_module.input_layernorm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
return self.client_module.self_attention.query_key_value.weight, \
self.client_module.self_attention.query_key_value.bias, \
self.client_module.self_attention.dense.weight, \
self.client_module.self_attention.dense.bias,
def mlp(self, enable_training=False):
return self.client_module.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.dense_4h_to_h.bias
def layernorm(self):
return self.client_module.post_attention_layernorm.weight, \
self.client_module.post_attention_layernorm.bias, \
self.client_module.input_layernorm.weight, \
self.client_module.input_layernorm.bias
def get_lora_params(self):
all_lora_params = []
for p in [
self.client_module.mlp.dense_h_to_4h, \
self.client_module.mlp.dense_4h_to_h, \
self.client_module.self_attention.query_key_value, \
self.client_module.self_attention.dense
]:
all_lora_params.append(maybe_get_lora(p))
return all_lora_params
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import abstractmethod
import torch
from .hybrid_engine import HybridEngineContainer
class HybridSplitQKVContainer(HybridEngineContainer):
def set_attention(self, qkvw, qkvb, dense_w, dense_b):
super().set_attention(qkvw, qkvb, dense_w, dense_b)
self.set_q_k_v()
@abstractmethod
def set_q_k_v(self):
"""
In `set_q_k_v`, it is necessary to populate the following variables (where appropriate)
for the given model:
self.qw: q weight
self.qb: q bias
self.kw: k weight
self.kb: k bias
self.vw: v weight
self.vb: v bias
"""
raise NotImplementedError("A set_q_k_v() function must be defined in the model container \
in order to set the unfused q, k, and v tensors.")
def attention_qkv_mp(self, mp_replace, reversed_dim=False):
        # Only need to alter behavior if we can't do the normal destructive copy
if self.module.attention.attn_qkvw is None:
params = [
(self.module.attention.attn_qw, self.qw),
(self.module.attention.attn_qb, self.qb),
(self.module.attention.attn_kw, self.kw),
(self.module.attention.attn_kb, self.kb),
(self.module.attention.attn_vw, self.vw),
(self.module.attention.attn_vb, self.vb),
]
for dst, src in params:
dst = mp_replace.copy(
dst[:self.qw.shape[0] // mp_replace.mp_size], src, int8=reversed_dim,
allocate_tensor=reversed_dim) if src is not None else None
else:
super().attention_qkv_mp(mp_replace)
def release_qkv(self):
super().release_qkv()
split_qkv_params = [
(self.module.attention.attn_qw, self.qw),
(self.module.attention.attn_qb, self.qb),
(self.module.attention.attn_kw, self.kw),
(self.module.attention.attn_kb, self.kb),
(self.module.attention.attn_vw, self.vw),
(self.module.attention.attn_vb, self.vb),
]
self._release_params(split_qkv_params)
def reset_qkv(self):
self.qkvw.data[:self.qw.shape[0]] = self.qw.data
self.qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kw.data
self.qkvw.data[2 * self.qw.shape[0]:] = self.vw.data
qkv_data = [self.qw.data, self.kw.data, self.vw.data]
self.qw.data = self.qkvw.data[:self.qw.shape[0]]
self.kw.data = self.qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vw.data = self.qkvw.data[2 * self.qw.shape[0]:]
if self.qkvb is not None:
self.qkvb.data[:self.qw.shape[0]] = self.qb.data
self.qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kb.data
self.qkvb.data[2 * self.qw.shape[0]:] = self.vb.data
qkv_data.extend([self.qb.data, self.kb.data, self.vb.data])
self.qb.data = self.qkvb.data[:self.qw.shape[0]]
self.kb.data = self.qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vb.data = self.qkvb.data[2 * self.qw.shape[0]:]
for data in qkv_data:
del data
def reset_qkv_experimental(self):
"""
        WIP - experimental and likely to be changed/improved.
        Unused, but keeping it for now.
"""
if self.module.attention.attn_qkvw is None:
self.module.attention.attn_qkvw = torch.empty(self.qw.shape[0] * 3,
self.qw.shape[0],
dtype=self.qw.dtype,
device=self.qw.device)
self.module.attention.attn_qkvb = torch.empty(self.qw.shape[0] * 3,
dtype=self.qw.dtype,
device=self.qw.device)
self.module.attention.attn_qkvw.data[:self.qw.shape[0]] = self.qw.data
self.module.attention.attn_qkvb.data[:self.qw.shape[0]] = self.qb.data
self.module.attention.attn_qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kw.data
self.module.attention.attn_qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kb.data
self.module.attention.attn_qkvw.data[2 * self.qw.shape[0]:] = self.vw.data
self.module.attention.attn_qkvb.data[2 * self.qw.shape[0]:] = self.vb.data
qkv_data = [self.qw.data, \
self.qb.data, \
self.kw.data, \
self.kb.data, \
self.vw.data, \
self.vb.data]
self.qw.data = self.module.attention.attn_qkvw.data[:self.qw.shape[0]]
self.qb.data = self.module.attention.attn_qkvb.data[:self.qw.shape[0]]
self.kw.data = self.module.attention.attn_qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.kb.data = self.module.attention.attn_qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vw.data = self.module.attention.attn_qkvw.data[2 * self.qw.shape[0]:]
self.vb.data = self.module.attention.attn_qkvb.data[2 * self.qw.shape[0]:]
for data in qkv_data:
del data
    def set_attn_params_wo_copy(self, Z3_enabled=False):
self.module.attention.attn_ow = self.dense_w
self.module.attention.attn_ob = self.dense_b
if not Z3_enabled:
# In initialize_tensors, we create a fused qkvw with the appropriate shape
# and copy the qw, qb, kw, kb, vw, vb into it
            self.module.attention.attn_qkvw = self.qkvw
            self.module.attention.attn_qkvb = self.qkvb
# We reset the data for qw (which is the original model parameter) to point
# to the fused weight matrix we have created here
self.qw.data = self.qkvw[:self.qw.shape[0], :]
self.kw.data = self.qkvw[self.qw.shape[0]:2 * self.qw.shape[0], :]
self.vw.data = self.qkvw[self.qw.shape[0] * 2:, :]
# Assume if one of the biases is not None, then all of them are not None
if self.qb is not None:
self.qb.data = self.qkvb[:self.qw.shape[0]]
self.kb.data = self.qkvb[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vb.data = self.qkvb[self.qw.shape[0] * 2:]
else:
# In ZeRO-3 this will be managed by ZeRO and handled separately in the
# forward of ds_attention
self.module.attention.attn_qw = self.qw
self.module.attention.attn_qb = self.qb
self.module.attention.attn_kw = self.kw
self.module.attention.attn_kb = self.kb
self.module.attention.attn_vw = self.vw
self.module.attention.attn_vb = self.vb
def get_attn_params(self):
params = super().get_attn_params()
params.extend([self.qw, self.qb, self.kw, self.kb, self.vw, self.vb])
return params
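# Illustrative sketch (not part of DeepSpeed): the storage-sharing trick used by reset_qkv()
# above. After the q/k/v blocks are copied into the fused buffer, the per-projection tensors
# are re-pointed at slices of it, so the fused inference view and the unfused training view
# alias the same memory. Sizes and the helper name are hypothetical.
def _demo_fused_qkv_aliasing(dim=4):
    q, k, v = torch.randn(dim, dim), torch.randn(dim, dim), torch.randn(dim, dim)
    qkvw = torch.empty(3 * dim, dim)
    qkvw[:dim], qkvw[dim:2 * dim], qkvw[2 * dim:] = q, k, v
    q, k, v = qkvw[:dim], qkvw[dim:2 * dim], qkvw[2 * dim:]  # views, no extra copy
    q.add_(1.0)                        # an in-place update on q ...
    assert torch.equal(qkvw[:dim], q)  # ... is visible through the fused buffer
    return qkvw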
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import ABC, abstractmethod
from typing import List, Tuple
import torch
class HybridEngineContainer(ABC):
"""
This container identifies which methods need to be overridden in addition to
the base container to enable use in the RLHF pipeline. These methods are not
necessary for inference alone.
NOTE: If you are using this feature with a container that
also inherits from `MetaTensorContainer`, ensure that `MetaTensorContainer`
is inherited before `HybridEngineContainer` in the class definition.
"""
def initialize_tensors(self, enable_training=False):
"""
Same purposes as the base container, but also grabs the hooks for any LoRA
parameters. If it's necessary to override specific sub-components of the model,
it's best to augment the specific `set_[component]` itself rather than modifying
the `initialize_tensors` method. See the `HybridSplitQKVContainer` for an example.
"""
super().initialize_tensors(enable_training=enable_training)
self.set_lora_params()
def transform_for_training(self):
"""
If the views on certain parameters are largely incompatible, it may be necessary to do
more substantial transformations to the parameters. This method should be overridden to
transform the inference format to what is necessary for training.
"""
pass
def transform_for_inference(self):
"""
If the views on certain parameters are largely incompatible, it may be necessary to do
more substantial transformations to the parameters. This method should be overridden to
transform the training format to what is necessary for inference.
"""
pass
@abstractmethod
def set_lora_params(self):
"""
If available, set the LoRA parameters for the module. An implementation
for this would iterate over all parameters of the model and use the `maybe_get_lora` helper
method to check if the parameter does in fact have any LoRA params.
"""
raise NotImplementedError("A set_lora_params() function must be defined for the relevant parameters.")
def apply_tensor_parallelism(self, mp_replace, reversed_dim=False):
"""
Add support for reversed dim in tensor parallelism. If necessary, override
the called methods to handle partitioned weights (i.e. if qkv is split, override
the `attention_qkv_mp` method). If the model component is not split, it should
be safe to use the default implementation.
"""
# Setup the new Attention module
self.attention_qkv_mp(mp_replace, reversed_dim=reversed_dim)
self.attention_o_mp(mp_replace, reversed_dim=reversed_dim)
# Setup the new MLP module
self.mlp_inter_mp(mp_replace, reversed_dim=reversed_dim)
self.mlp_output_mp(mp_replace, reversed_dim=reversed_dim)
# Apply weight quantization
# TODO(cmikeh2): Re-enable this once verified
#self.apply_weight_quantization()
def _release_params(self, param_pairs: List[Tuple[torch.Tensor, torch.Tensor]]):
"""
Helper for `release_[component]` methods. Accepts a list of tuples where the first
element is the module param that needs to be deleted, and the second is the reassignment
from the container.
"""
for module_param, container_param in param_pairs:
if module_param is not None:
del module_param
module_param = container_param
def release_memory(self):
"""
Delete module parameters if they exist and point them back to the container. The primary
purpose of this is for TP-inference with ZeRO-3. In this scenario, we need to delete the
parameters we've created for inference to free their memory.
"""
general_params = [
(self.module.attention.attn_ow, self.dense_w),
(self.module.attention.attn_ob, self.dense_b),
(self.module.attn_nw, self.attn_nw),
(self.module.attn_nb, self.attn_nb),
(self.module.norm_w, self.input_nw),
(self.module.norm_b, self.input_nb),
]
self._release_params(general_params)
self.release_qkv()
self.release_mlp()
def release_qkv(self):
"""
Release for QKV parameters (as well as any aliases).
"""
qkv_params = [
(self.module.attention.attn_qkvw, self.qkvw),
(self.module.attention.attn_qkvb, self.qkvb),
]
self._release_params(qkv_params)
def release_mlp(self):
"""
Release for MLP parameters (as well as any aliases).
"""
mlp_params = [
(self.module.mlp.inter_w, self._h4h_w),
(self.module.mlp.inter_b, self._h4h_b),
(self.module.mlp.output_w, self._4hh_w),
(self.module.mlp.output_b, self._4hh_b),
]
self._release_params(mlp_params)
def reset_params(self):
"""
        The purpose of reset_params is to get the weights from the FP16 training
        copy of the model and copy them into the contiguous inference view. This only needs
        to be performed when the container parameters cannot be used directly for inference.
"""
self.reset_qkv()
self.reset_mlp()
def reset_qkv(self):
"""
Perform any necessary resets of the model parameters for the QKV components.
"""
pass
def reset_mlp(self):
"""
Perform any necessary resets of the model parameters for the MLP components.
"""
pass
def get_lora_params(self):
"""
Return a list of all parameters that would have LoRA for the module.
"""
if not hasattr(self, "lora_params"):
self.set_lora_params()
return self.lora_params
def set_params_wo_copy(self, Z3_enabled=False):
"""
Rather than copying into, set the parameters directly. This is necessary to provide
an inexpensive (low-memory-overhead) view onto the FP16 forward weights.
"""
self.module.mlp.attn_nw = self.attn_nw
self.module.mlp.attn_nb = self.attn_nb
self.module.norm_w = self.input_nw
self.module.norm_b = self.input_nb
self.set_attn_params_wo_copy(Z3_enabled=Z3_enabled)
self.set_mlp_params_wo_copy(Z3_enabled=Z3_enabled)
def set_attn_params_wo_copy(self, **kwargs):
"""
Narrower sub-method for finer grained overriding.
"""
self.module.attention.attn_ow = self.dense_w
self.module.attention.attn_ob = self.dense_b
self.module.attention.attn_qkvw = self.qkvw
self.module.attention.attn_qkvb = self.qkvb
def set_mlp_params_wo_copy(self, **kwargs):
"""
Narrower sub-method for finer grained overriding.
"""
self.module.mlp.inter_w = self._h4h_w
self.module.mlp.inter_b = self._h4h_b
self.module.mlp.output_w = self._4hh_w
self.module.mlp.output_b = self._4hh_b
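# Illustrative sketch (not part of DeepSpeed): the single abstract hook a concrete container
# must provide on top of this class. Real containers (see the GPT-NeoX and BLOOM containers
# elsewhere in this file set) populate self.lora_params via the maybe_get_lora helper; the
# empty list below is a hypothetical stand-in for a model with no LoRA-wrapped layers.
class _DemoHybridContainer(HybridEngineContainer):
    def set_lora_params(self):
        self.lora_params = []  # hypothetical model: nothing is LoRA-wrapped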
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import abstractmethod
from .hybrid_engine import HybridEngineContainer
class HybridGatedMLPContainer(HybridEngineContainer):
"""
The HybridGatedMLPContainer supports models for which the first MLP layer
is represented with two separate weights, one for the activation function
and one for the gating function.
"""
def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b):
super().set_mlp(_h4h_w, _h4h_b, _4hh_w, _4hh_b)
self.set_mlp_gate()
@abstractmethod
def set_mlp_gate(self):
"""
In `set_mlp_gate`, it is necessary to populate the following variables (where appropriate)
for the given model:
self.inter_up_w: inter up weight
self.inter_up_b: inter up bias
self.inter_gate_w: inter gate weight
self.inter_gate_b: inter gate bias
If the parameter does not exist in the original model, set the attribute to None.
"""
raise NotImplementedError("A set_mlp_gate() function must be defined in the model container \
in order to set the unfused inter up and gate tensors.")
def mlp_inter_mp(self, mp_replace, reversed_dim=False):
# Only need to alter behavior if we can't do the normal destructive copy
if self.module.mlp.inter_w is None:
params = [
(self.module.mlp.inter_up_w, self.inter_up_w),
(self.module.mlp.inter_up_b, self.inter_up_b),
(self.module.mlp.inter_gate_w, self.inter_gate_w),
(self.module.mlp.inter_gate_b, self.inter_gate_b),
]
for dst, src in params:
dst = mp_replace.copy(dst[:self.inter_up_w.shape[0] // mp_replace.mp_size],
src,
int8=reversed_dim,
allocate_tensor=reversed_dim) if src is not None else None
else:
super().mlp_inter_mp(mp_replace)
def release_mlp(self):
super().release_mlp()
gated_mlp_params = [
(self.module.mlp.inter_up_w, self.inter_up_w),
(self.module.mlp.inter_up_b, self.inter_up_b),
(self.module.mlp.inter_gate_w, self.inter_gate_w),
(self.module.mlp.inter_gate_b, self.inter_gate_b),
]
self._release_params(gated_mlp_params)
def reset_mlp(self):
self._h4h_w.data[:self.inter_up_w.shape[0]] = self.inter_up_w.data
self._h4h_w.data[self.inter_up_w.shape[0]:] = self.inter_gate_w.data
if self.inter_up_b is not None:
self._h4h_b.data[:self.inter_up_b.shape[0]] = self.inter_up_b.data
self._h4h_b.data[self.inter_up_b.shape[0]:] = self.inter_gate_b.data
inter_data = [self.inter_up_w.data, self.inter_gate_w.data]
if self.inter_up_b is not None:
inter_data.extend([self.inter_up_b.data, self.inter_gate_b.data])
self.inter_up_w.data = self._h4h_w.data[:self.inter_up_w.shape[0]]
self.inter_gate_w.data = self._h4h_w.data[self.inter_up_w.shape[0]:]
if self.inter_up_b is not None:
self.inter_up_b.data = self._h4h_b.data[:self.inter_up_b.shape[0]]
self.inter_gate_b.data = self._h4h_b.data[self.inter_up_b.shape[0]:]
for data in inter_data:
del data
def set_mlp_params_wo_copy(self, Z3_enabled=False):
self.module.mlp.output_w = self._4hh_w
self.module.mlp.output_b = self._4hh_b
if not Z3_enabled:
# In initialize_tensors, we create a fused inter projection with the appropriate shape
# and copy the up projection and gate projection into it
self.module.mlp.inter_w = self._h4h_w
self.module.mlp.inter_b = self._h4h_b
self.inter_up_w.data = self._h4h_w[:self.inter_up_w.shape[0], :]
self.inter_gate_w.data = self._h4h_w[self.inter_up_w.shape[0]:, :]
if self.inter_up_b is not None:
self.inter_up_b.data = self._h4h_b[:self.inter_up_w.shape[0]] if self._h4h_b is not None else None
self.inter_gate_b.data = self._h4h_b[self.inter_up_w.shape[0]:] if self._h4h_b is not None else None
else:
self.module.inter_up_w = self.inter_up_w
self.module.inter_up_b = self.inter_up_b
self.module.inter_gate_w = self.inter_gate_w
self.module.inter_gate_b = self.inter_gate_b
def get_mlp_params(self):
params = super().get_mlp_params()
params.extend([self.inter_up_w, self.inter_up_b, self.inter_gate_w, self.inter_gate_b])
return params
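# Illustrative sketch (not part of DeepSpeed): a concrete container's `set_mlp_gate`
# usually just aliases the client model's unfused projections. The attribute names
# `up_proj`/`gate_proj` below are hypothetical and depend on the client model; any
# parameter the model does not have should be set to None.
class _ExampleGatedMLPContainer(HybridGatedMLPContainer):
    def set_mlp_gate(self):
        self.inter_up_w = self.policy.client_module.mlp.up_proj.weight
        self.inter_up_b = None
        self.inter_gate_w = self.policy.client_module.mlp.gate_proj.weight
        self.inter_gate_b = None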
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import ABC, abstractmethod
from packaging import version as pkg_version
import torch
class MetaTensorContainer(ABC):
"""
NOTE: If you are using this feature with a container that
also inherits from `HybridEngineContainer`, ensure that `MetaTensorContainer`
is inherited before `HybridEngineContainer` in the class definition.
"""
def __init__(self, **kwargs):
if pkg_version.parse('1.10') > pkg_version.parse(torch.__version__):
raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+")
super().__init__(**kwargs)
self.is_meta = False
self.ckpt_load_enabled = True
def initialize_tensors(self, enable_training=False):
super().initialize_tensors(enable_training=enable_training)
self.is_meta = self.qkvw.is_meta
def apply_tensor_parallelism(self, mp_replace, **kwargs):
if self.is_meta:
if self.qkvb is None:
self.module.attention.attn_qkvb = None
if self.dense_b is None:
self.module.attention.attn_ob = None
else:
super().apply_tensor_parallelism(mp_replace, **kwargs)
def copy_data_to_new_module(self):
if self.is_meta:
if self.attn_nw is None:
self.module.mlp.attn_nw = self.attn_nw
self.module.mlp.attn_nb = self.attn_nb
else:
super().copy_data_to_new_module()
def transpose(self):
if not self.is_meta:
super().transpose()
@abstractmethod
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
"""
        Load all the transformer parameters from the checkpoint file (sd).
        In addition to the parameter names, we require two
        more parameters to help read the data correctly
        from the checkpoint and split the qkv heads in the
        right order:
        1. `use_load_prefix` (Default: False): this specifies
            whether we need to use the name of the first abstraction
            layer of the model for searching the parameter's name
            in a checkpoint file. For more information on how this
            is used please see
            https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/load_checkpoint.py
        2. `split_qkv` (Default: True): we use this flag when splitting
            the qkv parameter into heads. If it is False, it means the heads
            of q, k, and v are stored together and need to be split by the
            DeepSpeed-Inference API.
"""
raise NotImplementedError("A load_params() function must be defined in the model container \
when inheriting the MetaTensorContainer feature")
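# Illustrative skeleton (not part of DeepSpeed) of a `load_params` override. Real
# containers map every transformer parameter; the checkpoint key 'attn.qkv.weight'
# below is a hypothetical placeholder, and quantization via `weight_quantizer` is
# omitted from this sketch.
class _ExampleMetaTensorContainer(MetaTensorContainer):
    def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
        # Look up the checkpoint tensor by its (prefixed) name and copy it into the
        # matching inference parameter; mp_replace handles the tensor-parallel split.
        qkv_w = sd[prefix + 'attn.qkv.weight']
        module.attention.attn_qkvw = mp_replace.copy(module.attention.attn_qkvw, qkv_w)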
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .gated_mlp import HybridGatedMLPContainer
from .megatron import MegatronContainer
from .meta_tensor import MetaTensorContainer
from .split_qkv import HybridSplitQKVContainer
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from .hybrid_engine import HybridEngineContainer
from .megatron import MegatronContainer
class HybridMegatronContainer(MegatronContainer, HybridEngineContainer):
def _align_qkv(self, x: torch.Tensor):
"""
Internal helper for accepting the head-contiguous weight matrix and chunking
the query, key, and value components.
"""
attention_head_size = x.shape[0] // self.num_attention_heads
new_x_shape = (self.num_attention_heads, attention_head_size) + x.size()[1:]
x_1 = x.view(*new_x_shape)
div_dim = len(x_1.size()) - 2 if len(x.shape) == 2 else -1
(q, k, v) = torch.split(x_1, (x_1.shape[div_dim] // 3), dim=div_dim)
if len(q.shape) > 2:
x.data.copy_(
torch.cat((q.reshape(-1, q.shape[-1]), k.reshape(-1, q.shape[-1]), v.reshape(-1, q.shape[-1])),
dim=0).reshape(x.shape))
else:
x.data.copy_(torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape))
def transform_for_inference(self) -> None:
"""
Overrides the HybridEngineContainer implementation.
The alternative layout of the QKV matrix for Megatron is such that each head's Q, K, and V
are sequential in memory. This is different from the default layout in which all of the Qs
are sequential, followed by all of the Ks, and then all of the Vs. Here, we take the default
layout and transform it to the inference layout.
"""
if hasattr(self.qkvw, 'ds_id'):
from deepspeed.runtime.zero import GatheredParameters
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
param_list = [self.qkvw, self.qkvb]
non_active_params = [param for param in param_list if (hasattr(param, 'ds_id') and \
param.ds_status == ZeroParamStatus.NOT_AVAILABLE)]
with GatheredParameters(non_active_params):
self._align_qkv(self.qkvw)
self._align_qkv(self.qkvb)
else:
self._align_qkv(self.qkvw)
self._align_qkv(self.qkvb)
def _partition_qkv(self, x: torch.Tensor):
"""
Internal helper for taking contiguous QKV and partitioning it for contiguous
heads.
"""
q_k_v = torch.split(x, (x.shape[0] // 3), dim=0)
attention_head_size = q_k_v[0].shape[0] // self.num_attention_heads
new_x_shape = (self.num_attention_heads, attention_head_size) + x.size()[1:]
q, k, v = [data.view(*new_x_shape) for data in q_k_v]
if len(q.shape) > 2:
x.data.copy_(torch.cat((q, k, v), dim=-2).reshape(-1, q.shape[-1]))
else:
x.data.copy_(torch.cat((q, k, v), dim=-1).reshape(-1))
def transform_for_training(self):
"""
Overrides the HybridEngineContainer implementation.
The alternative layout of the QKV matrix for Megatron is such that each head's Q, K, and V
are sequential in memory. This is different from the default layout in which all of the Qs
are sequential, followed by all of the Ks, and then all of the Vs. This function takes the inference format and reverts it back to the default format.
"""
# If parameter is distributed, handle gathering it
if hasattr(self.qkvw, 'ds_id'):
from deepspeed.runtime.zero import GatheredParameters
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
param_list = [self.qkvw, self.qkvb]
non_active_params = [param for param in param_list if (hasattr(param, 'ds_id') and \
param.ds_status == ZeroParamStatus.NOT_AVAILABLE)]
with GatheredParameters(non_active_params):
self._partition_qkv(self.qkvw)
self._partition_qkv(self.qkvb)
else:
self._partition_qkv(self.qkvw)
self._partition_qkv(self.qkvb)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from abc import ABC
class MegatronContainer(ABC):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.megatron_v2 = self.policy.is_megatron_v2
def _align_qkv_transposed(self, x):
attention_head_size = x.shape[-1] // self.num_attention_heads
new_x_shape = x.size()[:-1] + (self.num_attention_heads, attention_head_size)
x_1 = x.view(*new_x_shape)
(q, k, v) = torch.split(x_1, (x_1.shape[-1] // 3), dim=(x_1.dim() - 1))
if len(q.shape) > 2:
return torch.cat((q.reshape(q.shape[0], -1), k.reshape(q.shape[0], -1), v.reshape(q.shape[0], -1)),
dim=-1).reshape(x.shape)
else:
return torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape)
def transpose(self):
super().transpose()
if self.megatron_v2:
self.qkvw = torch.nn.parameter.Parameter(self._align_qkv_transposed(self.qkvw).contiguous())
self.qkvb = torch.nn.parameter.Parameter(self._align_qkv_transposed(self.qkvb).contiguous())
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .monitor import Monitor
import os
import deepspeed.comm as dist
class csvMonitor(Monitor):
def __init__(self, csv_config):
super().__init__(csv_config)
self.filenames = []
self.enabled = csv_config.enabled
self.output_path = csv_config.output_path
self.job_name = csv_config.job_name
self.log_dir = self.setup_log_dir()
def setup_log_dir(self, base=os.path.join(os.path.expanduser("~"), "csv_monitor")):
if self.enabled and dist.get_rank() == 0:
if self.output_path is not None:
log_dir = os.path.join(self.output_path, self.job_name)
                # NOTE: This code path currently is never used since the default output_path is an empty string and not None. Saving it in case we want this functionality in the future.
else:
if "DLWS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLWS_JOB_ID"]
elif "DLTS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLTS_JOB_ID"]
else:
infra_job_id = "unknown-job-id"
csv_monitor_dir_name = os.path.join(infra_job_id, "logs")
log_dir = os.path.join(base, csv_monitor_dir_name, self.job_name)
os.makedirs(log_dir, exist_ok=True)
return log_dir
def write_events(self, event_list):
if self.enabled and dist.get_rank() == 0:
import csv
# We assume each event_list element is a tensorboard-style tuple in the format: (log_name: String, value, step: Int)
for event in event_list:
log_name = event[0]
value = event[1]
step = event[2]
# Set the header to the log_name
# Need this check because the deepspeed engine currently formats log strings to separate with '/'
if '/' in log_name:
record_splits = log_name.split('/')
                    header = record_splits[-1]
else:
header = log_name
# sanitize common naming conventions into filename
filename = log_name.replace('/', '_').replace(' ', '_')
fname = self.log_dir + '/' + filename + '.csv'
# Open file and record event. Insert header if this is the first time writing
with open(fname, 'a+') as csv_monitor_file:
csv_monitor_writer = csv.writer(csv_monitor_file)
if filename not in self.filenames:
self.filenames.append(filename)
csv_monitor_writer.writerow(['step', header])
csv_monitor_writer.writerow([step, value])
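# Illustrative usage sketch (not part of the library). Assumes deepspeed.comm has been
# initialized (e.g. via deepspeed.init_distributed()) so dist.get_rank() is valid, that
# the monitor config models live in the sibling `config` module, and that the output
# path is writable.
def _csv_monitor_example():
    from .config import CSVConfig
    csv_config = CSVConfig(enabled=True, output_path="./monitor_logs", job_name="example_job")
    monitor = csvMonitor(csv_config)
    # Events follow the tensorboard-style convention: (name, value, step).
    monitor.write_events([("Train/loss", 2.31, 0), ("Train/lr", 1e-4, 0)])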
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from pydantic import root_validator
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
def get_monitor_config(param_dict):
monitor_dict = {key: param_dict.get(key, {}) for key in ("tensorboard", "wandb", "csv_monitor")}
return DeepSpeedMonitorConfig(**monitor_dict)
class TensorBoardConfig(DeepSpeedConfigModel):
"""Sets parameters for TensorBoard monitor."""
enabled: bool = False
""" Whether logging to Tensorboard is enabled. Requires `tensorboard` package is installed. """
output_path: str = ""
"""
Path to where the Tensorboard logs will be written. If not provided, the
output path is set under the training script’s launching path.
"""
job_name: str = "DeepSpeedJobName"
""" Name for the current job. This will become a new directory inside `output_path`. """
class WandbConfig(DeepSpeedConfigModel):
"""Sets parameters for WandB monitor."""
enabled: bool = False
""" Whether logging to WandB is enabled. Requires `wandb` package is installed. """
group: str = None
""" Name for the WandB group. This can be used to group together runs. """
team: str = None
""" Name for the WandB team. """
project: str = "deepspeed"
""" Name for the WandB project. """
class CSVConfig(DeepSpeedConfigModel):
"""Sets parameters for CSV monitor."""
enabled: bool = False
""" Whether logging to local CSV files is enabled. """
output_path: str = ""
"""
Path to where the csv files will be written. If not provided, the output
path is set under the training script’s launching path.
"""
job_name: str = "DeepSpeedJobName"
""" Name for the current job. This will become a new directory inside `output_path`. """
class DeepSpeedMonitorConfig(DeepSpeedConfigModel):
"""Sets parameters for various monitoring methods."""
tensorboard: TensorBoardConfig = {}
""" TensorBoard monitor, requires `tensorboard` package is installed. """
wandb: WandbConfig = {}
""" WandB monitor, requires `wandb` package is installed. """
csv_monitor: CSVConfig = {}
""" Local CSV output of monitoring data. """
@root_validator
def check_enabled(cls, values):
values["enabled"] = False
if (values.get("tensorboard").enabled or values.get("wandb").enabled or values.get("csv_monitor").enabled):
values["enabled"] = True
return values
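# Illustrative sketch (not part of the library): building the monitor config from a
# DeepSpeed-style config dictionary with only the CSV backend enabled.
def _example_monitor_config():
    param_dict = {
        "tensorboard": {"enabled": False},
        "wandb": {"enabled": False},
        "csv_monitor": {"enabled": True, "output_path": "./monitor_logs", "job_name": "example_job"},
    }
    monitor_config = get_monitor_config(param_dict)
    assert monitor_config.enabled  # True because at least one backend is enabled
    return monitor_config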
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Support different forms of monitoring such as wandb and tensorboard
"""
from abc import ABC, abstractmethod
import deepspeed.comm as dist
class Monitor(ABC):
@abstractmethod
def __init__(self, monitor_config):
self.monitor_config = monitor_config
@abstractmethod
def write_events(self, event_list):
pass
from .wandb import WandbMonitor
from .tensorboard import TensorBoardMonitor
from .csv_monitor import csvMonitor
class MonitorMaster(Monitor):
def __init__(self, monitor_config):
super().__init__(monitor_config)
self.tb_monitor = None
self.wandb_monitor = None
self.csv_monitor = None
self.enabled = monitor_config.enabled
if dist.get_rank() == 0:
if monitor_config.tensorboard.enabled:
self.tb_monitor = TensorBoardMonitor(monitor_config.tensorboard)
if monitor_config.wandb.enabled:
self.wandb_monitor = WandbMonitor(monitor_config.wandb)
if monitor_config.csv_monitor.enabled:
self.csv_monitor = csvMonitor(monitor_config.csv_monitor)
def write_events(self, event_list):
if dist.get_rank() == 0:
if self.tb_monitor is not None:
self.tb_monitor.write_events(event_list)
if self.wandb_monitor is not None:
self.wandb_monitor.write_events(event_list)
if self.csv_monitor is not None:
self.csv_monitor.write_events(event_list)
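# Illustrative sketch (not part of the library): routing events through MonitorMaster.
# Assumes deepspeed.comm is initialized so dist.get_rank() is valid and that every
# enabled backend (tensorboard/wandb) is installed.
def _monitor_master_example(monitor_config):
    monitor = MonitorMaster(monitor_config)
    if monitor.enabled:
        monitor.write_events([("Train/samples_per_sec", 512.0, 10)])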
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .utils import check_wandb_availability
from .monitor import Monitor
import deepspeed.comm as dist
class WandbMonitor(Monitor):
def __init__(self, wandb_config):
super().__init__(wandb_config)
check_wandb_availability()
import wandb
self.enabled = wandb_config.enabled
self.group = wandb_config.group
self.team = wandb_config.team
self.project = wandb_config.project
if self.enabled and dist.get_rank() == 0:
wandb.init(project=self.project, group=self.group, entity=self.team)
def log(self, data, step=None, commit=None, sync=None):
if self.enabled and dist.get_rank() == 0:
import wandb
return wandb.log(data, step=step, commit=commit, sync=sync)
def write_events(self, event_list):
if self.enabled and dist.get_rank() == 0:
for event in event_list:
label = event[0]
value = event[1]
step = event[2]
self.log({label: value}, step=step)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
def check_tb_availability():
try:
# torch.utils.tensorboard will fail if `tensorboard` is not available,
# see their docs for more details: https://pytorch.org/docs/1.8.0/tensorboard.html
import tensorboard # noqa: F401
except ImportError:
print('If you want to use tensorboard logging, please `pip install tensorboard`')
raise
def check_wandb_availability():
try:
import wandb # noqa: F401
except ImportError:
print(
'If you want to use wandb logging, please `pip install wandb` and follow the instructions at https://docs.wandb.ai/quickstart'
)
raise
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .utils import check_tb_availability
from .monitor import Monitor
import os
import deepspeed.comm as dist
class TensorBoardMonitor(Monitor):
def __init__(self, tensorboard_config):
super().__init__(tensorboard_config)
check_tb_availability()
self.summary_writer = None
self.enabled = tensorboard_config.enabled
self.output_path = tensorboard_config.output_path
self.job_name = tensorboard_config.job_name
if self.enabled and dist.get_rank() == 0:
self.get_summary_writer()
def get_summary_writer(self, base=os.path.join(os.path.expanduser("~"), "tensorboard")):
if self.enabled and dist.get_rank() == 0:
from torch.utils.tensorboard import SummaryWriter
if self.output_path is not None:
log_dir = os.path.join(self.output_path, self.job_name)
# NOTE: This code path currently is never used since the default output_path is an empty string and not None. Saving it in case we want this functionality in the future.
else:
if "DLWS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLWS_JOB_ID"]
elif "DLTS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLTS_JOB_ID"]
else:
infra_job_id = "unknown-job-id"
summary_writer_dir_name = os.path.join(infra_job_id, "logs")
log_dir = os.path.join(base, summary_writer_dir_name, self.output_path)
os.makedirs(log_dir, exist_ok=True)
self.summary_writer = SummaryWriter(log_dir=log_dir)
return self.summary_writer
def write_events(self, event_list, flush=True):
if self.enabled and self.summary_writer is not None and dist.get_rank() == 0:
for event in event_list:
self.summary_writer.add_scalar(*event)
if flush:
self.summary_writer.flush()
def flush(self):
if self.enabled and self.summary_writer is not None and dist.get_rank() == 0:
self.summary_writer.flush()
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from typing import Dict
import torch
from .reshape_3d_utils import model_3d_desc
from .reshape_utils import (basic_folder_validation, merge_state, partition_data, get_files, get_files_with_prefix)
from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX)
from .reshape_meg_2d import reshape_meg_2d_parallel, meg_2d_parallel_map
from .zero_checkpoint import ZeROCheckpoint
from .constants import *
EMBEDDING_LAYER_INDEX = 0
FINAL_LAYER_NORM_INDEX = -1
ARGS_KEY = 'args'
CHECKPOINT_INFO_KEY = 'checkpoint_info'
ITERATION_KEY = 'iteration'
SEQUENTIAL_LAYERS = [
'input_layernorm.weight', 'input_layernorm.bias', 'self_attention.dense.bias', 'post_attention_layernorm.weight',
'post_attention_layernorm.bias', 'mlp.dense_4h_to_h.bias', 'position_embeddings.weight'
]
LAYER_CONCAT_DIM = {'self_attention.dense.weight': 1, 'mlp.dense_4h_to_h.weight': 1}
class DeepSpeedCheckpoint(object):
def __init__(self, dir, tp_degree=None, pp_degree=None, dp_degree=None):
self.dir = dir
self._validate_folder(dir)
self.zero_checkpoint = ZeROCheckpoint(dir)
self.file_list = get_files(dir)
self.layer_files = get_files_with_prefix(self.file_list, LAYER_FILE_PREFIX)
self.mp_rank_files = get_files_with_prefix(self.file_list, MODEL_FILE_PREFIX)
self.layer_keys = self._get_layer_keys()
self.layer_count = len(self.layer_keys)
self.tp_degree = self.zero_checkpoint.get_src_tp_degree() if tp_degree is None else tp_degree
self.pp_degree = self.zero_checkpoint.get_src_pp_degree() if pp_degree is None else pp_degree
self.dp_degree = self.zero_checkpoint.get_src_dp_degree() if dp_degree is None else dp_degree
self.original_world_size = self.zero_checkpoint.get_src_tp_degree() * self.zero_checkpoint.get_src_pp_degree(
) * self.zero_checkpoint.get_src_dp_degree()
self.world_size = self.tp_degree * self.pp_degree * self.dp_degree
self.old_2d_map = meg_2d_parallel_map(self.zero_checkpoint.get_src_pp_degree(),
self.zero_checkpoint.get_src_tp_degree())
self.old_2d_map.simple_init()
self.new_2d_map = reshape_meg_2d_parallel(old_pp_degree=self.zero_checkpoint.get_src_pp_degree(),
old_tp_degree=self.zero_checkpoint.get_src_tp_degree(),
new_pp_degree=self.pp_degree,
new_tp_degree=self.tp_degree)
if self.is_change_pp_degree() or self.is_change_tp_degree() or self.is_change_dp_degree():
self.zero_checkpoint.reshape(model_3d_desc(self.pp_degree, self.tp_degree, self.dp_degree))
self.global_state = {}
self._sanity_check()
self.pp_to_transformer_map = self._build_pp_transformer_map()
self.transformer_file_map = self._build_transformer_file_map()
self.tp_to_embedding_map = self._build_tp_other_layer_map(EMBEDDING_LAYER_INDEX)
self.tp_to_final_norm_map = self._build_tp_other_layer_map(FINAL_LAYER_NORM_INDEX)
self._build_global_state()
def is_change_tp_degree(self):
return self.tp_degree != self.zero_checkpoint.get_src_tp_degree()
def is_change_pp_degree(self):
return self.pp_degree != self.zero_checkpoint.get_src_pp_degree()
def is_change_dp_degree(self):
return self.dp_degree != self.zero_checkpoint.get_src_dp_degree()
def show_2d_mapping(self):
print(f'reshaped 2d map ---- begin')
for i in range(self.pp_degree):
for j in range(self.tp_degree):
file_list = self.get_2d_parallel_files(pp_index=i, tp_index=j)
print(f'[{i}, {j}] = {file_list}')
print(f'reshaped 2d map ---- end')
def show_tp_embedding_map(self):
self._dump_mapping(self.tp_to_embedding_map, 'tp_to_embedding_layers')
def show_tp_final_norm_map(self):
self._dump_mapping(self.tp_to_final_norm_map, 'tp_to_final_norm_layers')
def show_pp_transformer_map(self):
self._dump_mapping(self.pp_to_transformer_map, 'pp_to_transformer_layers')
def show_transformer_file_map(self):
self._dump_mapping(self.transformer_file_map, 'rank_to_transformer_files')
def _build_global_state(self):
sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0)
self.global_state[ARGS_KEY] = sd.get(ARGS_KEY, None)
def get_zero_checkpoint_state(self, pp_index, tp_index, dp_index) -> dict:
return self.zero_checkpoint.get_state_for_rank(pp_index=pp_index,
tp_index=tp_index,
dp_index=dp_index,
keys_to_ignore=[PARAM_SHAPES])
def get_zero_files(self, pp_index, tp_index, dp_index) -> list:
return self.zero_checkpoint.get_files_for_rank(pp_index=pp_index, tp_index=tp_index, dp_index=dp_index)
def get_embedding_layer_id(self):
return self.layer_keys[EMBEDDING_LAYER_INDEX]
def get_final_norm_layer_id(self):
return self.layer_keys[FINAL_LAYER_NORM_INDEX]
def get_iteration(self):
        if ITERATION_KEY not in self.global_state:
sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0)
return self.global_state[ITERATION_KEY]
def get_embedding_state(self, tp_index: int) -> Dict:
assert tp_index in self.tp_to_embedding_map.keys()
sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in self.tp_to_embedding_map[tp_index]]
sd = self._merge_state_dicts(sd_list)
return sd
def get_embedding_files(self, tp_index: int) -> list:
assert tp_index in self.tp_to_embedding_map.keys()
return self.tp_to_embedding_map[tp_index]
def _get_checkpoint_value(self, key):
        if key not in self.global_state:
sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
self.global_state[key] = sd.get(key, None)
return self.global_state[key]
def get_args(self):
return self._get_checkpoint_value(ARGS_KEY)
def get_checkpoint_info(self, info_key=CHECKPOINT_INFO_KEY):
return self._get_checkpoint_value(info_key)
def get_2d_parallel_state(self, tp_index: int, pp_index: int) -> dict:
assert tp_index < self.tp_degree
assert pp_index < self.pp_degree
fname_list = self.get_2d_parallel_files(tp_index=tp_index, pp_index=pp_index)
sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in fname_list]
merged_sd = None
for sd in sd_list:
if merged_sd is None:
merged_sd = sd
else:
merged_sd = merge_state(merged_sd, sd)
return merged_sd
def get_transformer_state(self, tp_index: int, pp_index: int) -> list:
assert tp_index < self.tp_degree
assert pp_index < self.pp_degree
t_list = []
for fname_list in self.transformer_file_map[(tp_index, pp_index)]:
sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in fname_list]
sd = self._merge_state_dicts(sd_list)
t_list.append(sd)
return t_list
def get_pp_transformer_map(self, pp_index: int) -> list:
assert pp_index < self.pp_degree
return self.pp_to_transformer_map[pp_index]
def get_final_norm_state(self, tp_index: int) -> Dict:
assert tp_index in self.tp_to_final_norm_map.keys()
sd = torch.load(self.tp_to_final_norm_map[tp_index][0], map_location=torch.device('cpu'))
return sd
def get_final_norm_files(self, tp_index: int) -> list:
assert tp_index in self.tp_to_final_norm_map.keys()
return self.tp_to_final_norm_map[tp_index]
def _build_tp_other_layer_map(self, layer_index: int):
assert layer_index < len(self.layer_files)
layer_files = get_files_with_prefix(self.layer_files, self.layer_keys[layer_index])
layer_file_partitions = partition_data(layer_files, self.tp_degree)
data_map = {i: flist for i, flist in enumerate(layer_file_partitions)}
return data_map
def get_2d_parallel_files(self, tp_index: int, pp_index: int) -> list:
assert tp_index < self.tp_degree
assert pp_index < self.pp_degree
file_indices = self.new_2d_map.get_data(pp_index=pp_index, tp_index=tp_index)
return [self.mp_rank_files[i] for i in file_indices]
def _build_pp_transformer_map(self):
data_map = {}
transformer_layers = self.layer_keys[1:-1]
layers_per_pp = len(transformer_layers) // self.pp_degree
data_map = {i: transformer_layers[i * layers_per_pp:(i + 1) * layers_per_pp] for i in range(0, self.pp_degree)}
return data_map
def _dump_mapping(self, data_map, map_tag=None):
if map_tag is not None:
print(f'Dump mapping: {map_tag}')
for k, v in data_map.items():
print(f'{k} = {v}')
def _build_transformer_file_map(self):
transformer_layer_keys = self.layer_keys[1:-1]
file_map = {}
# XXX: this is not guaranteed
layers_per_pp = len(transformer_layer_keys) // self.pp_degree
if layers_per_pp == 0:
layers_per_pp = 1
#print(f"{transformer_layer_keys} {layers_per_pp}")
for key_index, layer_key in enumerate(transformer_layer_keys):
pp_index = key_index // layers_per_pp
layer_files = get_files_with_prefix(self.layer_files, layer_key)
layer_file_partitions = partition_data(layer_files, self.tp_degree)
for tp_index in range(self.tp_degree):
map_key = (tp_index, pp_index)
                if map_key not in file_map:
file_map[map_key] = []
file_map[map_key].append(layer_file_partitions[tp_index])
return file_map
def _sanity_check(self):
assert len(self.mp_rank_files) % self.tp_degree == 0
assert len(self.layer_keys) > 2
assert self.zero_checkpoint.num_files % (self.pp_degree * self.tp_degree) == 0
# XXX: fix me - isn't always the case
# only true with --pp-partition-method 'type:transformer|embedding' \
# assert (len(self.layer_keys) - 2) % self.pp_degree == 0
def validate_files(self):
for file in self.file_list:
if not os.path.isfile(file):
                print(f'Error: {file} does not exist')
def _get_layer_keys(self):
key_set = set()
key_len = len(LAYER_FILE_PREFIX) + 2
for file_path in self.layer_files:
_, fname = os.path.split(file_path)
key_set.add(fname[:key_len])
return sorted(list(key_set))
def _merge_state_dicts(self, sd_list):
merged_sd = {}
for key in sd_list[0].keys():
            if key not in SEQUENTIAL_LAYERS:
cat_dim = LAYER_CONCAT_DIM.get(key, 0)
merged_sd[key] = torch.cat([sd[key] for sd in sd_list], dim=cat_dim)
else:
merged_sd[key] = sd_list[0][key]
return merged_sd
def _validate_folder(self, dir):
basic_folder_validation(dir)
file_list = get_files(dir)
for file_prefix in [MODEL_FILE_PREFIX, LAYER_FILE_PREFIX, f'{LAYER_FILE_PREFIX}01']:
ckpt_files = get_files_with_prefix(file_list, file_prefix)
assert len(
ckpt_files
) > 0, f'{dir} seems a bogus DeepSpeed checkpoint folder: Cannot find {file_prefix}* files in there.'
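# Illustrative sketch (not part of the library): inspecting a Megatron-DeepSpeed
# checkpoint folder and reshaping it to a smaller parallelism layout. The folder path
# is a placeholder and must contain the expected layer_*/mp_rank_*/zero_pp_rank_* files.
def _deepspeed_checkpoint_example(ckpt_dir='/path/to/checkpoint'):
    ds_checkpoint = DeepSpeedCheckpoint(ckpt_dir, tp_degree=1, pp_degree=1)
    ds_checkpoint.show_2d_mapping()
    embedding_sd = ds_checkpoint.get_embedding_state(tp_index=0)
    layer_sds = ds_checkpoint.get_transformer_state(tp_index=0, pp_index=0)
    return embedding_sd, layer_sds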
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .reshape_utils import partition_data
class meg_2d_parallel_map(object):
def __init__(self, pp_degree, tp_degree):
self.pp_degree = pp_degree
self.tp_degree = tp_degree
self.map = {}
def simple_init(self):
self.map = {
self._make_key(i // self.tp_degree, i % self.tp_degree): [i]
for i in range(self.pp_degree * self.tp_degree)
}
def add_data(self, pp_index, tp_index, data):
self._validate_indices(pp_index, tp_index)
assert type(data) is list
key = self._make_key(pp_index, tp_index)
        if key not in self.map:
self.map[key] = []
self.map[key] += data
def get_data(self, pp_index=None, tp_index=None):
self._validate_indices(pp_index, tp_index)
pp_indices = list(range(self.pp_degree)) if pp_index is None else [pp_index]
tp_indices = list(range(self.tp_degree)) if tp_index is None else [tp_index]
result = []
for i in pp_indices:
for j in tp_indices:
result += self.map[self._make_key(i, j)]
return result
def print_data(self, tag):
print(f'{tag}')
for key, value in self.map.items():
print(f'{key} = {value}')
def _validate_indices(self, pp_index, tp_index):
assert pp_index is None or pp_index < self.pp_degree
assert tp_index is None or tp_index < self.tp_degree
def _make_key(self, i, j):
return f'{i},{j}'
def _reshape_tp_dimension(old_2d_map, new_tp_degree):
old_pp_degree = old_2d_map.pp_degree
new_2d_map = meg_2d_parallel_map(old_pp_degree, new_tp_degree)
for i in range(old_pp_degree):
ranks_for_pp_index = old_2d_map.get_data(pp_index=i, tp_index=None)
split_ranks = partition_data(ranks_for_pp_index, new_tp_degree)
for j in range(new_tp_degree):
new_2d_map.add_data(i, j, split_ranks[j])
return new_2d_map
def _reshape_pp_dimension(old_2d_map, new_pp_degree):
old_tp_degree = old_2d_map.tp_degree
new_2d_map = meg_2d_parallel_map(new_pp_degree, old_tp_degree)
for i in range(old_tp_degree):
ranks_for_tp_index = old_2d_map.get_data(pp_index=None, tp_index=i)
split_ranks = partition_data(ranks_for_tp_index, new_pp_degree)
for j in range(new_pp_degree):
new_2d_map.add_data(j, i, split_ranks[j])
return new_2d_map
def reshape_meg_2d_parallel(old_pp_degree, old_tp_degree, new_pp_degree, new_tp_degree, verbose=False):
assert new_pp_degree <= old_pp_degree
assert new_tp_degree <= old_tp_degree
old_2d_map = meg_2d_parallel_map(old_pp_degree, old_tp_degree)
old_2d_map.simple_init()
if verbose:
old_2d_map.print_data(f'original_2d_map:')
if old_tp_degree != new_tp_degree:
new_tp_map = _reshape_tp_dimension(old_2d_map, new_tp_degree)
else:
new_tp_map = old_2d_map
if verbose:
new_tp_map.print_data(f'after_tp_reshape:')
if old_pp_degree != new_pp_degree:
final_map = _reshape_pp_dimension(new_tp_map, new_pp_degree)
else:
final_map = new_tp_map
if verbose:
final_map.print_data(f'final_2d_map:')
return final_map
def get_mpu_ranks(tp_size=1, pp_size=1, dp_size=1, virtual_pp_size=None):
"""
Initialize model data parallel groups.
Arguments:
tp_size: number of GPUs used to parallelize model tensor.
pp_size: number of GPUs used to parallelize model pipeline.
dp_size: number of GPUs used to parallelize model data.
Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we
use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
the model pipeline. The present function will
create 8 tensor model-parallel groups, 4 pipeline model-parallel groups
and 8 data-parallel groups as:
8 data_parallel groups:
[g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15]
8 tensor model-parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15]
4 pipeline model-parallel groups:
[g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
world_size = tp_size * pp_size * dp_size
print(f"\n\n*** tp={tp_size}, pp={pp_size}, dp={dp_size}, world={world_size}")
tensor_model_parallel_size = min(tp_size, world_size)
pipeline_model_parallel_size = min(pp_size, world_size)
data_parallel_size = world_size // (tensor_model_parallel_size * pipeline_model_parallel_size)
num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size
num_data_parallel_groups = world_size // data_parallel_size
# Build the data-parallel groups.
all_dp_group_ranks = []
for i in range(pipeline_model_parallel_size):
start_rank = i * num_pipeline_model_parallel_groups
end_rank = (i + 1) * num_pipeline_model_parallel_groups
for j in range(tensor_model_parallel_size):
ranks = range(start_rank + j, end_rank, tensor_model_parallel_size)
all_dp_group_ranks.append(list(ranks))
print("DP", all_dp_group_ranks)
# Build the model-parallel groups.
all_pp_group_ranks = []
for i in range(data_parallel_size):
ranks = [data_parallel_group_ranks[i] for data_parallel_group_ranks in all_dp_group_ranks]
all_pp_group_ranks.append(list(ranks))
print(f"PP", all_pp_group_ranks)
# Build the tensor model-parallel groups.
all_tp_group_ranks = []
for i in range(num_tensor_model_parallel_groups):
ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
all_tp_group_ranks.append(list(ranks))
print(f"TP", all_tp_group_ranks)
return all_tp_group_ranks, all_pp_group_ranks, all_dp_group_ranks
# # Build the pipeline model-parallel groups and embedding groups
# # (first and last rank in each pipeline model-parallel group).
# for i in range(num_pipeline_model_parallel_groups):
# ranks = range(i, world_size,
# num_pipeline_model_parallel_groups)
# print(f"EMB{i}", list(ranks))
def reshape(src, tgt):
"""
reshape([tp_size_src, pp_size_src, dp_size_src],
[tp_size_tgt, pp_size_tgt, dp_size_tgt])
"""
print(f"\n\n*** Reshaping: {src} => {tgt}")
tp_size_src, pp_size_src, dp_size_src = src
tp_size_tgt, pp_size_tgt, dp_size_tgt = tgt
tp_ranks1, pp_ranks1, dp_ranks1 = get_mpu_ranks(tp_size=tp_size_src, pp_size=pp_size_src, dp_size=dp_size_src)
tp_ranks2, pp_ranks2, dp_ranks2 = get_mpu_ranks(tp_size=tp_size_tgt, pp_size=pp_size_src, dp_size=dp_size_src)
tp_ranks3, pp_ranks3, dp_ranks3 = get_mpu_ranks(tp_size=tp_size_tgt, pp_size=pp_size_tgt, dp_size=dp_size_src)
# handle tp contraction first
print("\n*** TP contraction:")
for i, r in enumerate(tp_ranks1):
print(f'{tp_ranks1[i]} => {tp_ranks2[i]}')
# handle pp contraction next
print("\n*** PP contraction:")
for i, r in enumerate(pp_ranks1):
print(f'{pp_ranks2[i]} => {pp_ranks3[i]}')
# easy
#reshape([2,2,1],[1,1,1])
# probably need more logic to suggest how to pack
#reshape([4,4,1],[2,2,1])
#reshape([2,4,2], [8,32,1])
# get_mpu_ranks(2,2,2)
# get_mpu_ranks(4,2,1)
# get_mpu_ranks(2,4,1)
# get_mpu_ranks(1,1,8)
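# Illustrative sketch (not part of the library): contracting a (pp=4, tp=2) layout to
# (pp=2, tp=2). Each new 2D coordinate maps to the original ranks whose data it must merge.
def _reshape_2d_example():
    new_map = reshape_meg_2d_parallel(old_pp_degree=4, old_tp_degree=2, new_pp_degree=2, new_tp_degree=2)
    new_map.print_data('contracted_2d_map:')
    # e.g. the new (pp=0, tp=0) coordinate now owns the data of original ranks [0, 2]
    return new_map.get_data(pp_index=0, tp_index=0)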
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import types
from .constants import (FP32_WEIGHT_KEY, PARAM, VOCAB_DIVISIBILITY_PADDING_TENSOR, CAT_DIM)
def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size):
hp_mapping = self._hp_mapping
optim_state_keys = hp_mapping.get_optim_state_keys()
hp_keys = [FP32_WEIGHT_KEY] + optim_state_keys
checkpoint_files = {key: os.path.join(folder, f"{key}.pt") for key in hp_keys}
for file in checkpoint_files.values():
assert os.path.isfile(file), f'{file} is not a valid file'
for key in hp_keys:
ckpt_file = checkpoint_files[key]
ckpt_dict = torch.load(ckpt_file)
full_hp_param = ckpt_dict[PARAM]
# need to deal with slices that were averaged.
# the opposite of averaging here becomes an exact copy of the first slice
# I thought of 2 ways:
# implementation a. find a way for a client to pass a dict with patterns
# if any(re.search(pattern, folder) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS):
# tp_rank = 0
# tp_world_size = 1
# the other approach is to assume that the saved data is correct and if full_hp_param.shape ==
# self.shape that means we automatically copy?
# implementation b.
# this version requires no additional data passed from the client
# if the shapes already match it must be slices that were averaged - so we just hack around those
if full_hp_param.shape == self.shape:
tp_rank = 0
tp_world_size = 1
# special case for word_embeddings weights which get padded differently depending on TP degree.
# the converter to universal currently strips the original padding completely so the saved
# weight is padding-free and we just need to add new padding depending on the target TP
# degree
vocab_divisibility_padding_tensor = ckpt_dict.get(VOCAB_DIVISIBILITY_PADDING_TENSOR, None)
if vocab_divisibility_padding_tensor is not None:
# In the absence of data passed from the user wrt new padded vocab specific to tp degree
# we can again derive that data by reverse engineering the target shapes like so:
padded_target_vocab_size = self.shape[0] * tp_world_size
if padded_target_vocab_size > full_hp_param.shape[0]:
# Need to expand
padding_size = padded_target_vocab_size - full_hp_param.shape[0]
# Implement the following concat in efficient way using pad
#full_hp_param = torch.cat((full_hp_param, padding_tensor), 0)
full_hp_param = torch.nn.functional.pad(full_hp_param, (0, 0, 0, padding_size), "constant", 0)
                full_hp_param[-padding_size:, :] = vocab_divisibility_padding_tensor
else:
# Need to shrink or keep the same
full_hp_param = full_hp_param[:padded_target_vocab_size, :]
full_param_numel = full_hp_param.numel()
tp_slice_numel = self.numel()
# if key == FP32_WEIGHT_KEY and 'word_embeddings.weight' in folder:
# print_rank_0(f'{full_hp_param[:10]=}', force=True)
assert full_param_numel == tp_world_size * tp_slice_numel, \
f'Loading {ckpt_file} full param numel {full_param_numel} != tensor slice numel {tp_slice_numel} * tp_world_size {tp_world_size}'
dst_tensor = hp_mapping.hp_fragment if key == FP32_WEIGHT_KEY else hp_mapping.get_optim_state_fragment(key)
# print(f"{full_hp_param.shape=} {full_param_numel=} {folder=}")
# print(f"{dst_tensor.shape=} {dst_tensor.numel()=}{folder=}")
# since when we do many to 1 on tp we cat sometimes on dim=0 and other times on dim=1 we have to do exactly the same in reverse
chunk_dim = ckpt_dict.get(CAT_DIM, 0)
# this performs the opposite of cat when merging TP slices
tp_hp_slice = full_hp_param.chunk(tp_world_size, chunk_dim)[tp_rank]
tp_hp_slice = tp_hp_slice.flatten()
lp_frag_address = hp_mapping.lp_fragment_address
tp_hp_fragment = tp_hp_slice.narrow(0, lp_frag_address.start, lp_frag_address.numel)
assert dst_tensor.numel() == lp_frag_address.numel, \
f'Load checkpoint {key} dst_tensor numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}'
# print(f"{key} SHAPE: {tp_hp_slice.shape=}")
# print(f"{key} SHAPE: {dst_tensor.shape=}")
# print(f"{key} SHAPE: {tp_hp_fragment.shape=}")
dst_tensor.data.copy_(tp_hp_fragment.data)
def enable_universal_checkpoint(param_list):
for param in param_list:
param.load_hp_checkpoint_state = types.MethodType(load_hp_checkpoint_state, param)
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Various symbolic constants used for model checkpointing
"""
#########################################
# Optimizer checkpoint keys
#########################################
OPTIMIZER_STATE_DICT = "optimizer_state_dict"
FP32_GROUPS = "fp32_groups"
FP32_FLAT_GROUPS = 'fp32_flat_groups'
BASE_OPTIMIZER_STATE = 'base_optimizer_state'
SINGLE_PARTITION_OF_FP32_GROUPS = "single_partition_of_fp32_groups"
GROUP_PADDINGS = 'group_paddings'
PARTITION_COUNT = 'partition_count'
ZERO_STAGE = 'zero_stage'
CLIP_GRAD = 'clip_grad'
FP32_WEIGHT_KEY = "fp32"
#########################################
# Module checkpoint keys
#########################################
PARAM = 'param'
PARAM_SHAPES = 'param_shapes'
BUFFER_NAMES = 'buffer_names'
FROZEN_PARAM_SHAPES = 'frozen_param_shapes'
FROZEN_PARAM_FRAGMENTS = 'frozen_param_fragments'
#########################################
# Checkpoint naming constants
#########################################
MODEL_FILE_PREFIX = 'mp_rank_'
ZERO_FILE_PREFIX = 'zero_pp_rank_'
OPTIM_FILE_SUFFIX = '_optim_states.pt'
MODEL_FILE_SUFFIX = '_model_states.pt'
LAYER_FILE_PREFIX = 'layer_'
BF16_ZERO_FILE_PREFIX = 'bf16_' + ZERO_FILE_PREFIX
FP16_ZERO_FILE_PREFIX = 'fp16_' + ZERO_FILE_PREFIX
#########################################
# Checkpoint utility keys
#########################################
DS_VERSION = 'ds_version'
#########################################
# Universal Checkpoint keys
#########################################
UNIVERSAL_CHECKPOINT_INFO = 'universal_checkpoint_info'
UNIVERSAL_CHECKPOINT_VERSION_KEY = 'universal_checkpoint_version'
# Reserve version 0.1 for the hardcoded logic used in BLOOM-176B training
UNIVERSAL_CHECKPOINT_VERSION_VALUE = 0.2
# Vocabulary padding
VOCAB_DIVISIBILITY_PADDING_TENSOR = 'vocab_divisibility_padding_tensor'
PADDED_VOCAB_SIZE = 'padded_vocab_size'
ORIGINAL_VOCAB_SIZE = 'original_vocab_size'
# Parameter splitting/merging
PARAM_SLICE_MAPPINGS = 'param_slice_mappings'
CAT_DIM = "cat_dim"
# Regex list of parameters that require special handling
VOCABULARY_PARAMETER_PATTERNS = 'vocabulary_parameter_patterns'
PIPELINE_REPLICATED_PARAMETER_PATTERNS = 'pipeline_replicated_parameter_patterns'
PARAMETER_TO_AVERAGE_PATTERNS = 'parameter_to_average_patterns'
PARAMETER_WITH_ROW_PARALLELISM_PATTERNS = 'parameter_with_row_parallelism_patterns'
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .reshape_meg_2d import reshape_meg_2d_parallel
from .deepspeed_checkpoint import DeepSpeedCheckpoint
from .utils import (get_layer_ckpt_name_for_rank, get_model_ckpt_name_for_rank, get_zero_ckpt_name_for_rank)
from .reshape_utils import (merge_state)
from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor)
from .zero_checkpoint import ZeROCheckpoint
from .universal_checkpoint import enable_universal_checkpoint
from .constants import *
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .reshape_utils import (get_files, get_files_with_prefix, partition_data, get_zero_files)
from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX)
from .reshape_meg_2d import (reshape_meg_2d_parallel, meg_2d_parallel_map)
PP_DIM = 'PP'
TP_DIM = 'TP'
DP_DIM = 'DP'
class model_3d_desc(object):
def __init__(self, pp_degree=1, tp_degree=1, dp_degree=1):
self.pp_degree = pp_degree
self.tp_degree = tp_degree
self.dp_degree = dp_degree
def reshape(self, target_3d_desc, verbose=False):
valid_reshape, reshape_errors = self.can_reshape(target_3d_desc)
assert valid_reshape, ','.join(reshape_errors)
tgt_2d_map = reshape_meg_2d_parallel(old_pp_degree=self.pp_degree,
old_tp_degree=self.tp_degree,
new_pp_degree=target_3d_desc.pp_degree,
new_tp_degree=target_3d_desc.tp_degree,
verbose=verbose)
flat_3d_map = flatten_dp_dimension(meg_2d_map=tgt_2d_map,
src_2d_size=self.pp_degree * self.tp_degree,
dp_degree=self.dp_degree)
return unflatten_dp_dimension(meg_2d_map=flat_3d_map, dp_degree=target_3d_desc.dp_degree)
def get_desc(self):
return f'{PP_DIM},{TP_DIM},{DP_DIM} = ({self.pp_degree}, {self.tp_degree}, {self.dp_degree})'
def world_size(self):
return self.pp_degree * self.tp_degree * self.dp_degree
def is_valid(self, pp_index, tp_index, dp_index):
err_msg = []
valid = True
for index, degree, dim_name in [(pp_index, self.pp_degree, PP_DIM), (tp_index, self.tp_degree, TP_DIM),
(dp_index, self.dp_degree, DP_DIM)]:
if index >= degree:
valid = False
err_msg.append(f'{dim_name} indexing error: index {index} >= degree {degree}')
return valid, err_msg
def can_reshape(self, target_3d_desc):
err_msg = []
if target_3d_desc.pp_degree > self.pp_degree:
err_msg.append(
f'Expansion reshape not supported - {PP_DIM}: {self.pp_degree} ---> {target_3d_desc.pp_degree}')
if target_3d_desc.tp_degree > self.tp_degree:
err_msg.append(
f'Expansion reshape not supported - {TP_DIM}: {self.tp_degree} ---> {target_3d_desc.tp_degree}')
if target_3d_desc.dp_degree > self.dp_degree:
err_msg.append(
f'Expansion reshape not supported - {DP_DIM}: {self.dp_degree} ---> {target_3d_desc.dp_degree}')
return len(err_msg) == 0, err_msg
def get_model_3d_descriptor(dir):
file_list = get_files(dir)
zero_file_list = get_zero_files(dir)
num_pp0_files = len(get_files_with_prefix(file_list, f'{LAYER_FILE_PREFIX}01'))
if num_pp0_files > 0:
tp_degree = num_pp0_files
pp_degree = len(get_files_with_prefix(file_list, MODEL_FILE_PREFIX)) // tp_degree
dp_degree = max(1, len(zero_file_list) // (pp_degree * tp_degree))
else:
tp_degree = len(get_files_with_prefix(file_list, MODEL_FILE_PREFIX))
dp_degree = max(1, len(zero_file_list) // tp_degree)
pp_degree = 0
return model_3d_desc(pp_degree, tp_degree, dp_degree)
def flatten_dp_dimension(meg_2d_map, src_2d_size, dp_degree):
new_meg_2d_map = meg_2d_parallel_map(meg_2d_map.pp_degree, meg_2d_map.tp_degree)
for pp_index in range(meg_2d_map.pp_degree):
for tp_index in range(meg_2d_map.tp_degree):
dp0_indices = meg_2d_map.get_data(pp_index, tp_index)
for idx in dp0_indices:
dpX_indices = [idx + (i * src_2d_size) for i in range(dp_degree)]
new_meg_2d_map.add_data(pp_index, tp_index, dpX_indices)
return new_meg_2d_map
def unflatten_dp_dimension(meg_2d_map, dp_degree):
pp_degree = meg_2d_map.pp_degree
tp_degree = meg_2d_map.tp_degree
meg_2d_map_list = [meg_2d_parallel_map(pp_degree=pp_degree, tp_degree=tp_degree) for _ in range(dp_degree)]
for pp_index in range(pp_degree):
for tp_index in range(tp_degree):
flat_dp_indices = meg_2d_map.get_data(pp_index, tp_index)
partitioned_dp_indices = partition_data(flat_dp_indices, dp_degree)
for dp_indices, _2d_map in zip(partitioned_dp_indices, meg_2d_map_list):
_2d_map.add_data(pp_index, tp_index, dp_indices)
return meg_2d_map_list
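# Illustrative sketch (not part of the library): describing a 3D (pp, tp, dp) layout
# and checking/performing a contraction to a smaller target layout.
def _reshape_3d_example():
    src = model_3d_desc(pp_degree=2, tp_degree=2, dp_degree=2)
    tgt = model_3d_desc(pp_degree=1, tp_degree=2, dp_degree=2)
    can_reshape, errors = src.can_reshape(tgt)
    assert can_reshape, errors
    # reshape() returns one 2D (pp, tp) file-index map per target dp rank.
    dp_maps = src.reshape(tgt)
    for dp_index, _2d_map in enumerate(dp_maps):
        _2d_map.print_data(f'dp = {dp_index}')
    return dp_maps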
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
from .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX)
def get_model_ckpt_name_for_rank(base_folder, mp_rank_str):
ckpt_name = os.path.join(
base_folder,
MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX,
)
return ckpt_name
def get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank):
zero_prefix = f'{ZERO_FILE_PREFIX}{dp_rank}'
mp_rank_string = f'_{MODEL_FILE_PREFIX}{mp_rank:02d}'
zero_ckpt_name = os.path.join(
base_folder,
zero_prefix + mp_rank_string + OPTIM_FILE_SUFFIX,
)
return zero_ckpt_name
def get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank):
ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'
ckpt_path = os.path.join(base_folder, ckpt_file)
return ckpt_path
# We pass cloned tensors to torch.save() to avoid checkpoint bloat that occurs when torch.save()
# saves the underlying storage rather than the slice of the storage corresponding to individual tensors.
# This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.
# Tensor cloning helps to avoid this problem because the storage of cloned tensors is closer to the true size.
# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.
# See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing
def clone_tensors_for_torch_save(item, device=torch.device('cpu')):
"""
Returns a copy of ``item`` with all enclosed tensors replaced by clones on a specified device.
Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.
Parameters:
- ``item``: tensor to clone or (possibly nested) container of tensors to clone.
- ``device``: target device (defaults to 'cpu')
Returns:
- copy of ``item`` with cloned tensors on target device
"""
if torch.is_tensor(item):
return item.detach().clone().to(device)
elif isinstance(item, list):
return [clone_tensors_for_torch_save(v, device) for v in item]
elif isinstance(item, tuple):
return tuple([clone_tensors_for_torch_save(v, device) for v in item])
elif isinstance(item, dict):
return type(item)({k: clone_tensors_for_torch_save(v, device) for k, v in item.items()})
else:
return item
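# Illustrative sketch (not part of the library): saving a slice of a large flat buffer.
# Cloning first keeps the checkpoint at the size of the slice instead of the size of
# the whole underlying storage.
def _clone_and_save_example(path='sliced_state.pt'):
    flat_buffer = torch.zeros(1024 * 1024)
    state = {'weight_slice': flat_buffer[:10], 'step': 100}
    torch.save(clone_tensors_for_torch_save(state), path)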
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
from collections import OrderedDict
from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX)
def basic_folder_validation(dir):
assert os.path.exists(dir), f'{dir} path does not exist'
assert os.path.isdir(dir), f'{dir} is not a folder'
def get_files_with_prefix(all_files, prefix):
file_list = []
for file_path in all_files:
_, fname = os.path.split(file_path)
if fname.startswith(prefix):
file_list.append(file_path)
return sorted(file_list)
def validate_files(file_list):
for file in file_list:
if not os.path.isfile(file):
            print(f'Error: {file} does not exist')
def get_files(dir):
file_list = []
for root, _, files in os.walk(dir):
for file in files:
file_list.append(os.path.join(root, file))
return file_list
def get_zero_files(dir):
file_list = get_files(dir)
for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]:
zero_files = get_files_with_prefix(file_list, prefix)
if len(zero_files) > 0:
return zero_files
return []
def partition_data(data_list, num_partitions):
num_elems = len(data_list)
assert num_elems % num_partitions == 0
partition_size = num_elems // num_partitions
partitions_list = [data_list[i:i + partition_size] for i in range(0, num_elems, partition_size)]
return partitions_list
def _key_list_to_string(key_list):
return '.'.join(key_list)
def merge_state_dict(dict_a, dict_b, key_list):
merged_dict = type(dict_a)({})
for key, value in dict_b.items():
if key in dict_a.keys():
merged_dict[key] = merge_state(dict_a[key], dict_b[key], [str(key)])
else:
merged_dict[key] = value
return merged_dict
def merge_state_list(list_a, list_b, key_list):
if len(list_a) != len(list_b):
print(f'{_key_list_to_string(key_list)}')
raise ValueError(f'Cannot merge lists of different lengths, a = {len(list_a)} b = {len(list_b)}')
return [merge_state(a, b, key_list) for a, b in zip(list_a, list_b)]
def merge_state(state_a, state_b, key_list=[]):
if type(state_a) != type(state_b):
key_list_string = _key_list_to_string(key_list)
print(f'key_list = {key_list_string}')
raise ValueError(f'Cannot merge two states of types {type(state_a)} and type {type(state_b)}')
if type(state_a) in (dict, OrderedDict):
return merge_state_dict(state_a, state_b, key_list)
elif type(state_a) in (list, tuple):
return type(state_a)(merge_state_list(state_a, state_b, key_list))
elif torch.is_tensor(state_a):
return torch.cat([state_a, state_b], 0)
else:
return state_a
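# Illustrative sketch (not part of the library): tensors are concatenated along dim 0,
# containers are merged recursively, and non-tensor leaves keep the first state's value.
def _merge_state_example():
    state_a = {'weight': torch.ones(2, 4), 'step': 10}
    state_b = {'weight': torch.zeros(2, 4), 'step': 10}
    merged = merge_state(state_a, state_b)
    assert merged['weight'].shape == (4, 4)
    # partition_data splits a list into equal contiguous chunks, e.g. per tp rank.
    assert partition_data(list(range(8)), 2) == [[0, 1, 2, 3], [4, 5, 6, 7]]
    return merged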
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from .constants import (BASE_OPTIMIZER_STATE, GROUP_PADDINGS, OPTIMIZER_STATE_DICT, PARTITION_COUNT)
from .reshape_utils import (basic_folder_validation, get_zero_files, merge_state)
from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor)
GROUP_STATE_KEY = 'state'
class ZeROCheckpoint(object):
def __init__(self, dir):
basic_folder_validation(dir)
self.dir = dir
self.file_list = get_zero_files(dir)
self.num_files = len(self.file_list)
assert self.num_files > 0, f'No ZeRO files found in {dir}'
self.src_3d = get_model_3d_descriptor(dir)
self.target_3d = model_3d_desc(pp_degree=self.src_3d.pp_degree,
tp_degree=self.src_3d.tp_degree,
dp_degree=self.src_3d.dp_degree)
self._3d_file_map = self.src_3d.reshape(self.target_3d)
def get_src_world_size(self):
return self.src_3d.world_size()
def get_src_tp_degree(self):
return self.src_3d.tp_degree
def get_src_pp_degree(self):
return self.src_3d.pp_degree
def get_src_dp_degree(self):
return self.src_3d.dp_degree
def get_file_indices_for_rank(self, pp_index, tp_index, dp_index):
assert dp_index < len(self._3d_file_map), f'DP index {dp_index} >= DP degree {len(self._3d_file_map)}'
dp_2d_map = self._3d_file_map[dp_index]
return dp_2d_map.get_data(pp_index, tp_index)
def get_files_for_rank(self, pp_index, tp_index, dp_index):
file_idx_list = self.get_file_indices_for_rank(pp_index, tp_index, dp_index)
return [self.file_list[idx] for idx in file_idx_list]
def get_state_for_rank(self, pp_index, tp_index, dp_index, keys_to_ignore=[], strip_tensor_paddings=True):
state_file_list = self.get_files_for_rank(pp_index, tp_index, dp_index)
merged_sd = None
for state_file in state_file_list:
sd = torch.load(state_file, map_location=torch.device('cpu'))
for key in keys_to_ignore:
sd.pop(key, None)
if strip_tensor_paddings:
self._strip_tensor_paddings(sd)
if merged_sd is None:
merged_sd = sd
else:
merged_sd = merge_state(merged_sd, sd)
self._update_partition_count(merged_sd)
if strip_tensor_paddings:
self._clear_group_paddings(merged_sd)
return merged_sd
def print_3d_index_map(self, tag=None):
if tag:
print(f'3D index map: {tag}')
for dp_index, _2d_map in enumerate(self._3d_file_map):
_2d_map.print_data(f'dp = {dp_index}')
def print_3d_file_map(self, tag=None):
if tag:
print(f'3D file map: {tag}')
for dp_index, _2d_map in enumerate(self._3d_file_map):
            for pp_index in range(_2d_map.pp_degree):
                for tp_index in range(_2d_map.tp_degree):
file_index_list = _2d_map.get_data(pp_index, tp_index)
file_list = [self.file_list[idx] for idx in file_index_list]
print(f'{pp_index}, {tp_index}, {dp_index} => {file_list}')
def reshape(self, target_3d_desc: model_3d_desc):
self.target_3d = target_3d_desc
self._3d_file_map = self.src_3d.reshape(self.target_3d)
def _strip_tensor_paddings(self, sd):
param_group_states = self._get_param_group_states(sd)
if param_group_states is None:
return
group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS)
if group_paddings is None:
return
for key, group_state in param_group_states.items():
if group_paddings[key] == 0:
continue
for state_name, state_value in group_state.items():
if torch.is_tensor(state_value):
raw_length = state_value.numel() - group_paddings[key]
group_state[state_name] = torch.narrow(state_value, 0, 0, raw_length).clone()
def _clear_group_paddings(self, sd):
group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS)
if group_paddings:
num_groups = len(group_paddings)
sd[OPTIMIZER_STATE_DICT][GROUP_PADDINGS] = [0] * num_groups
def _get_optimizer_state(self, sd, state_key):
optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None)
if optimizer_state is None:
return None
return optimizer_state.get(state_key, None)
def _get_param_group_states(self, sd):
optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None)
if optimizer_state is None:
return None
base_optimizer_state = optimizer_state.get(BASE_OPTIMIZER_STATE, None)
if base_optimizer_state is None:
return None
return base_optimizer_state.get(GROUP_STATE_KEY, None)
def _update_partition_count(self, sd):
partition_counts = self._get_optimizer_state(sd, PARTITION_COUNT)
if partition_counts:
num_groups = len(partition_counts)
sd[OPTIMIZER_STATE_DICT][PARTITION_COUNT] = [self.target_3d.dp_degree] * num_groups
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.runtime.config_utils import get_scalar_param, DeepSpeedConfigObject
from deepspeed.nebula.constants import *
class DeepSpeedNebulaConfig(DeepSpeedConfigObject):
def __init__(self, param_dict):
super(DeepSpeedNebulaConfig, self).__init__()
self.enabled = None
self.persistent_storage_path = None
self.persistent_time_interval = None
self.num_of_version_in_retention = None
self.enable_nebula_load = None
if NEBULA in param_dict.keys():
nebula_dict = param_dict[NEBULA]
else:
nebula_dict = {}
self._initialize(nebula_dict)
def _initialize(self, nebula_dict):
self.enabled = get_scalar_param(nebula_dict, NEBULA_ENABLED, NEBULA_ENABLED_DEFAULT)
self.load_path = get_scalar_param(nebula_dict, NEBULA_LOAD_PATH, NEBULA_LOAD_PATH_DEFAULT)
self.enable_nebula_load = get_scalar_param(nebula_dict, NEBULA_ENABLE_NEBULA_LOAD,
NEBULA_ENABLE_NEBULA_LOAD_DEFAULT)
self.persistent_storage_path = get_scalar_param(nebula_dict, NEBULA_PERSISTENT_STORAGE_PATH,
NEBULA_PERSISTENT_STORAGE_PATH_DEFAULT)
self.persistent_time_interval = get_scalar_param(nebula_dict, NEBULA_PERSISTENT_TIME_INTERVAL,
NEBULA_PERSISTENT_TIME_INTERVAL_DEFAULT)
self.num_of_version_in_retention = get_scalar_param(nebula_dict, NEBULA_NUM_OF_VERSION_IN_RETENTION,
NEBULA_NUM_OF_VERSION_IN_RETENTION_DEFAULT)
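# Hedged usage sketch: constructing the nebula config directly from a raw
# ds_config dictionary. The keys follow the NEBULA_* constants in
# deepspeed/nebula/constants.py; the storage path below is illustrative.
if __name__ == "__main__":
    _example_ds_config = {
        "nebula": {
            "enabled": True,
            "persistent_storage_path": "/tmp/nebula_ckpt",
            "persistent_time_interval": 100,
            "num_of_version_in_retention": 2,
            "enable_nebula_load": True,
        }
    }
    _nebula_cfg = DeepSpeedNebulaConfig(_example_ds_config)
    print(_nebula_cfg.enabled, _nebula_cfg.persistent_storage_path)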
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#########################################
# nebula
#########################################
# Nebula. By default, this feature is not enabled.
# Users can configure it in ds_config.json as shown in the example below:
NEBULA_FORMAT = '''
nebula should be enabled as:
"session_params": {
"nebula": {
"enabled": true,
"persistent_storage_path": "/foo/bar",
"persistent_time_interval": 100,
"num_of_version_in_retention": 2,
"enable_nebula_load": true
}
}
'''
NEBULA = "nebula"
NEBULA_ENABLED = "enabled"
NEBULA_ENABLED_DEFAULT = False
# There is a case where the customer wants to load a checkpoint saved
# by raw torch. Nebula cannot load a torch checkpoint directly because
# the two use different folder structures, even though the saved bytes
# are identical for torch and nebula.
# In this case, nebula load must be disabled so that raw torch load is used.
# Customers can simply set NEBULA_ENABLE_NEBULA_LOAD to False and then load
# the original DeepSpeed way, i.e. by setting the value of "--load".
NEBULA_ENABLE_NEBULA_LOAD = "enable_nebula_load"
NEBULA_ENABLE_NEBULA_LOAD_DEFAULT = True
# To resume from a previous checkpoint saved by nebula, set
# NEBULA_LOAD_PATH to the parent folder of the checkpoint.
# If NEBULA_LOAD_PATH is None, NEBULA_PERSISTENT_STORAGE_PATH
# is used as the default load path.
NEBULA_LOAD_PATH = "nebula_load_path"
NEBULA_LOAD_PATH_DEFAULT = None
# Nebula saves checkpoints asynchronously under NEBULA_PERSISTENT_STORAGE_PATH.
NEBULA_PERSISTENT_STORAGE_PATH = "persistent_storage_path"
NEBULA_PERSISTENT_STORAGE_PATH_DEFAULT = None
# Time interval to trigger the nebula persistence.
NEBULA_PERSISTENT_TIME_INTERVAL = "persistent_time_interval"
NEBULA_PERSISTENT_TIME_INTERVAL_DEFAULT = 100
# Number of checkpoint versions to keep in memory. For example, if the
# value is 2 and checkpoints 1 and 2 are ready, then when checkpoint 3
# arrives, checkpoint 1 is evicted once it has been persisted to disk.
NEBULA_NUM_OF_VERSION_IN_RETENTION = "num_of_version_in_retention"
NEBULA_NUM_OF_VERSION_IN_RETENTION_DEFAULT = 2
# Nebula envs
NEBULA_EXPORT_ENVS = [
'DLTS_JOB_ID', 'DLTS_NUM_WORKER', 'NEBULA_PERSISTENT_STORAGE_PATH', 'NEBULA_PERSISTENT_TIME_INTERVAL',
'AML_RUN_ID', 'AZUREML_RUN_TOKEN', 'AZUREML_WORKSPACE_SCOPE', 'AZUREML_EXPERIMENT_SCOPE',
'AZUREML_RUN_HISTORY_SERVICE_ENDPOINT', 'AZUREML_RUN_ID', 'NEBULA_MEMORY_BUFFER_SIZE',
'AZUREML_PARAMETER_ITPJOB_NAME', 'FC_TASKROLE_NAME', 'FC_TASK_INDEX', 'MASTER_HOST', 'LOCAL_HOST',
'AZUREML_BLOB_ACCOUNT_NAME', 'AZUREML_BLOB_ACCOUNT_KEY'
]
# ITP env files
DLTS_POD_ENV_PATH = '/dlts-runtime/env/pod.env'
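# Hedged sketch (illustrative only; this helper is not part of DeepSpeed): how a
# launcher might snapshot the nebula-related environment variables listed above
# before spawning workers.
def _example_collect_nebula_envs():
    import os
    return {name: os.environ[name] for name in NEBULA_EXPORT_ENVS if name in os.environ}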
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''Copyright The Microsoft DeepSpeed Team'''
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.inference.config import DeepSpeedInferenceConfig
from deepspeed.module_inject.replace_policy import replace_policies
from deepspeed.module_inject.utils import policy_to_ds_container
from .engine import DeepSpeedEngine
from .utils import TLinear, get_inactive_params
from deepspeed.runtime.zero import GatheredParameters
import time
import gc
import math
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from torch import nn
from deepspeed.utils import logger
from deepspeed.ops.op_builder import InferenceBuilder
from deepspeed.module_inject.layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding
try:
import transformers
OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding
except (ImportError, AttributeError):
OPTLearnedPositionalEmbedding = None
inference_cuda_module = None
class DeepSpeedHybridEngine(DeepSpeedEngine):
r"""DeepSpeed engine for training and inference."""
inference_mp_group = None
def __init__(self, args, model, **kwargs):
super().__init__(args, model, **kwargs)
        # sync the RNG state across all GPUs
_rng_state = get_accelerator().get_rng_state().to(get_accelerator().current_device_name())
dist.broadcast(_rng_state, 0)
get_accelerator().set_rng_state(_rng_state.cpu())
self.Z3_enabled = (self._config.zero_config.stage == 3)
self.gather_all_layers = self._config.hybrid_engine.pin_parameters
# inference containers / fwds
self._inference_containers = []
self._orig_modules = []
self._orig_fwds = []
self.create_inference_module()
# Performance stats
self._t_start = None
self._total_latency = 0
self._iters = 0
self._training_start_time = None
self._generate_latency = 0
self._training_latency = 0
self._total_batch_size = None
self._gather_latency = 0
global inference_cuda_module
if inference_cuda_module is None:
builder = InferenceBuilder()
inference_cuda_module = builder.load()
self.is_lora_fused = False
def convert_to_linear_transposed(self, model):
def _replace_linear_layer(r_module, parent_type=None, prev_type=None):
for name, child in r_module.named_children():
if child.__class__ in [torch.nn.Linear] and \
(parent_type is torch.nn.ModuleList or prev_type is torch.nn.ModuleList):
setattr(r_module, name, TLinear(child, name))
else:
_replace_linear_layer(child, type(r_module), prev_type=parent_type)
return r_module
_replace_linear_layer(model)
def new_inference_container(self, orig_layer, policy_cls, layer_id):
policy = policy_cls(orig_layer, inference=True)
_container = policy_to_ds_container(
policy=policy,
config=DeepSpeedInferenceConfig(
set_empty_params=True,
dtype=torch.float16 if self._config.fp16_enabled else torch.float32,
max_out_tokens=self._config.hybrid_engine.max_out_tokens,
min_out_tokens=self._config.hybrid_engine.max_out_tokens,
transposed_mode=True,
),
model_config=self.module.config if hasattr(self.module, 'config') else None,
layer_id=layer_id,
child=orig_layer)
if self.mpu is not None:
if hasattr(self.mpu, 'get_model_parallel_world_size'):
_container.set_tensor_parallel_config(self.mpu.get_model_parallel_world_size(),
self.mpu.get_model_parallel_group())
else:
_container.set_tensor_parallel_config(self.mpu.get_tensor_model_parallel_world_size(),
self.mpu.get_tensor_model_parallel_group())
else:
_container.set_tensor_parallel_config(self._config.hybrid_engine.inference_tp_size, self.mp_group)
_container.initialize_tensors(enable_training=True)
_container.create_ds_model_config()
_container.create_module()
_container.set_params_wo_copy(Z3_enabled=self.Z3_enabled)
return _container
def populate_all_inference_policies(self):
self.inference_policies = {}
for plcy in replace_policies:
_ = plcy(None)
if isinstance(plcy._orig_layer_class, list):
for orig_layer_class in plcy._orig_layer_class:
self.inference_policies.update({orig_layer_class: (self.new_inference_container, plcy)})
elif plcy._orig_layer_class is not None:
self.inference_policies.update({plcy._orig_layer_class: (self.new_inference_container, plcy)})
self.inference_policies.update({
nn.Linear: (LinearLayer, ),
nn.Embedding: (EmbeddingLayer, ),
nn.LayerNorm: (Normalize, ),
OPTLearnedPositionalEmbedding: (OPTEmbedding, )
})
def _fuse_lora(self, params, lora_params):
maybe_has_lora_params = [p for p in params if len(p.shape) > 1]
for lora_param, weight in zip(lora_params, maybe_has_lora_params):
if len(lora_param) == 3:
lora_right_weight, \
lora_left_weight, \
lora_scaling = lora_param
weight.data += lora_scaling * torch.matmul(lora_left_weight.t(), lora_right_weight.t())
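    # In effect, for each LoRA triple (right, left, scaling) paired with a 2-D
    # base weight W, fusion performs W += scaling * (left^T @ right^T); the
    # matching subtraction in _unfuse_lora below reverses it exactly (up to
    # floating-point rounding), so weights can be fused for fast inference and
    # unfused again before training updates.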
def fuse_lora_weight(self):
for layer_id in range(len(self.layer_params)):
self._fuse_lora(self.layer_params[layer_id], self.lora_params[layer_id])
def _unfuse_lora(self, params, lora_params):
maybe_has_lora_params = [p for p in params if len(p.shape) > 1]
for lora_param, weight in zip(lora_params, maybe_has_lora_params):
if len(lora_param) == 3:
lora_right_weight, \
lora_left_weight, \
lora_scaling = lora_param
weight.data -= lora_scaling * torch.matmul(lora_left_weight.t(), lora_right_weight.t())
def unfuse_lora_weight(self):
for layer_id in range(len(self.layer_params)):
self._unfuse_lora(self.layer_params[layer_id], self.lora_params[layer_id])
def unfuse_lora_weight_non_pinned(self):
for layer_id in range(len(self.layer_params)):
non_active_params = get_inactive_params(self.layer_params[layer_id])
non_active_lora_params = get_inactive_params(self.layer_lora_params[layer_id])
non_active_params.extend(non_active_lora_params)
with GatheredParameters(non_active_params):
self._unfuse_lora(self.layer_params[layer_id], self.lora_params[layer_id])
def retake_inference_cache(self):
if self._config.hybrid_engine.release_inference_cache:
retake_success = inference_cuda_module.retake_workspace()
if not retake_success:
logger.warning("Unable to acquire workspace on first attempt, emptying cache and retrying.")
gc.collect()
get_accelerator().empty_cache()
retake_success = inference_cuda_module.retake_workspace()
if not retake_success:
raise RuntimeError("Unable to retake inference workspace.")
def generate(self, *inputs, **kwargs):
if self._total_batch_size is None:
bsz = inputs[0].shape[0] if len(inputs) > 0 else \
kwargs['input_ids'].shape[0]
self._total_batch_size = bsz * dist.get_world_size()
self._t0 = time.time()
if self.Z3_enabled and self.gather_all_layers:
if self._config.hybrid_engine.inference_tp_size > 1:
non_tp_params = []
for other_layer in self._other_layers:
non_tp_params.extend(list(other_layer.parameters()))
partition_size = self._config.hybrid_engine.tp_gather_partition_size
layer_groups = math.ceil(len(self.layer_params) / partition_size)
for lg in range(layer_groups):
non_active_params = []
non_active_lora_params = []
for layer_id in range(lg * partition_size, min(len(self.layer_params), (lg + 1) * partition_size),
1):
non_tp_params.extend(self.layer_params[layer_id][:4])
non_active_params.extend(get_inactive_params(self.layer_params[layer_id]))
non_active_params.extend(get_inactive_params(self.layer_lora_params[layer_id]))
with GatheredParameters(non_active_params):
for layer_id in range(lg * partition_size,
min(len(self.layer_params), (lg + 1) * partition_size), 1):
if len(self.all_lora_params) > 0:
self._fuse_lora(self.layer_params[layer_id], self.lora_params[layer_id])
if self.mpu is not None:
self._inference_containers[layer_id].apply_tensor_parallelism(self.mp_replace,
reversed_dim=True)
# TODO(cmikeh2) Evaluate if this can be deferred when release_inference_cache
# is enabled.
gc.collect()
get_accelerator().empty_cache()
self._gather_latency = time.time() - self._t0
input_shape = inputs[0].shape if len(inputs) > 0 else \
kwargs['input_ids'].shape
output = torch.zeros(
(input_shape[0] * self._config.hybrid_engine.inference_tp_size, ) + input_shape[1:],
dtype=inputs[0].dtype if len(inputs) > 0 else kwargs['input_ids'].dtype,
device=inputs[0].device if len(inputs) > 0 else kwargs['input_ids'].device)
input_cont = inputs[0].contiguous() if len(inputs) > 0 else kwargs['input_ids'].contiguous()
dist.all_gather_into_tensor(output, input_cont, group=self.mp_group)
if len(inputs) > 0:
inputs = (output, )
else:
kwargs['input_ids'] = output
self.retake_inference_cache()
non_active_params = get_inactive_params(non_tp_params)
with GatheredParameters(non_active_params):
generate_ret_vals = self._generate(*inputs, **kwargs)
for layer_id in range(len(self.layer_params)):
self._inference_containers[layer_id].release_memory()
rank = dist.get_rank(group=self.mp_group)
generate_ret_vals = generate_ret_vals[input_shape[0] * rank:input_shape[0] * (rank + 1)]
else:
non_active_layers = get_inactive_params(self.all_layers_params)
non_active_lora_params = get_inactive_params(self.all_lora_params)
non_active_layers.extend(non_active_lora_params)
with GatheredParameters(non_active_layers):
self._gather_latency = time.time() - self._t0
if len(self.all_lora_params) > 0:
self.fuse_lora_weight()
self.retake_inference_cache()
generate_ret_vals = self._generate(*inputs, **kwargs)
if len(self.all_lora_params) > 0:
self.unfuse_lora_weight()
else:
if len(self.all_lora_params) > 0 and (not self.Z3_enabled):
self.fuse_lora_weight()
self.retake_inference_cache()
generate_ret_vals = self._generate(*inputs, **kwargs)
if len(self.all_lora_params) > 0:
if (not self.Z3_enabled):
self.unfuse_lora_weight()
else:
self.unfuse_lora_weight_non_pinned()
self.is_lora_fused = False
if self._config.hybrid_engine.release_inference_cache:
inference_cuda_module.release_workspace()
gc.collect()
get_accelerator().empty_cache()
self._generate_latency = time.time() - self._t0 - self._gather_latency
return generate_ret_vals
def create_inference_containers(self, module, layer_id=0):
for name, child in module.named_children():
if child.__class__ in self.inference_policies:
if self.inference_policies[child.__class__][0] == self.new_inference_container:
self._inference_containers.append(self.inference_policies[child.__class__][0](
child, self.inference_policies[child.__class__][-1], layer_id))
self._orig_modules.append(child)
self._orig_fwds.append(child.forward)
self.layer_params.append(self._inference_containers[layer_id].get_all_params())
self.lora_params.append(self._inference_containers[layer_id].get_lora_params())
self.layer_lora_params.append([])
for lora_param in self.lora_params[layer_id]:
self.layer_lora_params[layer_id].extend(lora_param[:-1])
self.all_lora_params.extend(lora_param[:-1])
layer_id += 1
else:
self._other_layers.append(self.inference_policies[child.__class__][0](
weight=child.weight, bias=child.bias if hasattr(child, 'bias') else None))
self._orig_modules_others.append(child)
self._orig_fwds_others.append(child.forward)
else:
self.create_inference_containers(child, layer_id=layer_id)
def create_inference_module(self):
self.layer_params = []
self.layer_lora_params = []
self.lora_params = []
self.all_lora_params = []
self._other_layers = []
self._orig_modules_others = []
self._orig_fwds_others = []
if self._config.hybrid_engine.inference_tp_size > 1:
if self.mpu is None:
global_rank = dist.get_rank()
world_size = dist.get_world_size()
mp_group_id = global_rank // self._config.hybrid_engine.inference_tp_size
num_mp_groups = world_size // self._config.hybrid_engine.inference_tp_size
for mp_group_id in range(num_mp_groups):
ranks = list(
range(mp_group_id * self._config.hybrid_engine.inference_tp_size, \
(mp_group_id + 1) * self._config.hybrid_engine.inference_tp_size, \
1)
)
mp_group = dist.new_group(ranks)
if global_rank in ranks:
# mp_group is used for broader collective
self.mp_group = mp_group
# mp_replace is used for container tensor slicing
from deepspeed.module_inject import ReplaceWithTensorSlicing
self.mp_replace = ReplaceWithTensorSlicing(
mp_group=self.mp_group,
mp_size=self._config.hybrid_engine.inference_tp_size,
out_dim=0,
in_dim=1)
else:
self.mp_group = self.mpu.get_model_parallel_group() if hasattr(self.mpu, 'get_model_parallel_group') else \
self.mpu.get_tensor_model_parallel_group()
from deepspeed.module_inject import ReplaceWithTensorSlicing
self.mp_replace = ReplaceWithTensorSlicing(mp_group=self.mp_group,
mp_size=self._config.hybrid_engine.inference_tp_size,
out_dim=0,
in_dim=1)
else:
self.mp_group = None
self.mp_replace = None
self.populate_all_inference_policies()
self.all_layers_params = list(self.module.parameters())
self.create_inference_containers(self.module)
if len(self._inference_containers) > 0:
self._generate = self.module.generate
self.module.generate = self.generate
self._t0 = time.time()
def _zero3_forward(self, layer_id):
def run_forward(*inputs, **kwargs):
non_active_params = get_inactive_params(self.layer_params[layer_id])
non_active_lora_params = get_inactive_params(self.layer_lora_params[layer_id])
non_active_params.extend(non_active_lora_params)
with GatheredParameters(non_active_params):
if len(self.all_lora_params) > 0:
# Use the is_lora_fused flag to prevent multiple fusion in Z3 with non-pinned memory
if not self.is_lora_fused:
self._fuse_lora(self.layer_params[layer_id], self.lora_params[layer_id])
# Set the is_lora_fused to true when reaching the last layer
if layer_id == len(self.layer_params) - 1:
self.is_lora_fused = True
return self._inference_containers[layer_id].module.forward(*inputs, **kwargs)
return run_forward
def eval(self):
if self._t_start is not None:
latency = time.time() - self._t_start
self._total_latency = self._total_latency + latency
self._iters = self._iters + 1
if not dist.is_initialized() or dist.get_rank() == 0:
others = latency - (self._generate_latency + self._training_latency)
print(f'|E2E latency={(latency):.2f}s ' + \
f'|Gather latency={self._gather_latency:.2f}s ({(self._gather_latency / latency * 100):.2f}%) '
f'|Generate time={(self._generate_latency):.2f}s ({(self._generate_latency / latency * 100):.2f}%) ' + \
f'|Training time={(self._training_latency):.2f}s ({(self._training_latency / latency * 100):.2f}%) ' + \
f'|Others={others:.2f} ({(others / latency * 100):.2f}%)'
f'|CurSamplesPerSec={(1 / latency * self._total_batch_size):.2f} ' + \
f'|AvgSamplesPerSec={(1 / (self._total_latency / self._iters) * self._total_batch_size):.2f}')
self._t_start = time.time()
self._training_latency = 0
super().eval()
if len(self._inference_containers) > 0:
for i, (orig_module, inference_container) in enumerate(zip(self._orig_modules,
self._inference_containers)):
if self.Z3_enabled and not self.gather_all_layers:
orig_module.forward = self._zero3_forward(i)
else:
orig_module.forward = inference_container.module.forward
inference_container.transform_for_inference()
if not self.Z3_enabled or self.gather_all_layers:
for orig_module, inference_layer in zip(self._orig_modules_others, self._other_layers):
orig_module.forward = inference_layer.forward
if self.Z3_enabled:
gc.collect()
get_accelerator().empty_cache()
if self._t_start is None:
self._t_start = time.time()
def train(self, mode=True):
if mode and len(self._orig_modules) > 0:
for inference_container, orig_module, orig_fwd in zip(self._inference_containers, self._orig_modules,
self._orig_fwds):
inference_container.transform_for_training()
orig_module.forward = orig_fwd
for orig_module, orig_fwd in zip(self._orig_modules_others, self._orig_fwds_others):
orig_module.forward = orig_fwd
super().train(mode)
if mode:
self._training_start_time = time.time()
def step(self, lr_kwargs=None):
super().step(lr_kwargs=lr_kwargs)
if len(self._inference_containers) > 0:
for inference_container in self._inference_containers:
inference_container.reset_params()
if self._training_start_time is not None:
self._training_latency += (time.time() - self._training_start_time)
self._training_start_time = time.time()
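# Hedged usage sketch (assumptions: an RLHF-style loop, a ds_config containing
# "hybrid_engine": {"enabled": true}, and hypothetical names such as actor_model,
# prompt_ids and compute_ppo_loss). deepspeed.initialize() returns this hybrid
# engine when the hybrid_engine section is enabled; eval() swaps in the inference
# containers and train() restores the original module forwards, as implemented above.
#
#   engine, optimizer, _, _ = deepspeed.initialize(model=actor_model, config=ds_config)
#   engine.eval()
#   responses = engine.generate(prompt_ids, max_new_tokens=128)
#   engine.train()
#   loss = compute_ppo_loss(engine, responses)   # hypothetical loss function
#   engine.backward(loss)
#   engine.step()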
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from typing import Union
from enum import Enum
import torch
import json
import hjson
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import get_monitor_config
from deepspeed import comm as dist
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARALLEL_SIZE,
MODEL_PARALLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
from .data_pipeline.config import get_data_efficiency_enabled, get_data_efficiency_config, get_curriculum_enabled_legacy, get_curriculum_params_legacy
from .data_pipeline.constants import *
TENSOR_CORE_ALIGN_SIZE = 8
ADAGRAD_OPTIMIZER = 'adagrad'
ADAM_OPTIMIZER = 'adam'
ADAMW_OPTIMIZER = 'adamw'
LAMB_OPTIMIZER = 'lamb'
ONEBIT_ADAM_OPTIMIZER = 'onebitadam'
ZERO_ONE_ADAM_OPTIMIZER = 'zerooneadam'
ONEBIT_LAMB_OPTIMIZER = 'onebitlamb'
DEEPSPEED_OPTIMIZERS = [
ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER,
ZERO_ONE_ADAM_OPTIMIZER
]
# extra optimizer parameters for adam/adamw
TORCH_ADAM_PARAM = "torch_adam"
# default to adamw logic for adam/adamw optimizers unless user explicitly opts out
ADAM_W_MODE = "adam_w_mode"
ADAM_W_MODE_DEFAULT = True
class DeepSpeedConfigError(Exception):
pass
class DtypeEnum(Enum):
# The torch dtype must always be the first value (so we return torch.dtype)
fp16 = torch.float16, "torch.float16", "fp16", "float16", "half"
fp32 = torch.float32, "torch.float32", "fp32", "float32", "float"
int8 = torch.int8, "torch.int8", "int8"
bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16"
# Copied from https://stackoverflow.com/a/43210118
# Allows us to use multiple values for each Enum index and returns first
# listed value when Enum is called
def __new__(cls, *values):
obj = object.__new__(cls)
# first value is canonical value
obj._value_ = values[0]
for other_value in values[1:]:
cls._value2member_map_[other_value] = obj
obj._all_values = values
return obj
def __repr__(self):
return "<%s.%s: %s>" % (
self.__class__.__name__,
self._name_,
", ".join([repr(v) for v in self._all_values]),
)
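# Hedged sketch: thanks to the multi-value __new__ above, every alias resolves to
# the same member, whose canonical value is the torch dtype listed first.
if __name__ == "__main__":
    assert DtypeEnum("fp16") is DtypeEnum(torch.float16) is DtypeEnum.fp16
    assert DtypeEnum("bfloat16").value == torch.bfloat16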
def get_pld_enabled(param_dict):
if PROGRESSIVE_LAYER_DROP in param_dict.keys():
return get_scalar_param(param_dict[PROGRESSIVE_LAYER_DROP], PLD_ENABLED, PLD_ENABLED_DEFAULT)
else:
return False
def get_pld_params(param_dict):
if PROGRESSIVE_LAYER_DROP in param_dict.keys():
pld_params = copy.copy(param_dict[PROGRESSIVE_LAYER_DROP])
pld_params.pop(PLD_ENABLED)
return pld_params
else:
return False
def get_amp_enabled(param_dict):
if AMP in param_dict.keys():
return get_scalar_param(param_dict[AMP], AMP_ENABLED, AMP_ENABLED_DEFAULT)
else:
return False
def get_amp_params(param_dict):
if AMP in param_dict.keys():
amp_params = copy.copy(param_dict[AMP])
amp_params.pop(AMP_ENABLED)
return amp_params
else:
return False
def get_fp16_enabled(param_dict):
if FP16 in param_dict.keys():
return get_scalar_param(param_dict[FP16], FP16_ENABLED, FP16_ENABLED_DEFAULT)
else:
return False
def get_bfloat16_enabled(param_dict):
for key in [BFLOAT16, BFLOAT16_OLD]:
if key in param_dict.keys():
return get_scalar_param(param_dict[key], BFLOAT16_ENABLED, BFLOAT16_ENABLED_DEFAULT)
return False
def get_fp16_master_weights_and_grads_enabled(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16], FP16_MASTER_WEIGHTS_AND_GRADS, FP16_MASTER_WEIGHTS_AND_GRADS_DEFAULT)
else:
return False
def get_fp16_auto_cast(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16], FP16_AUTO_CAST, FP16_AUTO_CAST_DEFAULT)
def get_loss_scale(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16], FP16_LOSS_SCALE, FP16_LOSS_SCALE_DEFAULT)
elif get_bfloat16_enabled(param_dict):
return 1.0
else:
return FP16_LOSS_SCALE_DEFAULT
def get_initial_dynamic_scale(param_dict):
if get_fp16_enabled(param_dict):
initial_scale_power = get_scalar_param(param_dict[FP16], FP16_INITIAL_SCALE_POWER,
FP16_INITIAL_SCALE_POWER_DEFAULT)
elif get_bfloat16_enabled(param_dict):
initial_scale_power = 0
else:
initial_scale_power = FP16_INITIAL_SCALE_POWER_DEFAULT
return 2**initial_scale_power
def get_dynamic_loss_scale_args(param_dict):
loss_scale_args = None
if get_fp16_enabled(param_dict):
fp16_dict = param_dict[FP16]
dynamic_loss_args = [
FP16_INITIAL_SCALE_POWER,
FP16_LOSS_SCALE_WINDOW,
FP16_MIN_LOSS_SCALE,
FP16_HYSTERESIS,
]
if any(arg in list(fp16_dict.keys()) for arg in dynamic_loss_args):
init_scale = get_scalar_param(fp16_dict, FP16_INITIAL_SCALE_POWER, FP16_INITIAL_SCALE_POWER_DEFAULT)
scale_window = get_scalar_param(fp16_dict, FP16_LOSS_SCALE_WINDOW, FP16_LOSS_SCALE_WINDOW_DEFAULT)
delayed_shift = get_scalar_param(fp16_dict, FP16_HYSTERESIS, FP16_HYSTERESIS_DEFAULT)
min_loss_scale = get_scalar_param(fp16_dict, FP16_MIN_LOSS_SCALE, FP16_MIN_LOSS_SCALE_DEFAULT)
loss_scale_args = {
INITIAL_LOSS_SCALE: 2**init_scale,
SCALE_WINDOW: scale_window,
DELAYED_SHIFT: delayed_shift,
MIN_LOSS_SCALE: min_loss_scale,
}
return loss_scale_args
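# Hedged sketch: with any dynamic-loss-scale key present in the fp16 section, the
# helper above expands the config into the kwargs expected by the FP16 loss
# scaler (key strings assume the defaults in deepspeed/runtime/constants.py).
if __name__ == "__main__":
    _fp16_cfg = {"fp16": {"enabled": True, "initial_scale_power": 16, "loss_scale_window": 1000}}
    # Expected: {INITIAL_LOSS_SCALE: 65536, SCALE_WINDOW: 1000, plus hysteresis/min-scale defaults}
    print(get_dynamic_loss_scale_args(_fp16_cfg))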
def get_gradient_accumulation_steps(param_dict):
return get_scalar_param(param_dict, GRADIENT_ACCUMULATION_STEPS, GRADIENT_ACCUMULATION_STEPS_DEFAULT)
def get_sparse_gradients_enabled(param_dict):
return get_scalar_param(param_dict, SPARSE_GRADIENTS, SPARSE_GRADIENTS_DEFAULT)
def get_communication_data_type(param_dict):
val = get_scalar_param(param_dict, COMMUNICATION_DATA_TYPE, COMMUNICATION_DATA_TYPE_DEFAULT)
val = val.lower() if val is not None else val
if val is None:
return val # we must determine it by other parameters
elif val == "fp32":
return torch.float32
elif val == "fp16":
return torch.float16
elif val == "bfp16":
return torch.bfloat16
raise ValueError(f"Invalid communication_data_type. Supported data types: ['fp16', 'bfp16', 'fp32']. Got: {val}")
def get_prescale_gradients(param_dict):
return get_scalar_param(param_dict, PRESCALE_GRADIENTS, PRESCALE_GRADIENTS_DEFAULT)
def get_gradient_predivide_factor(param_dict):
return get_scalar_param(param_dict, GRADIENT_PREDIVIDE_FACTOR, GRADIENT_PREDIVIDE_FACTOR_DEFAULT)
def get_steps_per_print(param_dict):
return get_scalar_param(param_dict, STEPS_PER_PRINT, STEPS_PER_PRINT_DEFAULT)
def get_disable_allgather(param_dict):
return get_scalar_param(param_dict, DISABLE_ALLGATHER, DISABLE_ALLGATHER_DEFAULT)
def get_dump_state(param_dict):
return get_scalar_param(param_dict, DUMP_STATE, DUMP_STATE_DEFAULT)
def get_gradient_clipping(param_dict):
return get_scalar_param(param_dict, GRADIENT_CLIPPING, GRADIENT_CLIPPING_DEFAULT)
def get_sparse_attention(param_dict):
if SPARSE_ATTENTION in param_dict.keys():
sparsity = param_dict[SPARSE_ATTENTION]
mode = get_sparse_attention_mode(sparsity)
if mode == SPARSE_DENSE_MODE:
return get_sparse_dense_config(sparsity)
elif mode == SPARSE_FIXED_MODE:
return get_sparse_fixed_config(sparsity)
elif mode == SPARSE_VARIABLE_MODE:
return get_sparse_variable_config(sparsity)
elif mode == SPARSE_BIGBIRD_MODE:
return get_sparse_bigbird_config(sparsity)
elif mode == SPARSE_BSLONGFORMER_MODE:
return get_sparse_bslongformer_config(sparsity)
else:
raise NotImplementedError(f"Given sparsity mode, {mode}, has not been implemented yet!")
else:
return None
def get_sparse_dense_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
return {SPARSE_MODE: SPARSE_DENSE_MODE, SPARSE_BLOCK: block}
def get_sparse_fixed_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_local_blocks = get_scalar_param(sparsity, SPARSE_NUM_LOCAL_BLOCKS, SPARSE_NUM_LOCAL_BLOCKS_DEFAULT)
num_global_blocks = get_scalar_param(sparsity, SPARSE_NUM_GLOBAL_BLOCKS, SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT)
attention = get_scalar_param(sparsity, SPARSE_ATTENTION_TYPE, SPARSE_ATTENTION_TYPE_DEFAULT)
horizontal_global_attention = get_scalar_param(
sparsity,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT,
)
num_different_global_patterns = get_scalar_param(
sparsity,
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS,
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS_DEFAULT,
)
return {
SPARSE_MODE: SPARSE_FIXED_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_LOCAL_BLOCKS: num_local_blocks,
SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks,
SPARSE_ATTENTION_TYPE: attention,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention,
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS: num_different_global_patterns,
}
def get_sparse_variable_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_random_blocks = get_scalar_param(sparsity, SPARSE_NUM_RANDOM_BLOCKS, SPARSE_NUM_RANDOM_BLOCKS_DEFAULT)
local_window_blocks = get_scalar_param(sparsity, SPARSE_LOCAL_WINDOW_BLOCKS, SPARSE_LOCAL_WINDOW_BLOCKS_DEFAULT)
global_block_indices = get_scalar_param(sparsity, SPARSE_GLOBAL_BLOCK_INDICES, SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT)
global_block_end_indices = get_scalar_param(
sparsity,
SPARSE_GLOBAL_BLOCK_END_INDICES,
SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT,
)
attention = get_scalar_param(sparsity, SPARSE_ATTENTION_TYPE, SPARSE_ATTENTION_TYPE_DEFAULT)
horizontal_global_attention = get_scalar_param(
sparsity,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT,
)
return {
SPARSE_MODE: SPARSE_VARIABLE_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks,
SPARSE_LOCAL_WINDOW_BLOCKS: local_window_blocks,
SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices,
SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices,
SPARSE_ATTENTION_TYPE: attention,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention,
}
def get_sparse_bigbird_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_random_blocks = get_scalar_param(sparsity, SPARSE_NUM_RANDOM_BLOCKS, SPARSE_NUM_RANDOM_BLOCKS_DEFAULT)
num_sliding_window_blocks = get_scalar_param(
sparsity,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT,
)
num_global_blocks = get_scalar_param(sparsity, SPARSE_NUM_GLOBAL_BLOCKS, SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT)
return {
SPARSE_MODE: SPARSE_BIGBIRD_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks,
SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks,
}
def get_sparse_bslongformer_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_sliding_window_blocks = get_scalar_param(
sparsity,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT,
)
global_block_indices = get_scalar_param(sparsity, SPARSE_GLOBAL_BLOCK_INDICES, SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT)
global_block_end_indices = get_scalar_param(
sparsity,
SPARSE_GLOBAL_BLOCK_END_INDICES,
SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT,
)
return {
SPARSE_MODE: SPARSE_BSLONGFORMER_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks,
SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices,
SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices,
}
def get_sparse_attention_mode(param_dict):
if SPARSE_MODE in param_dict.keys():
return param_dict[SPARSE_MODE]
else:
return SPARSE_MODE_DEFAULT
def get_sparse_attention_type(param_dict):
if SPARSE_ATTENTION_TYPE in param_dict.keys():
return param_dict[SPARSE_ATTENTION_TYPE]
else:
return SPARSE_ATTENTION_TYPE_DEFAULT
def get_pipeline_config(param_dict):
"""Parses pipeline engine configuration. """
default_pipeline = {
"stages": "auto",
"partition": "best",
"seed_layers": False,
"activation_checkpoint_interval": 0,
}
config = default_pipeline
for key, val in param_dict.get("pipeline", {}).items():
config[key] = val
return config
def get_optimizer_name(param_dict):
if OPTIMIZER in param_dict.keys() and TYPE in param_dict[OPTIMIZER].keys():
return param_dict[OPTIMIZER][TYPE]
else:
return OPTIMIZER_TYPE_DEFAULT
def get_optimizer_params(param_dict):
if (get_optimizer_name(param_dict) is not None and OPTIMIZER_PARAMS in param_dict[OPTIMIZER].keys()):
return param_dict[OPTIMIZER][OPTIMIZER_PARAMS]
else:
return None
def get_optimizer_gradient_clipping(param_dict):
optimizer_params = get_optimizer_params(param_dict)
if optimizer_params is not None and MAX_GRAD_NORM in optimizer_params.keys():
return optimizer_params[MAX_GRAD_NORM]
else:
return None
def get_optimizer_legacy_fusion(param_dict):
if OPTIMIZER in param_dict.keys() and LEGACY_FUSION in param_dict[OPTIMIZER].keys():
return param_dict[OPTIMIZER][LEGACY_FUSION]
else:
return LEGACY_FUSION_DEFAULT
def get_zero_allow_untested_optimizer(param_dict):
return get_scalar_param(param_dict, ZERO_ALLOW_UNTESTED_OPTIMIZER, ZERO_ALLOW_UNTESTED_OPTIMIZER_DEFAULT)
def get_zero_force_ds_cpu_optimizer(param_dict):
return get_scalar_param(param_dict, ZERO_FORCE_DS_CPU_OPTIMIZER, ZERO_FORCE_DS_CPU_OPTIMIZER_DEFAULT)
def get_scheduler_name(param_dict):
if SCHEDULER in param_dict.keys() and TYPE in param_dict[SCHEDULER].keys():
return param_dict[SCHEDULER][TYPE]
else:
return SCHEDULER_TYPE_DEFAULT
def get_scheduler_params(param_dict):
if (get_scheduler_name(param_dict) is not None and SCHEDULER_PARAMS in param_dict[SCHEDULER].keys()):
return param_dict[SCHEDULER][SCHEDULER_PARAMS]
else:
return None
def get_train_batch_size(param_dict):
return get_scalar_param(param_dict, TRAIN_BATCH_SIZE, TRAIN_BATCH_SIZE_DEFAULT)
def get_train_micro_batch_size_per_gpu(param_dict):
return get_scalar_param(
param_dict,
TRAIN_MICRO_BATCH_SIZE_PER_GPU,
TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT,
)
def get_wall_clock_breakdown(param_dict):
return get_scalar_param(param_dict, WALL_CLOCK_BREAKDOWN, WALL_CLOCK_BREAKDOWN_DEFAULT)
def get_memory_breakdown(param_dict):
return get_scalar_param(param_dict, MEMORY_BREAKDOWN, MEMORY_BREAKDOWN_DEFAULT)
class HybridEngineConfig(DeepSpeedConfigModel):
enabled: bool = False
max_out_tokens: int = 512
inference_tp_size: int = 1
release_inference_cache: bool = False
pin_parameters: bool = True
tp_gather_partition_size: int = 8
def get_hybrid_engine_config(param_dict):
hybrid_engine_config_dict = param_dict.get("hybrid_engine", {})
hybrid_engine_config = HybridEngineConfig(**hybrid_engine_config_dict)
return hybrid_engine_config
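# Hedged sketch: omitted keys fall back to the HybridEngineConfig defaults above.
if __name__ == "__main__":
    _he = get_hybrid_engine_config({"hybrid_engine": {"enabled": True, "max_out_tokens": 1024}})
    assert _he.enabled and _he.max_out_tokens == 1024 and _he.inference_tp_size == 1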
def get_eigenvalue_config(param_dict):
if get_quantize_enabled(param_dict):
param_dict = param_dict[QUANTIZE_TRAINING]
assert not get_eigenvalue_enabled(param_dict), "Eigenvalue based MoQ is temporarily disabled"
return (
get_eigenvalue_enabled(param_dict),
get_eigenvalue_verbose(param_dict),
get_eigenvalue_max_iter(param_dict),
get_eigenvalue_tol(param_dict),
get_eigenvalue_stability(param_dict),
get_eigenvalue_gas_boundary_resolution(param_dict),
get_eigenvalue_layer_name(param_dict),
get_eigenvalue_layer_num(param_dict),
)
else:
return (
EIGENVALUE_ENABLED_DEFAULT,
EIGENVALUE_VERBOSE_DEFAULT,
EIGENVALUE_MAX_ITER_DEFAULT,
EIGENVALUE_TOL_DEFAULT,
EIGENVALUE_STABILITY_DEFAULT,
EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT,
EIGENVALUE_LAYER_NAME_DEFAULT,
EIGENVALUE_LAYER_NUM_DEFAULT,
)
def get_eigenvalue_enabled(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_ENABLED, EIGENVALUE_ENABLED_DEFAULT)
else:
return EIGENVALUE_ENABLED_DEFAULT
def get_eigenvalue_verbose(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_VERBOSE, EIGENVALUE_VERBOSE_DEFAULT)
else:
return EIGENVALUE_VERBOSE_DEFAULT
def get_eigenvalue_max_iter(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_MAX_ITER, EIGENVALUE_MAX_ITER_DEFAULT)
else:
return EIGENVALUE_MAX_ITER_DEFAULT
def get_eigenvalue_tol(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_TOL, EIGENVALUE_TOL_DEFAULT)
else:
return EIGENVALUE_TOL_DEFAULT
def get_eigenvalue_stability(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_STABILITY, EIGENVALUE_STABILITY_DEFAULT)
else:
return EIGENVALUE_STABILITY_DEFAULT
def get_eigenvalue_gas_boundary_resolution(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(
param_dict[EIGENVALUE],
EIGENVALUE_GAS_BOUNDARY_RESOLUTION,
EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT,
)
else:
return EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT
def get_eigenvalue_layer_name(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_LAYER_NAME, EIGENVALUE_LAYER_NAME_DEFAULT)
else:
return EIGENVALUE_LAYER_NAME_DEFAULT
def get_eigenvalue_layer_num(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_LAYER_NUM, EIGENVALUE_LAYER_NUM_DEFAULT)
else:
return EIGENVALUE_LAYER_NUM_DEFAULT
def get_checkpoint_params(param_dict):
return param_dict.get(CHECKPOINT, {})
def get_data_types_params(param_dict):
return param_dict.get(DATA_TYPES, {})
def get_checkpoint_tag_validation_mode(checkpoint_params):
tag_validation_mode = checkpoint_params.get(CHECKPOINT_TAG_VALIDATION, CHECKPOINT_TAG_VALIDATION_DEFAULT)
tag_validation_mode = tag_validation_mode.upper()
if tag_validation_mode in CHECKPOINT_TAG_VALIDATION_MODES:
return tag_validation_mode
else:
raise DeepSpeedConfigError(
"Checkpoint config contains invalid tag_validation "
f"value of {tag_validation_mode}, expecting one of {CHECKPOINT_TAG_VALIDATION_MODES}")
def get_checkpoint_parallel_write_pipeline(checkpoint_params):
par_write_params = checkpoint_params.get(CHECKPOINT_PARALLEL_WRITE, {})
par_write_pipeline = par_write_params.get(CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE,
CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT)
if par_write_pipeline in [True, False]:
return par_write_pipeline
else:
raise DeepSpeedConfigError("checkpoint::parallel_write::pipeline_stage "
f"value of '{par_write_pipeline}' is invalid, expecting: true or false")
def get_dataloader_drop_last(param_dict):
return get_scalar_param(param_dict, DATALOADER_DROP_LAST, DATALOADER_DROP_LAST_DEFAULT)
'''Write deepspeed config files by modifying basic templates.
Can be used to quickly change parameters via the command line.'''
class DeepSpeedConfigWriter:
def __init__(self, data=None):
self.data = data if data is not None else {}
def add_config(self, key, value):
self.data[key] = value
def load_config(self, filename):
self.data = json.load(open(filename, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
def write_config(self, filename):
with open(filename, "w") as outfile:
json.dump(self.data, outfile)
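# Hedged sketch: round-tripping a minimal config through the writer above
# (the /tmp path is illustrative).
if __name__ == "__main__":
    _writer = DeepSpeedConfigWriter()
    _writer.add_config("train_batch_size", 32)
    _writer.write_config("/tmp/ds_config_example.json")
    _writer.load_config("/tmp/ds_config_example.json")
    assert _writer.data["train_batch_size"] == 32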
class DeepSpeedConfig(object):
def __init__(self, config: Union[str, dict], mpu=None):
super(DeepSpeedConfig, self).__init__()
if isinstance(config, dict):
self._param_dict = config
elif os.path.exists(config):
self._param_dict = hjson.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
else:
try:
config_decoded = base64.urlsafe_b64decode(config).decode('utf-8')
self._param_dict = hjson.loads(config_decoded)
except (UnicodeDecodeError, AttributeError):
raise ValueError(
f"Expected a string path to an existing deepspeed config, or a dictionary or a valid base64. Received: {config}"
)
try:
self.global_rank = dist.get_rank()
if mpu is None:
self.world_size = dist.get_world_size()
else:
self.world_size = mpu.get_data_parallel_world_size()
except:
self.global_rank = 0
self.world_size = 1
# If elastic-mode enabled, update compute + update _param_dict
self.elasticity_enabled = elasticity_enabled(self._param_dict)
if self.elasticity_enabled:
logger.info("DeepSpeed elasticity support enabled")
final_batch_size, valid_gpus, micro_batch_size = compute_elastic_config(
ds_config=self._param_dict,
target_deepspeed_version=__version__,
world_size=self.world_size,
)
elastic_dict = self._param_dict[ELASTICITY]
# Ensure the resource scheduler saw the same elastic config we are using at runtime
ensure_immutable_elastic_config(runtime_elastic_config_dict=elastic_dict)
self.elastic_model_parallel_size = elastic_dict.get(MODEL_PARALLEL_SIZE, MODEL_PARALLEL_SIZE_DEFAULT)
if self.elastic_model_parallel_size < 1:
raise ElasticityConfigError("Model-Parallel size cannot be less than 1, "
f"given model-parallel size: {self.elastic_model_parallel_size}")
self.num_gpus_per_node = elastic_dict.get(NUM_GPUS_PER_NODE, NUM_GPUS_PER_NODE_DEFAULT)
if self.num_gpus_per_node < 1:
                raise ElasticityConfigError("Number of GPUs per node cannot be less than 1, "
f"given number of GPUs per node: {self.num_gpus_per_node}")
ignore_non_elastic_batch_info = elastic_dict.get(IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT)
if not ignore_non_elastic_batch_info:
batch_params = [
TRAIN_BATCH_SIZE,
TRAIN_MICRO_BATCH_SIZE_PER_GPU,
GRADIENT_ACCUMULATION_STEPS,
]
if any(map(lambda t: t in self._param_dict, batch_params)):
raise ElasticityConfigError("One or more batch related parameters were found in your " \
f"ds_config ({TRAIN_BATCH_SIZE}, {TRAIN_MICRO_BATCH_SIZE_PER_GPU}, and/or " \
f"{GRADIENT_ACCUMULATION_STEPS}). These parameters *will not be used* since " \
"elastic training is enabled, which takes control of these parameters. " \
"If you want to suppress this error (the parameters will be silently ignored) " \
                        f"please set '{IGNORE_NON_ELASTIC_BATCH_INFO}': true in your elasticity config.")
# micro_bsz * world_size * gas = total_batch_size
# gas = total_batch_size // (micro_bsz * world_size)
gradient_accu_steps = final_batch_size // (micro_batch_size * self.world_size)
if TRAIN_BATCH_SIZE in self._param_dict:
logger.warning("[Elasticity] overriding training_batch_size: "
f"{self._param_dict[TRAIN_BATCH_SIZE]} -> {final_batch_size}")
if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self._param_dict:
logger.warning("[Elasticity] overriding train_micro_batch_size_per_gpu: "
f"{self._param_dict[TRAIN_MICRO_BATCH_SIZE_PER_GPU]} -> {micro_batch_size}")
if GRADIENT_ACCUMULATION_STEPS in self._param_dict:
logger.warning("[Elasticity] overriding gradient_accumulation_steps: "
f"{self._param_dict[GRADIENT_ACCUMULATION_STEPS]} -> {gradient_accu_steps}")
logger.info(f"[Elasticity] valid GPU counts: {valid_gpus}")
self._param_dict[TRAIN_BATCH_SIZE] = final_batch_size
self._param_dict[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = micro_batch_size
self._param_dict[GRADIENT_ACCUMULATION_STEPS] = gradient_accu_steps
# Pass a copy so that user json is unmodified, e.g. for logging
self._initialize_params(copy.copy(self._param_dict))
self._configure_train_batch_size()
self._do_sanity_check()
def _initialize_params(self, param_dict):
self.train_batch_size = get_train_batch_size(param_dict)
#print(f"beginning get_train_batch_size = {get_train_batch_size}")
self.train_micro_batch_size_per_gpu = get_train_micro_batch_size_per_gpu(param_dict)
self.gradient_accumulation_steps = get_gradient_accumulation_steps(param_dict)
self.steps_per_print = get_steps_per_print(param_dict)
self.dump_state = get_dump_state(param_dict)
self.disable_allgather = get_disable_allgather(param_dict)
self.communication_data_type = get_communication_data_type(param_dict)
self.prescale_gradients = get_prescale_gradients(param_dict)
self.gradient_predivide_factor = get_gradient_predivide_factor(param_dict)
self.sparse_gradients_enabled = get_sparse_gradients_enabled(param_dict)
self.zero_config = get_zero_config(param_dict)
self.mics_shard_size = self.zero_config.mics_shard_size
self.mics_hierarchial_params_gather = self.zero_config.mics_hierarchical_params_gather
self.zero_optimization_stage = self.zero_config.stage
self.zero_enabled = self.zero_optimization_stage > 0
self.activation_checkpointing_config = DeepSpeedActivationCheckpointingConfig(param_dict)
self.comms_config = DeepSpeedCommsConfig(param_dict)
self.monitor_config = get_monitor_config(param_dict)
self.gradient_clipping = get_gradient_clipping(param_dict)
self.fp16_enabled = get_fp16_enabled(param_dict)
self.fp16_auto_cast = get_fp16_auto_cast(param_dict)
self.bfloat16_enabled = get_bfloat16_enabled(param_dict)
assert not (self.fp16_enabled
and self.bfloat16_enabled), 'bfloat16 and fp16 modes cannot be simultaneously enabled'
self.fp16_master_weights_and_gradients = get_fp16_master_weights_and_grads_enabled(param_dict)
self.amp_enabled = get_amp_enabled(param_dict)
self.amp_params = get_amp_params(param_dict)
self.loss_scale = get_loss_scale(param_dict)
self.initial_dynamic_scale = get_initial_dynamic_scale(param_dict)
self.dynamic_loss_scale_args = get_dynamic_loss_scale_args(param_dict)
self.compression_config = get_compression_config(param_dict)
self.optimizer_name = get_optimizer_name(param_dict)
if (self.optimizer_name is not None and self.optimizer_name.lower() in DEEPSPEED_OPTIMIZERS):
self.optimizer_name = self.optimizer_name.lower()
self.optimizer_params = get_optimizer_params(param_dict)
self.optimizer_legacy_fusion = get_optimizer_legacy_fusion(param_dict)
self.zero_allow_untested_optimizer = get_zero_allow_untested_optimizer(param_dict)
self.zero_force_ds_cpu_optimizer = get_zero_force_ds_cpu_optimizer(param_dict)
self.scheduler_name = get_scheduler_name(param_dict)
self.scheduler_params = get_scheduler_params(param_dict)
self.flops_profiler_config = DeepSpeedFlopsProfilerConfig(param_dict)
self.wall_clock_breakdown = (get_wall_clock_breakdown(param_dict) | self.flops_profiler_config.enabled)
self.memory_breakdown = get_memory_breakdown(param_dict)
self.autotuning_config = DeepSpeedAutotuningConfig(param_dict)
(
self.eigenvalue_enabled,
self.eigenvalue_verbose,
self.eigenvalue_max_iter,
self.eigenvalue_tol,
self.eigenvalue_stability,
self.eigenvalue_gas_boundary_resolution,
self.eigenvalue_layer_name,
self.eigenvalue_layer_num,
) = get_eigenvalue_config(param_dict)
self.hybrid_engine = get_hybrid_engine_config(param_dict)
self.sparse_attention = get_sparse_attention(param_dict)
self.pipeline = get_pipeline_config(param_dict)
self.pld_enabled = get_pld_enabled(param_dict)
self.pld_params = get_pld_params(param_dict)
self.curriculum_enabled_legacy = get_curriculum_enabled_legacy(param_dict)
self.curriculum_params_legacy = get_curriculum_params_legacy(param_dict)
self.data_efficiency_enabled = get_data_efficiency_enabled(param_dict)
self.data_efficiency_config = get_data_efficiency_config(param_dict)
checkpoint_params = get_checkpoint_params(param_dict)
validation_mode = get_checkpoint_tag_validation_mode(checkpoint_params)
self.checkpoint_tag_validation_enabled = (validation_mode != ValidationMode.IGNORE)
self.checkpoint_tag_validation_fail = validation_mode == ValidationMode.FAIL
self.load_universal_checkpoint = checkpoint_params.get(LOAD_UNIVERSAL_CHECKPOINT,
LOAD_UNIVERSAL_CHECKPOINT_DEFAULT)
self.use_node_local_storage = checkpoint_params.get(USE_NODE_LOCAL_STORAGE_CHECKPOINT,
USE_NODE_LOCAL_STORAGE_CHECKPOINT_DEFAULT)
data_types_params = get_data_types_params(param_dict)
self.grad_accum_dtype = data_types_params.get(GRAD_ACCUM_DTYPE, GRAD_ACCUM_DTYPE_DEFAULT)
par_write_pipe = get_checkpoint_parallel_write_pipeline(checkpoint_params)
self.checkpoint_parallel_write_pipeline = par_write_pipe
self.aio_config = get_aio_config(param_dict)
self.dataloader_drop_last = get_dataloader_drop_last(param_dict)
self.nebula_config = DeepSpeedNebulaConfig(param_dict)
def _batch_assertion(self):
train_batch = self.train_batch_size
micro_batch = self.train_micro_batch_size_per_gpu
grad_acc = self.gradient_accumulation_steps
assert (train_batch > 0), f"Train batch size: {train_batch} has to be greater than 0"
assert (micro_batch > 0), f"Micro batch size per gpu: {micro_batch} has to be greater than 0"
assert (grad_acc > 0), f"Gradient accumulation steps: {grad_acc} has to be greater than 0"
assert train_batch == micro_batch * grad_acc * self.world_size, (
f"Check batch related parameters. train_batch_size is not equal "
"to micro_batch_per_gpu * gradient_acc_step * world_size "
f"{train_batch} != {micro_batch} * {grad_acc} * {self.world_size}")
def _set_batch_related_parameters(self):
train_batch = self.train_batch_size
micro_batch = self.train_micro_batch_size_per_gpu
grad_acc = self.gradient_accumulation_steps
#print(f"train_batch = {train_batch}, micro_batch={micro_batch}")
# all values are provided nothing needs to be set
if train_batch is not None and micro_batch is not None and grad_acc is not None:
return
        # gradient_accumulation_steps needs to be set
elif train_batch is not None and micro_batch is not None:
grad_acc = train_batch // micro_batch
grad_acc //= self.world_size
self.gradient_accumulation_steps = grad_acc
# micro_batch_per_gpu needs to be set
elif train_batch is not None and grad_acc is not None:
micro_batch = train_batch // self.world_size
micro_batch //= grad_acc
self.train_micro_batch_size_per_gpu = micro_batch
# train_batch_size needs to be set
elif micro_batch is not None and grad_acc is not None:
train_batch_size = micro_batch * grad_acc
train_batch_size *= self.world_size
self.train_batch_size = train_batch_size
        # gradient_accumulation_steps and micro_batch_per_gpu need to be set
elif train_batch is not None:
self.gradient_accumulation_steps = 1
self.train_micro_batch_size_per_gpu = train_batch // self.world_size
        # train_batch_size and gradient_accumulation_steps need to be set
elif micro_batch is not None:
self.train_batch_size = micro_batch * self.world_size
self.gradient_accumulation_steps = 1
# either none of the three parameters are provided or just gradient_accumulation_step is provided
else:
assert False, \
'Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided'
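    # Worked example of the derivation above (illustrative numbers): with
    # world_size=8, train_batch_size=256 and train_micro_batch_size_per_gpu=4,
    # gradient_accumulation_steps = 256 // 4 // 8 = 8, which satisfies
    # train_batch == micro_batch * grad_acc * world_size checked in _batch_assertion().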
def _configure_train_batch_size(self):
self._set_batch_related_parameters()
self._batch_assertion()
def _do_sanity_check(self):
self._do_error_check()
self._do_warning_check()
def print_user_config(self):
logger.info(" json = {}".format(
json.dumps(
self._param_dict,
sort_keys=True,
indent=4,
cls=ScientificNotationEncoder,
separators=(",", ":"),
)))
def print(self, name):
logger.info("{}:".format(name))
for arg in sorted(vars(self)):
if arg != "_param_dict":
dots = "." * (29 - len(arg))
logger.info(" {} {} {}".format(arg, dots, getattr(self, arg)))
self.print_user_config()
def _do_error_check(self):
assert (self.train_micro_batch_size_per_gpu
), "DeepSpeedConfig: {} is not defined".format(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
assert (
self.gradient_accumulation_steps), "DeepSpeedConfig: {} is not defined".format(GRADIENT_ACCUMULATION_STEPS)
if self.zero_enabled:
assert (self.zero_optimization_stage <=
ZeroStageEnum.max_stage), "DeepSpeedConfig: Maximum supported ZeRO stage is {}".format(
ZeroStageEnum.max_stage)
if self.fp16_master_weights_and_gradients:
assert self.zero_enabled and self.zero_optimization_stage == ZeroStageEnum.gradients, "Fp16_master_weights_and_grads is only supported with ZeRO Stage 2 for now."
def _do_warning_check(self):
fp16_enabled = self.fp16_enabled
vocabulary_size = self._param_dict.get(VOCABULARY_SIZE, VOCABULARY_SIZE_DEFAULT)
if vocabulary_size and vocabulary_size % TENSOR_CORE_ALIGN_SIZE != 0:
            logger.warning(
                "DeepSpeedConfig: vocabulary size {} is not aligned to {}, which may impact tensor core utilization.".format(
                    vocabulary_size, TENSOR_CORE_ALIGN_SIZE))
if (self.optimizer_params is not None and MAX_GRAD_NORM in self.optimizer_params.keys()
and self.optimizer_params[MAX_GRAD_NORM] > 0):
if fp16_enabled:
if self.global_rank == 0:
logger.warning("DeepSpeedConfig: In FP16 mode, DeepSpeed will pass {}:{} to FP16 wrapper".format(
MAX_GRAD_NORM, self.optimizer_params[MAX_GRAD_NORM]))
else:
if self.global_rank == 0:
logger.warning(
"DeepSpeedConfig: In FP32 mode, DeepSpeed does not permit MAX_GRAD_NORM ({}) > 0, setting to zero"
.format(self.optimizer_params[MAX_GRAD_NORM]))
self.optimizer_params[MAX_GRAD_NORM] = 0.0
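# Hedged sketch: building the config from a minimal dict (assumes a full DeepSpeed
# install and no torch.distributed initialization, so world_size falls back to 1).
if __name__ == "__main__":
    _min_cfg = DeepSpeedConfig({"train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 4})
    assert _min_cfg.train_batch_size == 2 * 4 * _min_cfg.world_size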
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Implementation of learning rate schedules.
Taken and modified from PyTorch v1.0.1 source
https://github.com/pytorch/pytorch/blob/v1.1.0/torch/optim/lr_scheduler.py
"""
import argparse
from torch.optim import Optimizer
import math
from deepspeed.utils import logger
LR_SCHEDULE = 'lr_schedule'
LR_RANGE_TEST = 'LRRangeTest'
ONE_CYCLE = 'OneCycle'
WARMUP_LR = 'WarmupLR'
WARMUP_DECAY_LR = 'WarmupDecayLR'
VALID_LR_SCHEDULES = [LR_RANGE_TEST, ONE_CYCLE, WARMUP_LR, WARMUP_DECAY_LR]
LR_RANGE_TEST_MIN_LR = 'lr_range_test_min_lr'
LR_RANGE_TEST_STEP_RATE = 'lr_range_test_step_rate'
LR_RANGE_TEST_STEP_SIZE = 'lr_range_test_step_size'
LR_RANGE_TEST_STAIRCASE = 'lr_range_test_staircase'
EDGE_VALUE = 'edge_value'
MID_VALUE = 'mid_value'
CYCLE_FIRST_STEP_SIZE = 'cycle_first_step_size'
CYCLE_FIRST_STAIR_COUNT = 'cycle_first_stair_count'
CYCLE_SECOND_STEP_SIZE = 'cycle_second_step_size'
CYCLE_SECOND_STAIR_COUNT = 'cycle_second_stair_count'
DECAY_STEP_SIZE = 'decay_step_size'
CYCLE_MIN_LR = 'cycle_min_lr'
CYCLE_MAX_LR = 'cycle_max_lr'
DECAY_LR_RATE = 'decay_lr_rate'
CYCLE_MIN_MOM = 'cycle_min_mom'
CYCLE_MAX_MOM = 'cycle_max_mom'
DECAY_MOM_RATE = 'decay_mom_rate'
WARMUP_MIN_LR = 'warmup_min_lr'
WARMUP_MAX_LR = 'warmup_max_lr'
WARMUP_NUM_STEPS = 'warmup_num_steps'
WARMUP_TYPE = 'warmup_type'
WARMUP_LOG_RATE = 'log'
WARMUP_LINEAR_RATE = 'linear'
TOTAL_NUM_STEPS = 'total_num_steps'
def add_tuning_arguments(parser):
group = parser.add_argument_group('Convergence Tuning', 'Convergence tuning configurations')
# LR scheduler
group.add_argument('--lr_schedule', type=str, default=None, help='LR schedule for training.')
# Learning rate range test
group.add_argument("--lr_range_test_min_lr", type=float, default=0.001, help='Starting lr value.')
group.add_argument("--lr_range_test_step_rate", type=float, default=1.0, help='scaling rate for LR range test.')
group.add_argument("--lr_range_test_step_size", type=int, default=1000, help='training steps per LR change.')
group.add_argument("--lr_range_test_staircase",
type=bool,
default=False,
help='use staircase scaling for LR range test.')
# OneCycle schedule
group.add_argument("--cycle_first_step_size",
type=int,
default=1000,
help='size of first step of 1Cycle schedule (training steps).')
group.add_argument("--cycle_first_stair_count",
type=int,
default=-1,
help='first stair count for 1Cycle schedule.')
group.add_argument("--cycle_second_step_size",
type=int,
default=-1,
help='size of second step of 1Cycle schedule (default first_step_size).')
group.add_argument("--cycle_second_stair_count",
type=int,
default=-1,
help='second stair count for 1Cycle schedule.')
group.add_argument("--decay_step_size",
type=int,
default=1000,
help='size of intervals for applying post cycle decay (training steps).')
# 1Cycle LR
group.add_argument("--cycle_min_lr", type=float, default=0.01, help='1Cycle LR lower bound.')
group.add_argument("--cycle_max_lr", type=float, default=0.1, help='1Cycle LR upper bound.')
group.add_argument("--decay_lr_rate", type=float, default=0.0, help='post cycle LR decay rate.')
# 1Cycle Momentum
group.add_argument('--cycle_momentum', default=False, action='store_true', help='Enable 1Cycle momentum schedule.')
group.add_argument("--cycle_min_mom", type=float, default=0.8, help='1Cycle momentum lower bound.')
group.add_argument("--cycle_max_mom", type=float, default=0.9, help='1Cycle momentum upper bound.')
group.add_argument("--decay_mom_rate", type=float, default=0.0, help='post cycle momentum decay rate.')
# Warmup LR
group.add_argument('--warmup_min_lr', type=float, default=0, help='WarmupLR minimum/initial LR value')
group.add_argument('--warmup_max_lr', type=float, default=0.001, help='WarmupLR maximum LR value.')
group.add_argument('--warmup_num_steps', type=int, default=1000, help='WarmupLR step count for LR warmup.')
group.add_argument('--warmup_type',
type=str,
default=WARMUP_LOG_RATE,
help='WarmupLR increasing function during warmup')
return parser
def parse_arguments():
parser = argparse.ArgumentParser()
parser = add_tuning_arguments(parser)
lr_sched_args, unknown_args = parser.parse_known_args()
return lr_sched_args, unknown_args
def override_lr_range_test_params(args, params):
if hasattr(args, LR_RANGE_TEST_MIN_LR) and args.lr_range_test_min_lr is not None:
params[LR_RANGE_TEST_MIN_LR] = args.lr_range_test_min_lr
if hasattr(args, LR_RANGE_TEST_STEP_RATE) and args.lr_range_test_step_rate is not None:
params[LR_RANGE_TEST_STEP_RATE] = args.lr_range_test_step_rate
if hasattr(args, LR_RANGE_TEST_STEP_SIZE) and args.lr_range_test_step_size is not None:
params[LR_RANGE_TEST_STEP_SIZE] = args.lr_range_test_step_size
if hasattr(args, LR_RANGE_TEST_STAIRCASE) and args.lr_range_test_staircase is not None:
params[LR_RANGE_TEST_STAIRCASE] = args.lr_range_test_staircase
def override_1cycle_params(args, params):
if hasattr(args, CYCLE_FIRST_STEP_SIZE) and args.cycle_first_step_size is not None:
params[CYCLE_FIRST_STEP_SIZE] = args.cycle_first_step_size
if hasattr(args, CYCLE_FIRST_STAIR_COUNT) and args.cycle_first_stair_count is not None:
params[CYCLE_FIRST_STAIR_COUNT] = args.cycle_first_stair_count
if hasattr(args, CYCLE_SECOND_STEP_SIZE) and args.cycle_second_step_size is not None:
params[CYCLE_SECOND_STEP_SIZE] = args.cycle_second_step_size
if hasattr(args, CYCLE_SECOND_STAIR_COUNT) and args.cycle_second_stair_count is not None:
params[CYCLE_SECOND_STAIR_COUNT] = args.cycle_second_stair_count
if hasattr(args, DECAY_STEP_SIZE) and args.decay_step_size is not None:
params[DECAY_STEP_SIZE] = args.decay_step_size
# 1Cycle LR params
if hasattr(args, CYCLE_MIN_LR) and args.cycle_min_lr is not None:
params[CYCLE_MIN_LR] = args.cycle_min_lr
if hasattr(args, CYCLE_MAX_LR) and args.cycle_max_lr is not None:
params[CYCLE_MAX_LR] = args.cycle_max_lr
if hasattr(args, DECAY_LR_RATE) and args.decay_lr_rate is not None:
params[DECAY_LR_RATE] = args.decay_lr_rate
# 1Cycle MOM params
if hasattr(args, CYCLE_MIN_MOM) and args.cycle_min_mom is not None:
params[CYCLE_MIN_MOM] = args.cycle_min_mom
if hasattr(args, CYCLE_MAX_MOM) and args.cycle_max_mom is not None:
params[CYCLE_MAX_MOM] = args.cycle_max_mom
if hasattr(args, DECAY_MOM_RATE) and args.decay_mom_rate is not None:
params[DECAY_MOM_RATE] = args.decay_mom_rate
def override_warmupLR_params(args, params):
if hasattr(args, WARMUP_MIN_LR) and args.warmup_min_lr is not None:
params[WARMUP_MIN_LR] = args.warmup_min_lr
if hasattr(args, WARMUP_MAX_LR) and args.warmup_max_lr is not None:
params[WARMUP_MAX_LR] = args.warmup_max_lr
if hasattr(args, WARMUP_NUM_STEPS) and args.warmup_num_steps is not None:
params[WARMUP_NUM_STEPS] = args.warmup_num_steps
if hasattr(args, WARMUP_TYPE) and args.warmup_type is not None:
params[WARMUP_TYPE] = args.warmup_type
def override_params(args, params):
# LR range test params
override_lr_range_test_params(args, params)
# 1Cycle params
override_1cycle_params(args, params)
# WarmupLR params
override_warmupLR_params(args, params)
def get_config_from_args(args):
if not hasattr(args, LR_SCHEDULE) or args.lr_schedule is None:
return None, '--{} not specified on command line'.format(LR_SCHEDULE)
    if args.lr_schedule not in VALID_LR_SCHEDULES:
        return None, '{} is not a supported LR schedule'.format(args.lr_schedule)
config = {}
config['type'] = args.lr_schedule
config['params'] = {}
if args.lr_schedule == LR_RANGE_TEST:
override_lr_range_test_params(args, config['params'])
elif args.lr_schedule == ONE_CYCLE:
override_1cycle_params(args, config['params'])
else:
override_warmupLR_params(args, config['params'])
return config, None
def get_lr_from_config(config):
    if 'type' not in config:
        return None, 'LR schedule type not defined in config'
    if 'params' not in config:
        return None, 'LR schedule params not defined in config'
    lr_schedule = config['type']
    lr_params = config['params']
    if lr_schedule not in VALID_LR_SCHEDULES:
        return None, '{} is not a valid LR schedule'.format(lr_schedule)
if lr_schedule == LR_RANGE_TEST:
return lr_params[LR_RANGE_TEST_MIN_LR], ''
if lr_schedule == ONE_CYCLE:
return lr_params[CYCLE_MAX_LR], ''
# Warmup LR
return lr_params[WARMUP_MAX_LR], ''
"""
Only optimizers that are subclass of torch.optim.Optimizer are supported. So check the passed optimizer and wrapped
optimizer to see if requirement is satisfied.
TODO: Looking under the hood to examine the wrapped optimizer is a hack that requires a better long-term fix.
"""
def get_torch_optimizer(optimizer):
if isinstance(optimizer, Optimizer):
return optimizer
if hasattr(optimizer, 'optimizer') and isinstance(optimizer.optimizer, Optimizer):
return optimizer.optimizer
raise TypeError('{} is not a subclass of torch.optim.Optimizer'.format(type(optimizer).__name__))
class LRRangeTest(object):
"""Sets the learning rate of each parameter group according to
learning rate range test (LRRT) policy. The policy increases learning
rate starting from a base value with a constant frequency, as detailed in
the paper `A disciplined approach to neural network hyper-parameters: Part1`_.
LRRT policy is used for finding maximum LR that trains a model without divergence, and can be used to
configure the LR boundaries for Cyclic LR schedules.
LRRT changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
Args:
optimizer (Optimizer): Wrapped optimizer.
lr_range_test_min_lr (float or list): Initial learning rate which is the
lower boundary in the range test for each parameter group.
lr_range_test_step_size (int): Interval of training steps to increase learning rate. Default: 2000
lr_range_test_step_rate (float): Scaling rate for range test. Default: 1.0
lr_range_test_staircase (bool): Scale in staircase fashion, rather than continuous. Default: False.
last_batch_iteration (int): The index of the last batch. This parameter is used when
resuming a training job. Since `step()` should be invoked after each
batch instead of after each epoch, this number represents the total
number of *batches* computed, not the total number of epochs computed.
When last_batch_iteration=-1, the schedule is started from the beginning.
Default: -1
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = LRRangeTest(optimizer)
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
    .. _A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay:
https://arxiv.org/abs/1803.09820
"""
def __init__(self,
optimizer: Optimizer,
lr_range_test_min_lr: float = 1e-3,
lr_range_test_step_size: int = 2000,
lr_range_test_step_rate: float = 1.0,
lr_range_test_staircase: bool = False,
last_batch_iteration: int = -1):
self.optimizer = get_torch_optimizer(optimizer)
if isinstance(lr_range_test_min_lr, list) or isinstance(lr_range_test_min_lr, tuple):
if len(lr_range_test_min_lr) != len(self.optimizer.param_groups):
raise ValueError("expected {} lr_range_test_min_lr, got {}".format(len(self.optimizer.param_groups),
len(lr_range_test_min_lr)))
self.min_lr = list(lr_range_test_min_lr)
else:
self.min_lr = [lr_range_test_min_lr] * len(self.optimizer.param_groups)
self.step_size = lr_range_test_step_size
self.step_rate = lr_range_test_step_rate
self.last_batch_iteration = last_batch_iteration
self.staircase = lr_range_test_staircase
self.interval_fn = self._staircase_interval if lr_range_test_staircase else self._continuous_interval
if last_batch_iteration == -1:
self._update_optimizer(self.min_lr)
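    # The interval helpers below drive the LR growth: with staircase scaling the
    # interval is floor((iteration + 1) / step_size), otherwise it grows continuously,
    # and each group's LR becomes min_lr * (1 + step_rate * interval).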
def _staircase_interval(self):
return math.floor(float(self.last_batch_iteration + 1) / self.step_size)
def _continuous_interval(self):
return float(self.last_batch_iteration + 1) / self.step_size
def _get_increase(self):
return (1 + self.step_rate * self.interval_fn())
def get_lr(self):
lr_increase = self._get_increase()
return [lr_range_test_min_lr * lr_increase for lr_range_test_min_lr in self.min_lr]
def get_last_lr(self):
""" Return last computed learning rate by current scheduler.
"""
assert getattr(self, '_last_lr', None) is not None, "need to call step() first"
return self._last_lr
def _update_optimizer(self, group_lrs):
for param_group, lr in zip(self.optimizer.param_groups, group_lrs):
param_group['lr'] = lr
def step(self, batch_iteration=None):
if batch_iteration is None:
batch_iteration = self.last_batch_iteration + 1
self.last_batch_iteration = batch_iteration
self._update_optimizer(self.get_lr())
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
def state_dict(self):
return {'last_batch_iteration': self.last_batch_iteration}
def load_state_dict(self, sd):
self.last_batch_iteration = sd['last_batch_iteration']
class OneCycle(object):
"""Sets the learning rate of each parameter group according to
1Cycle learning rate policy (1CLR). 1CLR is a variation of the
Cyclical Learning Rate (CLR) policy that involves one cycle followed by
decay. The policy simultaneously cycles the learning rate (and momentum)
between two boundaries with a constant frequency, as detailed in
the paper `A disciplined approach to neural network hyper-parameters`_.
1CLR policy changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
This implementation was adapted from the github repo: `pytorch/pytorch`_
Args:
optimizer (Optimizer): Wrapped optimizer.
cycle_min_lr (float or list): Initial learning rate which is the
lower boundary in the cycle for each parameter group.
cycle_max_lr (float or list): Upper learning rate boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (cycle_max_lr - cycle_min_lr).
The lr at any cycle is the sum of cycle_min_lr
and some scaling of the amplitude; therefore
cycle_max_lr may not actually be reached depending on
scaling function.
decay_lr_rate(float): Decay rate for learning rate. Default: 0.
cycle_first_step_size (int): Number of training iterations in the
increasing half of a cycle. Default: 2000
cycle_second_step_size (int): Number of training iterations in the
decreasing half of a cycle. If cycle_second_step_size is None,
it is set to cycle_first_step_size. Default: None
        cycle_first_stair_count (int): Number of stairs in the first half of the cycle phase, i.e.
            lr/mom are changed in a staircase fashion. Default 0, which disables staircase.
        cycle_second_stair_count (int): Number of stairs in the second half of the cycle phase, i.e.
            lr/mom are changed in a staircase fashion. Default 0, which disables staircase.
        decay_step_size (int): Interval, in training steps, for applying decay in the decay phase. Default: 0, which disables decay.
cycle_momentum (bool): If ``True``, momentum is cycled inversely
to learning rate between 'cycle_min_mom' and 'cycle_max_mom'.
Default: True
cycle_min_mom (float or list): Initial momentum which is the
lower boundary in the cycle for each parameter group.
Default: 0.8
cycle_max_mom (float or list): Upper momentum boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (cycle_max_mom - cycle_min_mom).
The momentum at any cycle is the difference of cycle_max_mom
and some scaling of the amplitude; therefore
cycle_min_mom may not actually be reached depending on
scaling function. Default: 0.9
decay_mom_rate (float): Decay rate for momentum. Default: 0.
last_batch_iteration (int): The index of the last batch. This parameter is used when
resuming a training job. Since `step()` should be invoked after each
batch instead of after each epoch, this number represents the total
number of *batches* computed, not the total number of epochs computed.
When last_batch_iteration=-1, the schedule is started from the beginning.
Default: -1
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = OneCycle(optimizer, 0.0001, 0.0010)
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
.. _A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay: https://arxiv.org/abs/1803.09820
"""
def __init__(self,
optimizer,
cycle_min_lr,
cycle_max_lr,
decay_lr_rate=0.,
cycle_first_step_size=2000,
cycle_second_step_size=None,
cycle_first_stair_count=0,
cycle_second_stair_count=None,
decay_step_size=0,
cycle_momentum=True,
cycle_min_mom=0.8,
cycle_max_mom=0.9,
decay_mom_rate=0.,
last_batch_iteration=-1):
self.optimizer = get_torch_optimizer(optimizer)
# Initialize cycle shape
self._initialize_cycle(cycle_first_step_size, cycle_second_step_size, cycle_first_stair_count,
cycle_second_stair_count, decay_step_size)
# Initialize cycle lr
self._initialize_lr(self.optimizer, cycle_min_lr, cycle_max_lr, decay_lr_rate, last_batch_iteration)
# Initialize cyclic momentum
self.cycle_momentum = cycle_momentum
if cycle_momentum:
self._initialize_momentum(self.optimizer, cycle_min_mom, cycle_max_mom, decay_mom_rate,
last_batch_iteration)
# Initialize batch iteration tracker
self.last_batch_iteration = last_batch_iteration
# Configure cycle shape
def _initialize_cycle(self, cycle_first_step_size, cycle_second_step_size, cycle_first_stair_count,
cycle_second_stair_count, decay_step_size):
cycle_first_step_size = float(cycle_first_step_size)
cycle_second_step_size = float(
cycle_second_step_size) if cycle_second_step_size is not None else cycle_first_step_size
self.total_size = cycle_first_step_size + cycle_second_step_size
self.step_ratio = cycle_first_step_size / self.total_size
self.first_stair_count = cycle_first_stair_count
self.second_stair_count = cycle_first_stair_count if cycle_second_stair_count is None else cycle_second_stair_count
self.decay_step_size = decay_step_size
if math.isclose(self.decay_step_size, 0):
self.skip_lr_decay = True
self.skip_mom_decay = True
else:
self.skip_lr_decay = False
self.skip_mom_decay = False
# Configure lr schedule
def _initialize_lr(self, optimizer, cycle_min_lr, cycle_max_lr, decay_lr_rate, last_batch_iteration):
self.min_lrs = [cycle_min_lr] * len(optimizer.param_groups)
if last_batch_iteration == -1:
for lr, group in zip(self.min_lrs, optimizer.param_groups):
group['lr'] = lr
self.max_lrs = [cycle_max_lr] * len(optimizer.param_groups)
self.decay_lr_rate = decay_lr_rate
if math.isclose(self.decay_lr_rate, 0):
self.skip_lr_decay = True
# Configure momentum schedule
def _initialize_momentum(self, optimizer, cycle_min_mom, cycle_max_mom, decay_mom_rate, last_batch_iteration):
if 'betas' not in optimizer.defaults:
optimizer_name = type(optimizer).__name__
            logger.warning(
f"cycle_momentum is disabled because optimizer {optimizer_name} does not support momentum, no betas attribute in defaults"
)
self.cycle_momentum = False
return
self.decay_mom_rate = decay_mom_rate
self.min_moms = [(cycle_min_mom, 0.99)] * len(optimizer.param_groups)
self.max_moms = [(cycle_max_mom, 0.99)] * len(optimizer.param_groups)
if last_batch_iteration == -1:
for momentum, group in zip(self.min_moms, optimizer.param_groups):
group['betas'] = momentum
if math.isclose(self.decay_mom_rate, 0):
self.skip_mom_decay = True
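    # _get_scale_factor maps the current iteration onto a triangular wave in [0, 1]:
    # it rises linearly from 0 to 1 over the first `step_ratio` fraction of the cycle
    # and falls back to 0 over the remainder. The cycle LR and momentum interpolate
    # between their min/max bounds using this factor.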
def _get_scale_factor(self):
batch_iteration = (self.last_batch_iteration + 1)
cycle = math.floor(1 + batch_iteration / self.total_size)
x = 1. + batch_iteration / self.total_size - cycle
if x <= self.step_ratio:
scale_factor = x / self.step_ratio
else:
scale_factor = (x - 1) / (self.step_ratio - 1)
return scale_factor
def _get_cycle_mom(self):
scale_factor = self._get_scale_factor()
momentums = []
for base_betas, max_betas in zip(self.min_moms, self.max_moms):
cycle_min_mom = base_betas[0]
cycle_max_mom = max_betas[0]
base_height = (cycle_max_mom - cycle_min_mom) * scale_factor
momentum = cycle_max_mom - base_height
momentums.append((momentum, base_betas[1]))
return momentums
def _get_cycle_lr(self):
scale_factor = self._get_scale_factor()
lrs = []
for cycle_min_lr, cycle_max_lr in zip(self.min_lrs, self.max_lrs):
base_height = (cycle_max_lr - cycle_min_lr) * scale_factor
lr = cycle_min_lr + base_height
lrs.append(lr)
return lrs
def _get_decay_mom(self, decay_batch_iteration):
if self.skip_mom_decay:
return self.max_moms
decay_interval = decay_batch_iteration / self.decay_step_size
mom_decay_factor = (1 + self.decay_mom_rate * decay_interval)
momentums = [(beta0 * mom_decay_factor, beta1) for beta0, beta1 in self.max_moms]
return momentums
def _get_decay_lr(self, decay_batch_iteration):
"""Calculates the learning rate at batch index. This function is used
after the cycle completes and post cycle decaying of lr/mom is enabled.
This function treats `self.last_batch_iteration` as the last batch index.
"""
if self.skip_lr_decay:
return self.min_lrs
decay_interval = decay_batch_iteration / self.decay_step_size
lr_decay_factor = (1 + self.decay_lr_rate * decay_interval)
lrs = [cycle_min_lr / lr_decay_factor for cycle_min_lr in self.min_lrs]
return lrs
def get_lr(self):
"""Calculates the learning rate at batch index. This function treats
`self.last_batch_iteration` as the last batch index.
"""
if self.last_batch_iteration < self.total_size:
return self._get_cycle_lr()
return self._get_decay_lr(self.last_batch_iteration - self.total_size + 1)
def get_mom(self):
"""Calculates the momentum at batch index. This function treats
`self.last_batch_iteration` as the last batch index.
"""
if not self.cycle_momentum:
return None
if self.last_batch_iteration < self.total_size:
return self._get_cycle_mom()
return self._get_decay_mom(self.last_batch_iteration - self.total_size + 1)
def get_last_lr(self):
""" Return last computed learning rate by current scheduler.
"""
assert getattr(self, '_last_lr', None) is not None, "need to call step() first"
return self._last_lr
def step(self, batch_iteration=None):
""" Updates the optimizer with the learning rate for the last batch index.
`self.last_batch_iteration` is treated as the last batch index.
If self.cycle_momentum is true, also updates optimizer momentum.
"""
if batch_iteration is None:
batch_iteration = self.last_batch_iteration + 1
self.last_batch_iteration = batch_iteration
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
if self.cycle_momentum:
momentums = self.get_mom()
for param_group, momentum in zip(self.optimizer.param_groups, momentums):
param_group['betas'] = momentum
def state_dict(self):
return {'last_batch_iteration': self.last_batch_iteration}
def load_state_dict(self, sd):
self.last_batch_iteration = sd['last_batch_iteration']
class WarmupLR(object):
"""Increase the learning rate of each parameter group from min lr to max lr
over warmup_num_steps steps, and then fix at max lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
warmup_min_lr (float or list): minimum learning rate. Default: 0
warmup_max_lr (float or list): maximum learning rate. Default: 0.001
warmup_num_steps (int): number of steps to warm up from min_lr to max_lr. Default: 1000
        warmup_type {'log', 'linear'}: increasing function from min_lr to max_lr during warmup. Default: log
last_batch_iteration (int): The index of the last batch. Default: -1.
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = WarmupLR(optimizer)
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
"""
def __init__(self,
optimizer: Optimizer,
warmup_min_lr: float = 0.0,
warmup_max_lr: float = 0.001,
warmup_num_steps: int = 1000,
warmup_type: str = WARMUP_LOG_RATE,
last_batch_iteration: int = -1):
self.optimizer = get_torch_optimizer(optimizer)
self.min_lrs = self._format_param(self.optimizer, warmup_min_lr, "min_lr")
self.max_lrs = self._format_param(self.optimizer, warmup_max_lr, "max_lr")
self.delta_lrs = [big - small for big, small in zip(self.max_lrs, self.min_lrs)]
self.warmup_num_steps = max(2, warmup_num_steps)
# Currently only support linear and log function
if warmup_type not in {WARMUP_LOG_RATE, WARMUP_LINEAR_RATE}:
logger.warning(f"Using unknown warmup_type: {warmup_type}. The increasing function "
f"is set to default (log)")
warmup_type = WARMUP_LOG_RATE
self.warmup_type = warmup_type
self.inverse_log_warm_up = 1.0 / math.log(self.warmup_num_steps)
self.last_batch_iteration = last_batch_iteration
def get_lr(self):
if self.last_batch_iteration < 0:
logger.warning("Attempting to get learning rate from scheduler before it has started")
return [0.0]
gamma = self._get_gamma()
return [min_lr + (delta_lr * gamma) for min_lr, delta_lr in zip(self.min_lrs, self.delta_lrs)]
def get_last_lr(self):
""" Return last computed learning rate by current scheduler.
"""
assert getattr(self, '_last_lr', None) is not None, "need to call step() first"
return self._last_lr
def step(self, last_batch_iteration=None):
if last_batch_iteration is None:
last_batch_iteration = self.last_batch_iteration + 1
self.last_batch_iteration = last_batch_iteration
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
def state_dict(self):
return {'last_batch_iteration': self.last_batch_iteration}
def load_state_dict(self, sd):
self.last_batch_iteration = sd['last_batch_iteration']
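    # _get_gamma returns the warmup progress in [0, 1]: log(step + 1) / log(warmup_num_steps)
    # for 'log' warmup, step / warmup_num_steps for 'linear' warmup, and 1.0 once
    # warmup_num_steps have elapsed (the LR then stays at warmup_max_lr).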
def _get_gamma(self):
if self.last_batch_iteration < self.warmup_num_steps:
if self.warmup_type == WARMUP_LOG_RATE:
return self.inverse_log_warm_up * math.log(self.last_batch_iteration + 1)
elif self.warmup_type == WARMUP_LINEAR_RATE:
return self.last_batch_iteration / self.warmup_num_steps
return 1.0
def _format_param(self, optimizer, param_value, param_name):
if isinstance(param_value, list) or isinstance(param_value, tuple):
if len(param_value) != len(optimizer.param_groups):
raise ValueError("expected {} value for {}, got {}".format(len(optimizer.param_groups), param_name,
FileNotFoundError(param_value)))
return list(param_value)
return [param_value] * len(optimizer.param_groups)
class WarmupDecayLR(WarmupLR):
"""Increase the learning rate of each parameter group from min lr to max lr
over warmup_num_steps steps, and then decay at linear rate over the remaining training steps.
Args:
optimizer (Optimizer): Wrapped optimizer.
total_num_steps (int): total number of training steps
warmup_min_lr (float or list): minimum learning rate. Default: 0
warmup_max_lr (float or list): maximum learning rate. Default: 0.001
warmup_num_steps (int): number of steps to warm up from min_lr to max_lr. Default: 1000
        warmup_type {'log', 'linear'}: increasing function from min_lr to max_lr during warmup. Default: log
last_batch_iteration (int): The index of the last batch. Default: -1.
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = WarmupDecayLR(optimizer, 1000000)
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
"""
def __init__(self,
optimizer: Optimizer,
total_num_steps: int,
warmup_min_lr: float = 0.0,
warmup_max_lr: float = 0.001,
warmup_num_steps: int = 1000,
warmup_type: str = WARMUP_LOG_RATE,
last_batch_iteration: int = -1):
self.total_num_steps = total_num_steps
super(WarmupDecayLR, self).__init__(optimizer, warmup_min_lr, warmup_max_lr, warmup_num_steps, warmup_type,
last_batch_iteration)
if self.total_num_steps < self.warmup_num_steps:
logger.warning('total_num_steps {} is less than warmup_num_steps {}'.format(
total_num_steps, warmup_num_steps))
def _get_gamma(self):
if self.last_batch_iteration < self.warmup_num_steps:
if self.warmup_type == WARMUP_LOG_RATE:
return self.inverse_log_warm_up * math.log(self.last_batch_iteration + 1)
elif self.warmup_type == WARMUP_LINEAR_RATE:
return self.last_batch_iteration / self.warmup_num_steps
return max(
0.0,
float(self.total_num_steps - self.last_batch_iteration) /
float(max(1.0, self.total_num_steps - self.warmup_num_steps)))
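# A minimal usage sketch: WarmupDecayLR driving a plain torch.optim.SGD optimizer.
# The toy model, step counts, and LR values below are arbitrary assumptions chosen
# only for illustration.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    # Warm up from 0 to 0.1 over 10 steps, then decay linearly toward 0 at step 100.
    scheduler = WarmupDecayLR(optimizer, total_num_steps=100, warmup_max_lr=0.1, warmup_num_steps=10)
    for _ in range(100):
        optimizer.step()  # a real loop would compute a loss and call backward() first
        scheduler.step()  # called once per batch, after optimizer.step()
    print(scheduler.get_last_lr())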
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import math
from deepspeed.utils import logger
from deepspeed.ops.quantizer import ds_quantizer
TWO_D_PARAMS = 6
class Quantizer(object):
def __init__(self,
q_groups=1,
q_mixed_fp16=False,
q_change_ratio=0.01,
q_type=0,
q_rounding=0,
q_verbose=False,
q_eigenvalue=False,
use_quantizer_kernel=False,
layer_num=0):
self.q_groups = q_groups
self.q_mixed_fp16 = q_mixed_fp16
self.q_change_ratio = q_change_ratio
self.q_type = q_type
self.qsteps = 0
self.quantize_real_ratio = 1.000
self.q_verbose = q_verbose
self.q_eigenvalue = q_eigenvalue
self.use_quantizer_kernel = use_quantizer_kernel
self.q_rounding = q_rounding
self.layer_num = layer_num
def any_precision_switch(self):
        # Temporarily disabled functionality
if self.layer_num == 0:
return True
result = False
for index in range(self.layer_num):
if self.q_start_bits[index] != self.q_target_bits:
next_step = self.qsteps + (TWO_D_PARAMS * (self.layer_num if self.layer_num != 0 else 1))
if next_step >= self.q_period[index]:
result = True
return result
    def quantize(self, parameter_group, overflow, eigenvalue_enabled, block_eigenvalue=None):
if overflow and not eigenvalue_enabled:
return
self.step()
self.update_fp16_ratio()
for i in range(len(parameter_group)):
for p in parameter_group[i]:
if len(p.size()) > 1 and hasattr(p, "start_bits") and p.start_bits:
param_id = id(p)
                    if block_eigenvalue is None:
                        eigenvalue, layer_id = None, 0
                    else:
                        eigenvalue, layer_id = block_eigenvalue.get(param_id, (None, 0))
if eigenvalue is not None:
factor = 1 + math.floor(eigenvalue * 4)
p.data = self.compute_quantization(p.data, layer_id, factor)
else:
p.data = self.compute_quantization(p, layer_id)
def step(self):
self.qsteps += 1
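    # quantize_highbit: group-wise fake quantization for >= 3 bits. Each of the
    # q_groups rows gets its own scale from the group's min/max ('symmetric' uses a
    # zero-centered range, 'asymmetric' adds a zero point), values are rounded with
    # optional uniform noise for stochastic rounding, clamped to the bit range, and
    # then mapped back to floating point.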
def quantize_highbit(self, inputs, num_bits):
q_range = 2**num_bits
input_flat = inputs.reshape(self.q_groups, -1)
g_min = input_flat.amin(dim=-1, keepdim=True)
g_max = input_flat.amax(dim=-1, keepdim=True)
        # Stochastic rounding noise, uniform in [-0.5, 0.5); zero when rounding to nearest
if self.q_rounding == 'nearest':
p = 0.
else:
p = input_flat.new(input_flat.shape).uniform_(-0.5, 0.5)
if self.q_type == 'symmetric':
scale = 2 * torch.max(torch.abs(g_min), torch.abs(g_max)) / q_range
zero_point = 0.
input_flat = (input_flat / scale + p).round().clamp(-(q_range >> 1), (q_range >> 1) - 1) * scale
elif self.q_type == 'asymmetric':
scale = (g_max - g_min) / q_range
zero_point = (g_min / scale).round() * scale
input_flat = ((input_flat - zero_point) / scale + p).round().clamp(0, (q_range - 1)) * scale + zero_point
output = input_flat.reshape(inputs.shape).contiguous()
return output
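    # quantize_tenary: ternary quantization per group. Weights whose magnitude exceeds
    # 0.7 * mean(|w|) for their group become +/- alpha (alpha is the mean magnitude of
    # the surviving weights in that group); everything else becomes 0.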
def quantize_tenary(self, inputs):
input_flat = inputs.reshape(self.q_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1).div(n)
thres = (0.7 * m).view(-1, 1) #.expand_as(input_flat)
pos = (input_flat > thres).type(inputs.type())
neg = (input_flat < -thres).type(inputs.type())
mask = (input_flat.abs() > thres).type(inputs.type())
alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1)
output = alpha * pos - alpha * neg
output = output.reshape(inputs.shape).contiguous()
return output
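    # quantize_binary: 1-bit quantization per group; each weight becomes
    # sign(w) * mean(|w|) computed over its group.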
def quantize_binary(self, inputs):
input_flat = inputs.reshape(self.q_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1, keepdim=True).div(n)
output = input_flat.sign().mul(m)
output = output.reshape(inputs.shape).contiguous()
return output
def mixed_fp16_quantize(self, input, input_q, index):
if self.q_mixed_fp16 and self.q_start_bits[index] >= (self.q_target_bits - 1):
input_q = input * self.quantize_real_ratio + (1 - self.quantize_real_ratio) * input_q
return input_q
return input_q
def compute_quantization(self, input, index=0, factor=1):
# fixing the quantization bits based on the training steps
# when reducing 1 bit at each period, we increase the period
# to go slowly toward the target quantization bits
# the period and starting bit can be configured
if input.start_bits != input.target_bits:
if self.qsteps >= input.q_period:
self.quantize_real_ratio = 1.0
input.q_period <<= 1
input.q_period *= factor
input.start_bits -= 1
if self.q_verbose:
logger.info(
f'Quantization settings: current bit-precision = {input.start_bits}, step = {self.qsteps}, quantization period = {input.q_period}, index = {index}'
)
assert (input.start_bits >= input.target_bits), \
'Quantization bit is lower than target precision bits!'
if self.use_quantizer_kernel:
if input.start_bits <= 2:
raise ValueError('Quantization bit is too low, please do it without quantization kernel!')
input_q = ds_quantizer(input.data.clone(),
self.q_groups,
input.start_bits,
asym=False if self.q_type == 'symmetric' else True,
sr=False if self.q_rounding == 'nearest_neighbor' else True)
else:
if input.start_bits >= 3:
input_flat = self.quantize_highbit(input.data, input.start_bits)
elif input.start_bits == 2:
assert self.q_type == 'symmetric', 'Quantization type is not symmetric!'
                assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest!'
input_flat = self.quantize_tenary(input.data)
elif input.start_bits == 1:
assert self.q_type == 'symmetric', 'Quantization type is not symmetric!'
                assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest!'
input_flat = self.quantize_binary(input.data)
if self.use_quantizer_kernel:
return self.mixed_fp16_quantize(input.data, input_q, index)
else:
if self.q_mixed_fp16 and input.start_bits >= input.target_bits - 1:
input_flat = self.quantize_real_ratio * input.data + \
(1 - self.quantize_real_ratio) * input_flat
return input_flat
def update_fp16_ratio(self):
if self.q_mixed_fp16:
if self.quantize_real_ratio > 0:
self.quantize_real_ratio -= self.q_change_ratio
else:
self.quantize_real_ratio = 0.000
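# A minimal sketch exercising the group-wise quantization above; the group count,
# bit width, and string settings ('symmetric', 'nearest') are assumptions chosen to
# match the comparisons inside quantize_highbit, not required defaults.
if __name__ == "__main__":
    q = Quantizer(q_groups=4, q_type='symmetric', q_rounding='nearest')
    weights = torch.randn(4, 16)
    fake_quantized = q.quantize_highbit(weights, num_bits=8)
    # The quantized tensor keeps the original shape and dtype but takes at most
    # 2**8 distinct values per group.
    print(weights.shape, fake_quantized.shape, (weights - fake_quantized).abs().max())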
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..module_inject.replace_policy import HFBertLayerPolicy, replace_policies
from deepspeed.accelerator import get_accelerator
class WeightQuantization(object):
def __init__(self, mlp_extra_grouping=True, mp_size=1):
self.dense_scales = []
self.qkv_scales = []
self.mlp4hh_scales = []
self.mlph4h_scales = []
self.mlp_extra_grouping = mlp_extra_grouping
self.mp_size = mp_size
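    # quantize_data: symmetric, group-wise quantization. The flattened tensor is split
    # into `groups` chunks, each chunk gets scale 2**quantize_bits / (2 * max|w| + eps),
    # and the scaled values are rounded and clamped to the signed integer range before
    # being stored as int8 alongside the per-group scales.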
def quantize_data(self, data, quantize_bits, groups, key=None):
data_groups = torch.split(data.float().view(-1), data.numel() // groups)
max_d = [max(g.max(), g.min().abs()) for g in data_groups]
data_scale = [float(1 << quantize_bits) / (2 * mx + 1e-5) for mx in max_d]
data_int = [(g * s) for g, s in zip(data_groups, data_scale)]
data_int = [
di.round().clamp(-(1 << (quantize_bits - 1)), (((1 << (quantize_bits - 1)) - 1))) for di in data_int
]
data_int = torch.cat(data_int).reshape(data.shape)
data_int = data_int.to(torch.int8)
data_scale = torch.cat([s.unsqueeze(0).unsqueeze(0) for s in data_scale])
return data_int, data_scale
def is_mlp(self, data, merge_count=1):
        return ((self.mp_size * data.shape[0] * merge_count) / data.shape[1] == 4 or \
            (self.mp_size * data.shape[1] * merge_count) / data.shape[0] == 4)
def is_qkv(self, data):
return ((self.mp_size * data.shape[0]) / data.shape[1] == 3 or \
(self.mp_size * data.shape[1]) / data.shape[0] == 3)
def Quantize(self, value_list, quantize_bits, groups, key, merge_dim=0):
if self.mlp_extra_grouping and self.is_mlp(value_list[0], merge_count=len(value_list)):
groups *= 2
q_scale = []
index = 0
for data in value_list:
data_int, data_scale = self.quantize_data(data, quantize_bits, groups, key)
q_scale.append(data_scale)
value_list[index] = data_int
index += 1
q_scale = (1 /
torch.cat(q_scale, dim=merge_dim).to(get_accelerator().current_device_name()).view(-1).unsqueeze(0))
if "mlp.dense_4h_to_h.weight" in key:
self.mlp4hh_scales.append(q_scale)
elif "mlp.dense_h_to_4h.weight" in key:
self.mlph4h_scales.append(q_scale)
elif "attention.query_key_value.weight" in key:
self.qkv_scales.append(q_scale)
else:
self.dense_scales.append(q_scale)
return value_list
def merge_layer_scales(self, layer_scales):
max_dim = max([s.shape[-1] for s in layer_scales])
layer_scales = [
torch.cat((s, torch.zeros((1, max_dim - s.shape[-1]), device=get_accelerator().current_device_name())),
dim=-1) if s.shape[-1] < max_dim else s for s in layer_scales
]
return torch.cat(layer_scales).unsqueeze(0)
def merge_scales(self):
all_scales = []
for dense_scale, qkv_scale, m4hh_scale, mh4h_scale in \
zip(self.dense_scales, self.qkv_scales, self.mlp4hh_scales, self.mlph4h_scales):
all_scales.append(self.merge_layer_scales([qkv_scale, dense_scale, mh4h_scale, m4hh_scale]))
return torch.cat(all_scales)
def merge_scales_split(self, split_count):
all_scales = [[] for _ in range(split_count)]
for dense_scale, qkv_scale, m4hh_scale, mh4h_scale in \
zip(self.dense_scales, self.qkv_scales, self.mlp4hh_scales, self.mlph4h_scales):
dense_scale = torch.split(dense_scale, dense_scale.numel() // split_count)
qkv_scale = torch.split(qkv_scale, qkv_scale.numel() // split_count)
m4hh_scale = torch.split(m4hh_scale, m4hh_scale.numel() // split_count)
mh4h_scale = torch.split(mh4h_scale, mh4h_scale.numel() // split_count)
for s in range(split_count):
all_scales[s].append(
torch.cat([
torch.cat((qkv_scale[s], torch.zeros_like(qkv_scale[s])), dim=1),
torch.cat((dense_scale[s], torch.zeros_like(dense_scale[s])), dim=1), mh4h_scale[s],
m4hh_scale[s]
]).unsqueeze(0))
        all_scales = [torch.cat(scales_a) for scales_a in all_scales]
        return all_scales
def sd_quantize_megatron(self, sd, quantize_bits, groups):
keys = sd.keys()
for key in keys:
value_list = [sd[key]]
if "attention.dense.weight" in key or "mlp.dense_4h_to_h.weight" in key or \
"mlp.dense_h_to_4h.weight" in key or "attention.query_key_value.weight" in key:
value_list = self.Quantize(value_list, quantize_bits, groups, key=key)
sd[key] = value_list[0]
all_scales = self.merge_scales()
return sd, all_scales
def model_quantize(self, model, quantize_policy, quantize_bits, groups):
all_scales = []
def quantize_fn(layer, policy_cls):
policy = policy_cls(layer)
_, qkvw, _, dense_w, _, _ = policy.attention()
_, _h4h_w, _, _4hh_w, _ = policy.mlp()
keys = [qkvw, dense_w, _h4h_w, _4hh_w]
layer_scales = []
for key in range(len(keys)):
if self.mlp_extra_grouping and self.is_mlp(keys[key]):
data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups * 2)
elif policy_cls is HFBertLayerPolicy and self.is_qkv(keys[key]):
data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups * 3)
else:
data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups)
keys[key].copy_(data_quantized)
layer_scales.append((1 / data_scale.to(get_accelerator().current_device_name()).view(-1).unsqueeze(0)))
all_scales.append(self.merge_layer_scales(layer_scales))
return layer
def _quantize_module(model, policies):
for name, child in model.named_children():
if child.__class__ in policies:
quantize_fn, replace_policy = policies[child.__class__]
setattr(model, name, quantize_fn(child, replace_policy))
else:
_quantize_module(child, policies)
return model
policy = {}
if quantize_policy is not None:
for layer_name, replace_policy in quantize_policy.items():
policy.update({layer_name: (quantize_fn, replace_policy)})
else:
for plcy in replace_policies:
policy.update({plcy._orig_layer_class: (quantize_fn, plcy)})
quantized_module = _quantize_module(model, policy)
return quantized_module, torch.cat(all_scales)
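# A minimal round-trip sketch for quantize_data; the tensor shape, bit width, and
# group count are arbitrary assumptions. Dequantizing with the per-group scales
# recovers the weights up to quantization error.
if __name__ == "__main__":
    wq = WeightQuantization(mlp_extra_grouping=False)
    w = torch.randn(8, 8)
    w_int8, scales = wq.quantize_data(w, quantize_bits=8, groups=4)
    w_dequant = (w_int8.float().view(4, -1) / scales).view(w.shape)
    print((w - w_dequant).abs().max())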
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#############################################
# Routes
#############################################
ROUTE_TRAIN = "train"
ROUTE_EVAL = "eval"
ROUTE_PREDICT = "predict"
ROUTE_ENCODE = "encode"
#############################################
# Batch size
#############################################
TRAIN_BATCH_SIZE = "train_batch_size"
TRAIN_BATCH_SIZE_DEFAULT = None
#############################################
# Sparse attention
#############################################
SPARSE_ATTENTION = "sparse_attention"
SPARSE_DENSE_MODE = "dense"
SPARSE_FIXED_MODE = "fixed"
SPARSE_VARIABLE_MODE = "variable"
SPARSE_BIGBIRD_MODE = "bigbird"
SPARSE_BSLONGFORMER_MODE = "bslongformer"
SPARSE_MODE = "mode"
SPARSE_MODE_DEFAULT = SPARSE_FIXED_MODE
SPARSE_BLOCK = "block"
SPARSE_BLOCK_DEFAULT = 16
SPARSE_DIFFERENT_LAYOUT_PER_HEAD = "different_layout_per_head"
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT = False
SPARSE_NUM_LOCAL_BLOCKS = "num_local_blocks"
SPARSE_NUM_LOCAL_BLOCKS_DEFAULT = 4
SPARSE_NUM_GLOBAL_BLOCKS = "num_global_blocks"
SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT = 1
SPARSE_ATTENTION_TYPE = "attention"
SPARSE_ATTENTION_TYPE_DEFAULT = "bidirectional"
SPARSE_HORIZONTAL_GLOBAL_ATTENTION = "horizontal_global_attention"
SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT = False
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS = "num_different_global_patterns"
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS_DEFAULT = 1
SPARSE_NUM_RANDOM_BLOCKS = "num_random_blocks"
SPARSE_NUM_RANDOM_BLOCKS_DEFAULT = 0
SPARSE_LOCAL_WINDOW_BLOCKS = "local_window_blocks"
SPARSE_LOCAL_WINDOW_BLOCKS_DEFAULT = [4]
SPARSE_GLOBAL_BLOCK_INDICES = "global_block_indices"
SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT = [0]
SPARSE_GLOBAL_BLOCK_END_INDICES = "global_block_end_indices"
SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT = None
SPARSE_NUM_SLIDING_WINDOW_BLOCKS = "num_sliding_window_blocks"
SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT = 3
#############################################
# Optimizer and lr scheduler
#############################################
OPTIMIZER = "optimizer"
OPTIMIZER_TYPE_DEFAULT = None
OPTIMIZER_PARAMS = "params"
TYPE = "type"
LEGACY_FUSION = "legacy_fusion"
LEGACY_FUSION_DEFAULT = False
SCHEDULER = "scheduler"
SCHEDULER_TYPE_DEFAULT = None
SCHEDULER_PARAMS = "params"
MAX_GRAD_NORM = 'max_grad_norm'
#############################################
# ZeRO optimizer compatibility
#############################################
ZERO_ALLOW_UNTESTED_OPTIMIZER = "zero_allow_untested_optimizer"
ZERO_ALLOW_UNTESTED_OPTIMIZER_DEFAULT = False
ZERO_FORCE_DS_CPU_OPTIMIZER = "zero_force_ds_cpu_optimizer"
ZERO_FORCE_DS_CPU_OPTIMIZER_DEFAULT = True
# Steps
STEPS_PER_PRINT = "steps_per_print"
STEPS_PER_PRINT_DEFAULT = 10
#########################################
# Training micro batch size per GPU
#########################################
# Batch size for one training step. This is used when the
# TRAIN_BATCH_SIZE cannot fit in GPU memory to determine
# the number of gradient accumulation steps. By default, this
# is set to None. Users can configure in ds_config.json as below example:
TRAIN_MICRO_BATCH_SIZE_PER_GPU_FORMAT = '''
TRAIN_MICRO_BATCH_SIZE_PER_GPU is defined in this format:
"train_micro_batch_size_per_gpu": 1
'''
TRAIN_MICRO_BATCH_SIZE_PER_GPU = "train_micro_batch_size_per_gpu"
TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = None
#########################################
# Gradient Accumulation
#########################################
# Gradient accumulation feature. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
GRADIENT_ACCUMULATION_FORMAT = '''
Gradient Accumulation should be of the format:
"gradient_accumulation_steps": 1
'''
GRADIENT_ACCUMULATION_STEPS = "gradient_accumulation_steps"
GRADIENT_ACCUMULATION_STEPS_DEFAULT = None
# DeepSpeed CSR gradient sparsity
SPARSE_GRADIENTS = "sparse_gradients"
SPARSE_GRADIENTS_DEFAULT = False
#########################################
# BFLOAT16 support
#########################################
# BFLOAT16 feature. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
BFLOAT16_FORMAT = '''
BFLOAT16 parameters should be of the format:
"bf16": {
"enabled": true
}
'''
BFLOAT16 = "bf16"
BFLOAT16_OLD = "bfloat16" # keeping for backwards compatibility
BFLOAT16_ENABLED = "enabled"
BFLOAT16_ENABLED_DEFAULT = False
#########################################
# FP16 support
#########################################
# FP16 feature. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
FP16_FORMAT = '''
FP16 parameters should be of the format:
"fp16": {
"enabled": true,
"auto_cast": false,
"loss_scale": 0,
"initial_scale_power": 16,
"loss_scale_window": 1000,
"hysteresis": 2,
"min_loss_scale": 1
}
'''
FP16 = "fp16"
FP16_ENABLED = "enabled"
FP16_ENABLED_DEFAULT = False
# FP16 loss scale, zero means using dynamic scaling
FP16_LOSS_SCALE = "loss_scale"
FP16_LOSS_SCALE_DEFAULT = 0
FP16_AUTO_CAST = "auto_cast"
FP16_AUTO_CAST_DEFAULT = False
# FP16 initial dynamic scale loss power
FP16_INITIAL_SCALE_POWER = "initial_scale_power"
FP16_INITIAL_SCALE_POWER_DEFAULT = 16
# FP16 loss scale window
FP16_LOSS_SCALE_WINDOW = "loss_scale_window"
FP16_LOSS_SCALE_WINDOW_DEFAULT = 1000
# FP16 hysteresis
FP16_HYSTERESIS = "hysteresis"
FP16_HYSTERESIS_DEFAULT = 2
# FP16 min loss scale
FP16_MIN_LOSS_SCALE = "min_loss_scale"
FP16_MIN_LOSS_SCALE_DEFAULT = 1
# FP16 master and grads
FP16_MASTER_WEIGHTS_AND_GRADS = "fp16_master_weights_and_grads"
FP16_MASTER_WEIGHTS_AND_GRADS_DEFAULT = False
#########################################
# Apex AMP support
#########################################
# Use Apex AMP for mixed precision support, all parameters (other than 'enabled') will be passed to
# amp.initialize(model, optimizer, **amp_params)
# See apex documentation for supported parameters/features: https://nvidia.github.io/apex/amp.html#apex.amp.initialize
AMP_FORMAT = '''
"amp" {
"enabled: true,
"opt_level": "O1",
...
}
'''
AMP = "amp"
AMP_ENABLED = "enabled"
AMP_ENABLED_DEFAULT = False
#########################################
# Gradient clipping
#########################################
# Gradient clipping. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
GRADIENT_CLIPPING_FORMAT = '''
Gradient clipping should be enabled as:
"gradient_clipping": 1.0
'''
GRADIENT_CLIPPING = 'gradient_clipping'
GRADIENT_CLIPPING_DEFAULT = 0.
#########################################
# Communication data type
#########################################
# Supported types: ['none', 'fp16', 'fp32']
# By default, this feature is not enabled ('none' value)
# Users can configure in ds_config.json as below example:
COMMUNICATION_DATA_TYPE_FORMAT = '''
Communication data type should be set as:
"communication_data_type": "fp32"
'''
COMMUNICATION_DATA_TYPE = "communication_data_type"
COMMUNICATION_DATA_TYPE_DEFAULT = None
#########################################
# Scale/predivide gradients before allreduce
#########################################
# Prescale gradients. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
PRESCALE_GRADIENTS_FORMAT = '''
Gradient prescaling should be enabled as:
"prescale_gradients": true
'''
PRESCALE_GRADIENTS = "prescale_gradients"
PRESCALE_GRADIENTS_DEFAULT = False
GRADIENT_PREDIVIDE_FACTOR_FORMAT = '''
Gradient predivide factor should be enabled as:
"gradient_predivide_factor": 1.0
'''
GRADIENT_PREDIVIDE_FACTOR = "gradient_predivide_factor"
GRADIENT_PREDIVIDE_FACTOR_DEFAULT = 1.0
#########################################
# Disable AllGather
#########################################
# Disable AllGather. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
DISABLE_ALLGATHER_FORMAT = '''
Disable AllGather should be enabled as:
"disable_allgather": true
'''
DISABLE_ALLGATHER = "disable_allgather"
DISABLE_ALLGATHER_DEFAULT = False
#########################################
# Dump DeepSpeed state
#########################################
# Dump State. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
DUMP_STATE_FORMAT = '''
Dump state should be enabled as:
"dump_state": true
'''
DUMP_STATE = 'dump_state'
DUMP_STATE_DEFAULT = False
#########################################
# Vocabulary size
#########################################
# Vocabulary size.
# Users can configure in ds_config.json as below example:
VOCABULARY_SIZE_FORMAT = '''
Vocabulary size can be specified as:
"vocabulary_size": 1024
'''
VOCABULARY_SIZE = 'vocabulary_size'
VOCABULARY_SIZE_DEFAULT = None
#########################################
# Wall clock breakdown
#########################################
# Wall clock breakdown. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
WALL_CLOCK_BREAKDOWN_FORMAT = '''
Wall clock breakdown should be enabled as:
"wall_clock_breakdown": true
'''
WALL_CLOCK_BREAKDOWN = 'wall_clock_breakdown'
WALL_CLOCK_BREAKDOWN_DEFAULT = False
MEMORY_BREAKDOWN = 'memory_breakdown'
MEMORY_BREAKDOWN_DEFAULT = False
#########################################
# Eigenvalue
#########################################
# Eigenvalue computation. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
EIGENVALUE_FORMAT = '''
Eigenvalue parameters can be specified as:
"eigenvalue": {
"enabled": true,
"verbose": true,
"max_iter": 100,
"tol": 1e-2,
"stability": 1e-6
}
'''
EIGENVALUE = "eigenvalue"
# Eigenvalue enable signal
EIGENVALUE_ENABLED = "enabled"
EIGENVALUE_ENABLED_DEFAULT = False
EIGENVALUE_VERBOSE = "verbose"
EIGENVALUE_VERBOSE_DEFAULT = False
EIGENVALUE_MAX_ITER = "max_iter"
EIGENVALUE_MAX_ITER_DEFAULT = 100
EIGENVALUE_TOL = "tol"
EIGENVALUE_TOL_DEFAULT = 1e-2
EIGENVALUE_STABILITY = "stability"
EIGENVALUE_STABILITY_DEFAULT = 1e-6
EIGENVALUE_GAS_BOUNDARY_RESOLUTION = "gas_boundary_resolution"
EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT = 1
EIGENVALUE_LAYER_NAME = "layer_name"
EIGENVALUE_LAYER_NAME_DEFAULT = "bert.encoder.layer"
EIGENVALUE_LAYER_NUM = "layer_num"
EIGENVALUE_LAYER_NUM_DEFAULT = 0
#########################################
# Progressive Layer Drop (PLD)
#########################################
PROGRESSIVE_LAYER_DROP = "progressive_layer_drop"
# PLD enable signal
PLD_ENABLED = "enabled"
PLD_ENABLED_DEFAULT = False
PLD_THETA = "theta"
PLD_THETA_DEFAULT = 1.0
PLD_GAMMA = "gamma"
PLD_GAMMA_DEFAULT = 0.001
#########################################
# Validation modes
#########################################
class ValidationMode:
WARN = "WARN"
IGNORE = "IGNORE"
FAIL = "FAIL"
#########################################
# Checkpoint config params
#########################################
# "checkpoint": {
# tag_validation=["Ignore"|"Warn"|"Fail"]
# load_universal=false
# use_node_local_storage=false
# parallel_write: {
# pipeline_stage: [True|False]
# }
# }
CHECKPOINT = "checkpoint"
CHECKPOINT_TAG_VALIDATION = "tag_validation"
CHECKPOINT_TAG_VALIDATION_DEFAULT = ValidationMode.WARN
CHECKPOINT_TAG_VALIDATION_MODES = [ValidationMode.WARN, ValidationMode.IGNORE, ValidationMode.FAIL]
LOAD_UNIVERSAL_CHECKPOINT = "load_universal"
LOAD_UNIVERSAL_CHECKPOINT_DEFAULT = False
USE_NODE_LOCAL_STORAGE_CHECKPOINT = "use_node_local_storage"
USE_NODE_LOCAL_STORAGE_CHECKPOINT_DEFAULT = False
CHECKPOINT_PARALLEL_WRITE = "parallel_write"
CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE = "pipeline_stage"
CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT = False
#########################################
# Data types config params
#########################################
# "data_types": {
# grad_accum_dtype=["bf16"|"fp16"|"fp32"]
# }
# }
DATA_TYPES = "data_types"
GRAD_ACCUM_DTYPE = "grad_accum_dtype"
GRAD_ACCUM_DTYPE_DEFAULT = None
#########################################
# Drop the last incomplete batch
#########################################
# dataloader_drop_last. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
DATALOADER_DROP_LAST_FORMAT = '''
The last incomplete batch can be dropped by setting:
"dataloader_drop_last": True
'''
DATALOADER_DROP_LAST = "dataloader_drop_last"
DATALOADER_DROP_LAST_DEFAULT = False
#########################################
# PIPELINE PARALLELISM
#########################################
PIPE_REPLICATED = 'ds_pipe_replicated'
#########################################
# DATA PARALLELISM
#########################################
DATA_PARALLEL_GROUP = "data_parallel_group"
GLOBAL_RANK = "global_rank"