diff --git a/.gitattributes b/.gitattributes index e8ed92b1f60c2bc601898b629a3fdfff84f651c4..241eb23f00c695349416a7f419bb0e82748d0bf2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -142,3 +142,6 @@ venv/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux venv/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torch/lib/libc10.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/ptxas filter=lfs diff=lfs merge=lfs -text diff --git a/venv/bin/accelerate-config b/venv/bin/accelerate-config new file mode 100644 index 0000000000000000000000000000000000000000..4d9c885ca4a55e0c187d01abcb603a9f4c530da8 --- /dev/null +++ b/venv/bin/accelerate-config @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/cronscript/venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from accelerate.commands.config import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/accelerate-launch b/venv/bin/accelerate-launch new file mode 100644 index 0000000000000000000000000000000000000000..1d9df0ae301903f143a2df034e567a8a9af16a8a --- /dev/null +++ b/venv/bin/accelerate-launch @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/cronscript/venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from accelerate.commands.launch import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/activate b/venv/bin/activate new file mode 100644 index 0000000000000000000000000000000000000000..a842b0bae03726d762cb3ebe7283dbbc6e328e20 --- /dev/null +++ b/venv/bin/activate @@ -0,0 +1,69 @@ +# This file must be used with "source bin/activate" *from bash* +# you cannot run it directly + +deactivate () { + # reset old environment variables + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # This should detect bash and zsh, which have a hash command that must + # be called to get it to forget past commands. Without forgetting + # past commands the $PATH changes we made may not be respected + if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null + fi + + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + unset VIRTUAL_ENV_PROMPT + if [ ! "${1:-}" = "nondestructive" ] ; then + # Self destruct! 
+ unset -f deactivate + fi +} + +# unset irrelevant variables +deactivate nondestructive + +VIRTUAL_ENV="/mnt/weka/peacock/cronscript/venv" +export VIRTUAL_ENV + +_OLD_VIRTUAL_PATH="$PATH" +PATH="$VIRTUAL_ENV/bin:$PATH" +export PATH + +# unset PYTHONHOME if set +# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) +# could use `if (set -u; : $PYTHONHOME) ;` in bash +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" + unset PYTHONHOME +fi + +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then + _OLD_VIRTUAL_PS1="${PS1:-}" + PS1="(venv) ${PS1:-}" + export PS1 + VIRTUAL_ENV_PROMPT="(venv) " + export VIRTUAL_ENV_PROMPT +fi + +# This should detect bash and zsh, which have a hash command that must +# be called to get it to forget past commands. Without forgetting +# past commands the $PATH changes we made may not be respected +if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null +fi diff --git a/venv/bin/activate.csh b/venv/bin/activate.csh new file mode 100644 index 0000000000000000000000000000000000000000..d96eb9409c04feb826c9c2774144081ea7a6959d --- /dev/null +++ b/venv/bin/activate.csh @@ -0,0 +1,26 @@ +# This file must be used with "source bin/activate.csh" *from csh*. +# You cannot run it directly. +# Created by Davide Di Blasi . +# Ported to Python 3.3 venv by Andrew Svetlov + +alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate' + +# Unset irrelevant variables. +deactivate nondestructive + +setenv VIRTUAL_ENV "/mnt/weka/peacock/cronscript/venv" + +set _OLD_VIRTUAL_PATH="$PATH" +setenv PATH "$VIRTUAL_ENV/bin:$PATH" + + +set _OLD_VIRTUAL_PROMPT="$prompt" + +if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then + set prompt = "(venv) $prompt" + setenv VIRTUAL_ENV_PROMPT "(venv) " +endif + +alias pydoc python -m pydoc + +rehash diff --git a/venv/bin/chardetect b/venv/bin/chardetect new file mode 100644 index 0000000000000000000000000000000000000000..3185e688f1f1aa62b0d27e854ac67b2e563c3313 --- /dev/null +++ b/venv/bin/chardetect @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/cronscript/venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from chardet.cli.chardetect import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/ds_bench b/venv/bin/ds_bench new file mode 100644 index 0000000000000000000000000000000000000000..c0b7d221f319b64510910cd7a84cb0392aec154c --- /dev/null +++ b/venv/bin/ds_bench @@ -0,0 +1,17 @@ +#!/mnt/weka/peacock/cronscript/venv/bin/python3 + +from benchmarks.communication.run_all import main +from benchmarks.communication.constants import * +from benchmarks.communication.utils import * +import os +import sys + +# Run the same file with deepspeed launcher. 
This is required since setuptools will auto-detect python files and insert a python shebang for both 'scripts' and 'entry_points', and these benchmarks require the DS launcher
+required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
+if not all(map(lambda v: v in os.environ, required_env)):
+    import subprocess
+    subprocess.run("deepspeed $(which ds_bench) " + " ".join(sys.argv[1:]), shell=True)
+else:
+    args = benchmark_parser().parse_args()
+    rank = args.local_rank
+    main(args, rank)
diff --git a/venv/bin/ds_report b/venv/bin/ds_report
new file mode 100644
index 0000000000000000000000000000000000000000..bc085b7f2952c5ec43bfb5b7f2254c814c5e4b7e
--- /dev/null
+++ b/venv/bin/ds_report
@@ -0,0 +1,6 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+
+from deepspeed.env_report import cli_main
+
+if __name__ == '__main__':
+    cli_main()
diff --git a/venv/bin/hjson b/venv/bin/hjson
new file mode 100644
index 0000000000000000000000000000000000000000..b41fdcde2fddaa82ffb720f37c4cd86e7a98f7d7
--- /dev/null
+++ b/venv/bin/hjson
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from hjson.tool import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/venv/bin/isympy b/venv/bin/isympy
new file mode 100644
index 0000000000000000000000000000000000000000..7c681459eda8dc09b2d966e99219f69f1e13b001
--- /dev/null
+++ b/venv/bin/isympy
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from isympy import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/venv/bin/lm_eval b/venv/bin/lm_eval
new file mode 100644
index 0000000000000000000000000000000000000000..0f1b3730eb14f68841af93b9fe3377b84e296edc
--- /dev/null
+++ b/venv/bin/lm_eval
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from lm_eval.__main__ import cli_evaluate
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(cli_evaluate())
diff --git a/venv/bin/nltk b/venv/bin/nltk
new file mode 100644
index 0000000000000000000000000000000000000000..ef7a87da5854c7ab2cc282a07107fa0bb3d1d7e7
--- /dev/null
+++ b/venv/bin/nltk
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from nltk.cli import cli
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(cli())
diff --git a/venv/bin/pip b/venv/bin/pip
new file mode 100644
index 0000000000000000000000000000000000000000..f85e2b2430b776c880beb2d7a979c7d82343d01f
--- /dev/null
+++ b/venv/bin/pip
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from pip._internal.cli.main import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/venv/bin/torchrun b/venv/bin/torchrun
new file mode 100644
index 0000000000000000000000000000000000000000..116a41bb3c616f5e7c4ff04d776bdd93528ebd68
--- /dev/null
+++ b/venv/bin/torchrun
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from torch.distributed.run import main
+if __name__ == '__main__':
+    sys.argv[0] =
re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/tqdm b/venv/bin/tqdm new file mode 100644 index 0000000000000000000000000000000000000000..55015dfac41e823d2d25eb42a6a665136f438716 --- /dev/null +++ b/venv/bin/tqdm @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/cronscript/venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from tqdm.cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/transformers-cli b/venv/bin/transformers-cli new file mode 100644 index 0000000000000000000000000000000000000000..b66d5d6a374ee29df7634db23596a7c898e09deb --- /dev/null +++ b/venv/bin/transformers-cli @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/cronscript/venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from transformers.commands.transformers_cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/venv/bin/undill b/venv/bin/undill new file mode 100644 index 0000000000000000000000000000000000000000..252d8375cf3bad3e167ae022a35d722364cd0e55 --- /dev/null +++ b/venv/bin/undill @@ -0,0 +1,22 @@ +#!/mnt/weka/peacock/cronscript/venv/bin/python3 +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +unpickle the contents of a pickled object file + +Examples:: + + $ undill hello.pkl + ['hello', 'world'] +""" + +if __name__ == '__main__': + import sys + import dill + for file in sys.argv[1:]: + print (dill.load(open(file,'rb'))) + diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/comm.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/comm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffd3a9263f619421e17656c92c6326a76d596da7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/comm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/ccl.py b/venv/lib/python3.10/site-packages/deepspeed/comm/ccl.py new file mode 100644 index 0000000000000000000000000000000000000000..f2a6cb6a36dc6ad37771d0000561cd8a482ed63c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/comm/ccl.py @@ -0,0 +1,193 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +''' +Copyright 2021 The Microsoft DeepSpeed Team +''' + +import torch +from deepspeed.accelerator import get_accelerator +from .reduce_op import ReduceOp +from .torch import TorchBackend + + +def build_ccl_op(): + builder = get_accelerator().create_op_builder("CCLCommBuilder") + if builder is None: + return None + ccl_cpp_module = builder.load() + print(f'DeepSpeed {builder.absolute_name()} built successfully') + return ccl_cpp_module + + +class CCLHandler(): + + def __init__(self, ccl_comm_op=None): + self.ccl_comm_op = ccl_comm_op + + def wait(self): + # backend covered it + pass + + +class CCLBackend(TorchBackend): + + def __init__(self, name='ccl', rank=-1, world_size=-1, mpu=None, timeout=None, init_method=None): + self.ccl_comm_op = build_ccl_op() + if self.ccl_comm_op is None: + # set CCLBackend to uninitialized state if CCLCommBuilder cannot be loaded + self.initialized = False + return + super(CCLBackend, self).__init__(backend='ccl', + name='torch', + rank=rank, + world_size=world_size, + timeout=timeout, + init_method=init_method) + self.name = 'ccl' + size = self.get_world_size() + rank = self.get_rank() + main_kvs = self.ccl_comm_op.get_kvs_addr(rank) + main_kvs = torch.tensor(main_kvs).to(torch.uint8).to(get_accelerator().current_device_name()) + super(CCLBackend, self).broadcast(main_kvs, 0) + self.ccl_comm_op.initialize(size, rank, main_kvs) + self.initialized = True + self.groups = [tuple(range(self.get_world_size()))] + self.available_coll = self.ccl_comm_op.get_available_coll() + + def is_initialized(self): + return self.initialized + + def run_collective(self, name, **kwargs): + if name in self.available_coll: + if 'group' in kwargs: + kwargs['group'] = self.get_all_ranks_from_group(kwargs['group']) + if 'dst' in kwargs: + kwargs['dst'] = kwargs['group'].index(kwargs['dst']) + if 'src' in kwargs: + kwargs['src'] = kwargs['group'].index(kwargs['src']) + func = "self.ccl_comm_op." + name + eval(func)(*(kwargs.values())) + return CCLHandler(self.ccl_comm_op) + else: + func = "super(CCLBackend, self)." 
+ name + eval(func)(*(kwargs.values())) + return CCLHandler(self.ccl_comm_op) + + def all_reduce(self, tensor, op=ReduceOp.SUM, group=None, async_op=False): + use_caching = False + if use_caching: + match_id = f"{tensor.size()}-{op}" + name = "all_reduce_caching" + if name in self.available_coll: + group = self.get_all_ranks_from_group(group) + return self.ccl_comm_op.all_reduce_caching(tensor, op, match_id, group, async_op) + else: + return self.run_collective(name=name, + tensor=tensor, + op=op, + match_id=match_id, + group=group, + async_op=async_op) + else: + name = "all_reduce" + if name in self.available_coll: + group = self.get_all_ranks_from_group(group) + return self.ccl_comm_op.all_reduce(tensor, op, group, async_op) + else: + return self.run_collective(name=name, tensor=tensor, op=op, group=group, async_op=async_op) + + def inference_all_reduce(self, tensor, op=ReduceOp.SUM, group=None): + name = "inference_all_reduce" + if name in self.available_coll: + return self.ccl_comm_op.inference_all_reduce(tensor, op) + else: + return self.run_collective(name=name, tensor=tensor, op=op, group=None, async_op=False) + + def broadcast(self, tensor, src, group=None, async_op=False): + return self.run_collective(name="broadcast", tensor=tensor, src=src, group=group, async_op=async_op) + + def all_gather(self, tensor_list, tensor, group=None, async_op=False): + return self.run_collective(name="all_gather", + tensor_list=tensor_list, + tensor=tensor, + group=group, + async_op=async_op) + + def reduce_scatter_tensor(self, output_tensor, input_tensor, op, group=None, async_op=False): + return self.run_collective(name="reduce_scatter_tensor", + output_tensor=output_tensor, + input_tensor=input_tensor, + op=op, + group=group) + + def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False): + return self.run_collective(name="all_gather_into_tensor", + output_tensor=output_tensor, + input_tensor=input_tensor, + group=group) + + def all_to_all_single(self, output, input, output_split_sizes, input_split_sizes, group=None, async_op=False): + return self.run_collective(name="all_to_all_single", + output=output, + input=input, + output_split_sizes=output_split_sizes, + input_split_sizes=input_split_sizes, + group=group) + + def send(self, tensor, dst, group=None, tag=0): + return self.run_collective(name="send", tensor=tensor, dst=dst, group=group, tag=tag) + + def recv(self, tensor, src, group=None, tag=0): + return self.run_collective(name="recv", tensor=tensor, src=src, group=group, tag=tag) + + def gather(self, tensor, gather_list, dst, group=None, async_op=False): + return self.run_collective(name="gather", tensor=tensor, gather_list=gather_list, dst=dst, group=group) + + def scatter(self, tensor, gather_list, dst, group=None, async_op=False): + return self.run_collective(name="scatter", tensor=tensor, gather_list=gather_list, dst=dst, group=group) + + def barrier(self, group=None, async_op=False): + return self.run_collective(name="barrier", group=group, async_op=async_op) + + def monitored_barrier(self, group=None, timeout=None, wait_all_ranks=False): + return self.run_collective(name="monitored_barrier", group=group) + + def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_op=False): + return self.run_collective(name="reduce_scatter", + output=output, + input_list=input_list, + op=op, + group=group, + async_op=async_op) + + def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False): + return 
self.run_collective(name="reduce", tensor=tensor, dst=dst, op=op, group=group, async_op=async_op) + + def new_group(self, ranks): + return super(CCLBackend, self).new_group(ranks) + + def _new_group(self, ranks, group): + size = len(ranks) + rank = self.get_rank() + sub_main_kvs = self.ccl_comm_op.get_sub_kvs_addr(rank == ranks[0]) + sub_main_kvs = torch.tensor(sub_main_kvs).to(torch.uint8).to(get_accelerator().current_device_name()) + super(CCLBackend, self).broadcast(sub_main_kvs, ranks[0], group) + self.ccl_comm_op.initialize_sub_comm(size, ranks.index(rank), sub_main_kvs, ranks) + self.groups.append(tuple(ranks)) + + def get_all_ranks_from_group(self, group): + if group is None: + return list(range(self.get_world_size())) + rank = 0 + results = [] + try: + while True: + results.append(super(CCLBackend, self).get_global_rank(group, rank)) + rank += 1 + except (ValueError, RuntimeError): + pass + if tuple(results) not in self.groups: + self._new_group(results, group) + return results diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/comm.py b/venv/lib/python3.10/site-packages/deepspeed/comm/comm.py new file mode 100644 index 0000000000000000000000000000000000000000..4f826edab3d6311a6e494b4834c98ea5bdc2880d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/comm/comm.py @@ -0,0 +1,771 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" + DeepSpeed Communication Package: deepspeed.comm + deepspeed.comm + -- import and use deepspeed.ops.comm + -- use torch.distributed directly if both this package and torch.distributed use the same NCCL version + -- use custom collectives + -- can either use torch.dist or ds.ops.comm? + Note: the old 1-bit compressed allreduce variants that resided in deepspeed.runtime.comm will be moved here as well. + deepspeed.comm API + -- must be kept fully compatible (same signatures) as torch.dist API to ensure backward/cross-framework compatibility. + -- e.g. 
if a client code used + from deepspeed import comm as dist + instead of + import torch.distributed as dist + The code should work without breaking any of the public torch.distributed functionality + Future: + -- deepspeed groups API should be brought into ds.comm +""" + +import torch +import os + +from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout +from .constants import * +from .reduce_op import * +from deepspeed.accelerator import get_accelerator +from deepspeed.comm.ccl import CCLBackend + +from deepspeed.utils.comms_logging import CommsLogger +from deepspeed.utils import timer, get_caller_func +from deepspeed.comm.torch import TorchBackend + +from deepspeed import utils +from datetime import timedelta + +# Current deepspeed.comm backend (cdb) global object for simple access by client code +cdb = None + +# Create global timer for ops +timers = timer.SynchronizedWallClockTimer() +timer_summary = {} + +comms_logger = CommsLogger() + +# Maintain objects of all initialized ds backends and assign them using the API functions in this file +nccl_backend = None +mpi_backend = None +ccl_backend = None +hccl_backend = None + +# This should be set here so all rank/size information from the launcher can be propagated +from deepspeed.comm.utils import * + + +class ProcessGroup(): + + def __init__(self, comm_id, ranks=[]): + self.ranks = ranks + self.comm_id = comm_id + self.size = len(ranks) + + +def _configure_using_config_file(config): + if config.comms_logger_enabled: + comms_logger.configure(config) + + +def configure( + deepspeed_config=None, + enabled=None, + prof_all=None, + prof_ops=None, + verbose=None, + debug=None, +): + + if deepspeed_config is not None: + _configure_using_config_file(deepspeed_config.comms_config) + + if enabled is not None: + comms_logger.enabled = enabled + + if prof_all is not None: + comms_logger.prof_all = prof_all + + if prof_ops is not None: + comms_logger.prof_ops = prof_ops + + if verbose is not None: + comms_logger.verbose = verbose + + if debug is not None: + comms_logger.debug = debug + + +# Logging wrapper for timing ops +def timed_op(func): + + def log_wrapper(*args, **kwargs): + # Add enabled flag so that overhead to each comm op is two if conditions at most + if comms_logger.enabled: + if ('prof' in kwargs + and kwargs['prof']) or comms_logger.prof_all or ('log_name' in kwargs + and kwargs['log_name'] in comms_logger.prof_ops): + # Need func args for their defaults + func_args = get_default_args(func) + func_args.update(kwargs) + msg_size = get_msg_size_from_args(func, *args, **kwargs) + log_name = get_debug_log_name(func_args, comms_logger.debug) + timers(log_name).start() + # Return the op, then stop the op's timer + try: + return func(*args, **kwargs) + finally: + if comms_logger.enabled: + # Need to make op blocking for accurate logging + get_accelerator().synchronize() + # If we're using MPI, we can't simply sync the stream + if cdb.using_mpi: + cdb.barrier() + if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or ( + 'log_name' in kwargs and kwargs['log_name'] in comms_logger.prof_ops): + log_name = get_debug_log_name(func_args, comms_logger.debug) + raw_name = func.__name__ + timers(log_name).stop() + # need temp var since 'elapsed' resets events + time_elapsed = timers(log_name).elapsed(reset=False) + comms_logger.append(raw_name, log_name, time_elapsed, msg_size) + + return log_wrapper + + +# For compatibility with torch distributed's init_process_group, we shall retain the signature from PyTorch code. 
+# DeepSpeed NCCL/MPI backend may not need all these params as we will have our own implementation. +# Please read full torch.distributed API docs from https://pytorch.org/docs/stable/distributed.html + + +# UNUSED: Future helper function to initialize DS backends +def init_deepspeed_backend(ds_backend, timeout, init_method): + global cdb + global nccl_backend + global mpi_backend + global ccl_backend + global hccl_backend + + rank = int(os.getenv('RANK', '-1')) + size = int(os.getenv('WORLD_SIZE', '-1')) + + if ds_backend == NCCL_BACKEND: + utils.logger.debug("NCCL backend in DeepSpeed not yet implemented") + elif ds_backend == MPI_BACKEND: + utils.logger.debug("MPI backend in DeepSpeed not yet implemented") + elif ds_backend == GLOO_BACKEND: + utils.logger.debug("Gloo backend in DeepSpeed not yet implemented") + elif ds_backend == CCL_BACKEND: + ccl_backend = CCLBackend(rank=rank, world_size=size, timeout=timeout, init_method=init_method) + utils.logger.info(f"Initialize {ds_backend} backend") + elif ds_backend == HCCL_BACKEND: + utils.logger.debug("HCCL backend in DeepSpeed not yet implemented") + else: + utils.logger.debug(f"DeepSpeed does not support {ds_backend} backend") + + +def is_initialized(): + #assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()' + if cdb is None: + return False + else: + return cdb.is_initialized() + + +def destroy_process_group(group=None): + global cdb + return cdb.destroy_process_group(group=group) + + +def new_group(ranks): + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.new_group(ranks) + + +def is_available() -> bool: + + # Returns ``True`` if the deepspeed comm package is available. + + # TODO: load other ops. Clients including deepspeed itself should use deepspeed.comm to import + # any communication related primitives from this package. 
+ # use hasattr(deepspeed.csrc.ops, "_comm") or something + return True + + +def set_backend(): + global cdb + global nccl_backend + global mpi_backend + global ccl_backend + global hccl_backend + + backend_name = get_accelerator().communication_backend_name() + + if backend_name == NCCL_BACKEND: + if nccl_backend is not None and nccl_backend.is_initialized(): + cdb = nccl_backend + elif backend_name == MPI_BACKEND: + if mpi_backend is not None and mpi_backend.is_initialized(): + cdb = mpi_backend + elif backend_name == CCL_BACKEND: + if ccl_backend is not None and ccl_backend.is_initialized(): + cdb = ccl_backend + elif backend_name == HCCL_BACKEND: + if hccl_backend is not None and hccl_backend.is_initialized(): + cdb = hccl_backend + + +@timed_op +def broadcast(tensor, src, group=None, async_op=False, prof=False, log_name='broadcast', debug=get_caller_func()): + global cdb + return cdb.broadcast(tensor=tensor, src=src, group=group, async_op=async_op) + + +@timed_op +def all_gather(tensor_list, + tensor, + group=None, + async_op=False, + prof=False, + log_name='all_gather', + debug=get_caller_func()): + global cdb + return cdb.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op) + + +def has_reduce_scatter_tensor(): + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.has_reduce_scatter_tensor() + + +def reduce_scatter_fn(output_tensor, + tensor, + op=ReduceOp.SUM, + group=None, + async_op=False, + prof=False, + debug=get_caller_func()): + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + if cdb.has_reduce_scatter_tensor(): + return reduce_scatter_tensor(output_tensor, + tensor, + op=op, + group=group, + async_op=async_op, + prof=prof, + debug=debug) + else: + if get_rank() == 0: + utils.logger.warning_once("unable to find torch.distributed.reduce_scatter_tensor. will fall back to " + "torch.distributed.reduce_scatter which will result in suboptimal performance. 
" + "please consider upgrading your pytorch installation.") + input_tensor_lst = list(torch.chunk(tensor, cdb.get_world_size(group))) + return reduce_scatter(output_tensor, + input_tensor_lst, + op=op, + group=group, + async_op=async_op, + prof=prof, + debug=debug) + + +@timed_op +def reduce_scatter_tensor(output_tensor, + tensor, + op=ReduceOp.SUM, + group=None, + async_op=False, + prof=False, + log_name='reduce_scatter_tensor', + debug=get_caller_func()): + global cdb + return cdb.reduce_scatter_tensor(output_tensor=output_tensor, + input_tensor=tensor, + op=op, + group=group, + async_op=async_op) + + +@timed_op +def all_gather_into_tensor(output_tensor, + tensor, + group=None, + async_op=False, + prof=False, + log_name='all_gather_into_tensor', + debug=get_caller_func()): + global cdb + return cdb.all_gather_into_tensor(output_tensor=output_tensor, input_tensor=tensor, group=group, async_op=async_op) + + +def has_all_gather_into_tensor(): + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.has_all_gather_into_tensor() + + +def allgather_fn(output_tensor, input_tensor, group=None, async_op=False, debug=get_caller_func()): + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + if cdb.has_all_gather_into_tensor(): + return all_gather_into_tensor(output_tensor, input_tensor, group=group, async_op=async_op, debug=debug) + else: + if get_rank() == 0: + utils.logger.warning_once("unable to find torch.distributed.all_gather_into_tensor. will fall back to " + "torch.distributed.all_gather which will result in suboptimal performance. " + "please consider upgrading your pytorch installation.") + output_tensors = list(torch.chunk(output_tensor, cdb.get_world_size(group))) + return all_gather(output_tensors, input_tensor, group=group, async_op=async_op, debug=debug) + + +@timed_op +def all_to_all_single(output, + tensor, + output_split_sizes=None, + input_split_sizes=None, + group=None, + async_op=False, + prof=False, + log_name='all_to_all_single', + debug=get_caller_func()): + global cdb + return cdb.all_to_all_single(output=output, + input=tensor, + output_split_sizes=output_split_sizes, + input_split_sizes=input_split_sizes, + group=group, + async_op=async_op) + + +@timed_op +def all_to_all(output_tensor_list, input_tensor_list, group=None, async_op=False): + global cdb + return cdb.all_to_all(output_tensor_list, input_tensor_list, group=group, async_op=async_op) + + +@timed_op +def send(tensor, dst, group=None, tag=0, prof=False, log_name='send', debug=get_caller_func()): + global cdb + return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag) + + +@timed_op +def recv(tensor, src=None, group=None, tag=0, prof=False, log_name='recv', debug=get_caller_func()): + global cdb + return cdb.recv(tensor=tensor, src=src, group=group, tag=tag) + + +@timed_op +def isend(tensor, dst, group=None, tag=0, prof=False, log_name='isend', debug=get_caller_func()): + global cdb + return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag) + + +@timed_op +def irecv(tensor, src=None, group=None, tag=0, prof=False, log_name='irecv', debug=get_caller_func()): + global cdb + return cdb.recv(tensor=tensor, src=src, group=group, tag=tag) + + +@timed_op +def gather(tensor, + gather_list=None, + dst=0, + group=None, + async_op=False, + prof=False, + log_name='gather', + debug=get_caller_func()): + global cdb + return 
cdb.gather(tensor=tensor, gather_list=gather_list, dst=dst, group=group, async_op=async_op) + + +@timed_op +def scatter(tensor, + scatter_list=None, + src=0, + group=None, + async_op=False, + prof=False, + log_name='scatter', + debug=get_caller_func()): + global cdb + return cdb.scatter(tensor=tensor, scatter_list=scatter_list, src=src, group=group, async_op=async_op) + + +@timed_op +def barrier(group=None, async_op=False, device_ids=None, prof=False, log_name='barrier', debug=get_caller_func()): + global cdb + return cdb.barrier(group=group, async_op=async_op) + + +@timed_op +def monitored_barrier(group=None, + timeout=None, + wait_all_ranks=False, + prof=False, + log_name='monitored_barrier', + debug=get_caller_func()): + global cdb + return cdb.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks) + + +def log_summary(show_straggler=False): + global cdb + barrier(log_name='log_summary_barrier') + if cdb.get_rank() == 0: + comms_logger.log_all(print_log=True, show_straggler=show_straggler) + else: + comms_logger.log_all(print_log=False, show_straggler=show_straggler) + barrier(log_name='log_summary_barrier') + + +@timed_op +def reduce(tensor, + dst, + op=ReduceOp.SUM, + group=None, + async_op=False, + prof=False, + log_name='reduce', + debug=get_caller_func()): + global cdb + return cdb.reduce(tensor=tensor, dst=dst, op=op, group=group, async_op=async_op) + + +@timed_op +def reduce_scatter(output, + input_list, + op=ReduceOp.SUM, + group=None, + async_op=False, + prof=False, + log_name='reduce_scatter', + debug=get_caller_func()): + global cdb + return cdb.reduce_scatter(output=output, input_list=input_list, op=op, group=group, async_op=async_op) + + +def has_all_reduce_coalesced(): + """""" + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + assert cdb.has_all_reduce_coalesced is not None, 'has_all_reduce_coalesced is not yet defined' + return cdb.has_all_reduce_coalesced + + +def has_coalescing_manager(): + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + assert cdb.has_coalescing_manager is not None, 'has_coalescing_manager is not yet defined' + return cdb.has_coalescing_manager + + +def all_gather_coalesced(output_tensors, input_tensors, group=None, async_op=False): + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.all_gather_coalesced(output_tensors, input_tensors, group=group, async_op=async_op) + + +@timed_op +def all_reduce(tensor, + op=ReduceOp.SUM, + group=None, + async_op=False, + prof=False, + log_name='all_reduce', + debug=get_caller_func()): + #if profile_comm: + # context of the timers? + # timers.start() + # TensorBoard logging for comm calls.? 
+ global cdb + #print(f'op = {op}, cdb= {cdb.name}') + return cdb.all_reduce(tensor, op, group, async_op) + + +@timed_op +def inference_all_reduce(tensor, + op=ReduceOp.SUM, + group=None, + async_op=False, + prof=False, + log_name='all_reduce', + debug=get_caller_func()): + global cdb + return cdb.inference_all_reduce(tensor, op, group) + + +@timed_op +def all_reduce_coalesced(tensors, + op=ReduceOp.SUM, + group=None, + async_op=False, + prof=False, + log_name='all_reduce', + debug=get_caller_func()): + global cdb + return cdb.all_reduce_coalesced(tensors, op, group, async_op) + + +def get_world_group(): + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.get_world_group() + + +def get_world_size(group=None) -> int: + """ + Returns the number of processes in the current process group + Args: + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + Returns: + The world size of the process group + -1, if not part of the group + """ + global cdb + + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.get_world_size(group) + + +def get_rank(group=None): + """ + Returns the rank of the current process in the provided ``group`` or the + default group if none was provided. + Rank is a unique identifier assigned to each process within a distributed + process group. They are always consecutive integers ranging from 0 to + ``world_size``. + Args: + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + Returns: + The rank of the process group + -1, if not part of the group + """ + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.get_rank(group) + + +def get_local_rank(): + """ + Helper function to get local rank after a backend has been set and initialized + Args: + None + Returns: + local rank (= GPU device ID) + """ + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return get_local_rank_from_launcher() + + +def get_global_rank(group=None, group_rank=0): + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.get_global_rank(group, group_rank) + + +def get_all_ranks_from_group(group=None): + global cdb + assert cdb is not None and cdb.is_initialized( + ), 'DeepSpeed backend not set, please initialize it using init_process_group()' + rank = 0 + group_ranks = [] + try: + while True: + group_ranks.append(cdb.get_global_rank(group, rank)) + rank += 1 + except (RuntimeError, ValueError): + pass + return group_ranks + + +# Main DeepSpeed Comms. public API. +def init_distributed(dist_backend=None, + auto_mpi_discovery=True, + distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, + verbose=True, + timeout=default_pg_timeout, + init_method=None, + dist_init_required=None, + config=None, + rank=-1, + world_size=-1): + ''' Initialize dist backend, potentially performing MPI discovery if needed + + Arguments: + dist_backend: Optional (str). torch distributed backend, e.g., nccl, mpi, gloo, hccl + auto_mpi_discovery Optional (bool). 
if distributed environment variables are not set, attempt to discover them from MPI
+        distributed_port: Optional (int). torch distributed backend port
+        verbose: Optional (bool). verbose logging
+        timeout: Optional (timedelta). Timeout for operations executed against the process group. Default value equals 30 minutes.
+        init_method: Optional (string). Torch distributed URL specifying how to initialize the process group. Default is “env://” if no init_method or store is specified.
+        config: Optional (dict). DeepSpeed configuration for setting up comms options (e.g. Comms profiling)
+        rank: Optional (int). The current manually specified rank. Some init_method like “tcp://” need the rank and world_size as well (see: https://pytorch.org/docs/stable/distributed.html#tcp-initialization)
+        world_size: Optional (int). Desired world_size for the TCP or Shared file-system initialization.
+    '''
+    global cdb
+
+    configure(deepspeed_config=config)
+
+    if dist_init_required is None:
+        dist_init_required = cdb is None or not cdb.is_initialized()
+
+    if cdb is None:
+        init_deepspeed_backend(get_accelerator().communication_backend_name(), timeout, init_method)
+        set_backend()
+        utils.logger.info(f'cdb={cdb}')
+    if cdb is None and torch.distributed.is_initialized():
+        # The user initialized torch.dist themselves, create cdb and short-circuit
+        cdb = TorchBackend(dist_backend, timeout, init_method)
+        return
+
+    if dist_init_required is False:
+        assert (
+            cdb is not None and cdb.is_initialized() is True
+        ), "Distributed backend is not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()"
+    else:
+        # Initialize torch distributed if needed
+        required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
+        if auto_mpi_discovery and not all(map(lambda v: v in os.environ, required_env)):
+            if verbose:
+                utils.logger.info("Not using the DeepSpeed or dist launchers, attempting to detect MPI environment...")
+            if in_aml() and not in_dlts():
+                patch_aml_env_for_torch_nccl_backend(verbose=verbose)
+            elif in_aws_sm():
+                patch_aws_sm_env_for_torch_nccl_backend(verbose=verbose)
+            else:
+                mpi_discovery(distributed_port=distributed_port, verbose=verbose)
+
+        if cdb is not None and cdb.is_initialized():
+            if int(os.getenv('RANK', '0')) == 0:
+                utils.logger.info('Distributed backend already initialized')
+        else:
+            assert isinstance(timeout, timedelta)
+            if dist_backend is None:
+                dist_backend = get_accelerator().communication_backend_name()
+            if int(os.getenv('RANK', '0')) == 0:
+                utils.logger.info('Initializing TorchBackend in DeepSpeed with backend {}'.format(dist_backend))
+            # Create a torch backend object, initialize torch distributed, and assign to cdb
+            cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size)
+
+
+def mpi_discovery(distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, verbose=True):
+    '''
+    Discover the MPI environment via mpi4py and map it to the relevant dist state
+    '''
+    from mpi4py import MPI
+    import subprocess
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+    world_size = comm.Get_size()
+
+    master_addr = None
+    if rank == 0:
+        hostname_cmd = ["hostname -I"]
+        result = subprocess.check_output(hostname_cmd, shell=True)
+        master_addr = result.decode('utf-8').split()[0]
+    master_addr = comm.bcast(master_addr, root=0)
+
+    # Determine local rank by assuming hostnames are unique
+    proc_name = MPI.Get_processor_name()
+    all_procs = comm.allgather(proc_name)
+    local_rank = sum([i == proc_name for i in all_procs[:rank]])
+
os.environ['RANK'] = str(rank) + os.environ['WORLD_SIZE'] = str(world_size) + os.environ['LOCAL_RANK'] = str(local_rank) + os.environ['MASTER_ADDR'] = master_addr + os.environ['MASTER_PORT'] = str(distributed_port) + + if verbose: + utils.logger.info( + "Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}". + format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'], + os.environ['MASTER_PORT'])) + + if cdb is not None and cdb.is_initialized(): + assert cdb.get_rank() == rank, "MPI rank {} does not match torch rank {}".format(rank, cdb.get_rank()) + assert cdb.get_world_size() == world_size, "MPI world size {} does not match torch world size {}".format( + world_size, cdb.get_world_size()) + + +def in_aml(): + # Are we running inside an Azure Machine Learning (AML) environment? + return 'AZUREML_EXPERIMENT_ID' in os.environ + + +def in_aws_sm(): + # Are we running inside an AWS SageMaker environment? + return 'SM_TRAINING_ENV' in os.environ + + +def in_dlts(): + # Are we running on a DLTS cluster? + return 'DLTS_JOB_ID' in os.environ + + +def patch_aml_env_for_torch_nccl_backend(master_port=6105, verbose=True): + """Helper routine to get and set environment variables. + This is adapted from Azure ML's documentation available from: + https://azure.github.io/azureml-web/docs/cheatsheet/distributed-training/#environment-variables-from-openmpi + """ + os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"] + os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"] + single_node = int(os.environ["OMPI_COMM_WORLD_LOCAL_SIZE"]) == int(os.environ["WORLD_SIZE"]) + + if not single_node: + master_node_params = os.environ["AZ_BATCH_MASTER_NODE"].split(":") + os.environ["MASTER_ADDR"] = master_node_params[0] + # Do not overwrite master port with that defined in AZ_BATCH_MASTER_NODE + if "MASTER_PORT" not in os.environ: + os.environ["MASTER_PORT"] = str(master_port) + else: + os.environ["MASTER_ADDR"] = os.environ["AZ_BATCHAI_MPI_MASTER_NODE"] + os.environ["MASTER_PORT"] = DEFAULT_AML_MASTER_PORT + + if verbose: + utils.logger.info("NCCL_SOCKET_IFNAME original value = {}".format(os.environ["NCCL_SOCKET_IFNAME"])) + + os.environ["NCCL_SOCKET_IFNAME"] = DEFAULT_AML_NCCL_SOCKET_IFNAME + os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"] + + if verbose: + utils.logger.info( + "Discovered AzureML settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}" + .format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'], + os.environ['MASTER_PORT'])) + + +def patch_aws_sm_env_for_torch_nccl_backend(verbose=True): + """Helper routine to get and set environment variables when running inside an AWS SageMaker environment. 
+ """ + os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"] + os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"] + os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"] + + if verbose: + utils.logger.info( + "Discovered AWS SageMaker settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}" + .format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'], + os.environ['MASTER_PORT'])) diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/config.py b/venv/lib/python3.10/site-packages/deepspeed/comm/config.py new file mode 100644 index 0000000000000000000000000000000000000000..1c441bb6bfe96fa1756fe67b4d2cb08f06889a98 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/comm/config.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .constants import * +from ..pydantic_v1 import BaseModel + + +class CommsConfig(BaseModel): + + class Config: + validate_all = True + validate_assignment = True + use_enum_values = True + extra = 'forbid' + + +class CommsLoggerConfig(CommsConfig): + enabled: bool = COMMS_LOGGER_ENABLED_DEFAULT + prof_all: bool = COMMS_LOGGER_PROF_ALL_DEFAULT + prof_ops: list = COMMS_LOGGER_PROF_OPS_DEFAULT + verbose: bool = COMMS_LOGGER_VERBOSE_DEFAULT + debug: bool = COMMS_LOGGER_DEBUG_DEFAULT + + +class DeepSpeedCommsConfig: + + def __init__(self, ds_config): + self.comms_logger_enabled = 'comms_logger' in ds_config + + if self.comms_logger_enabled: + self.comms_logger = CommsLoggerConfig(**ds_config['comms_logger']) diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/constants.py b/venv/lib/python3.10/site-packages/deepspeed/comm/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..50d234c93fa0df954d5aec18cda209aa3909a32e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/comm/constants.py @@ -0,0 +1,51 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +NCCL_BACKEND = 'nccl' +CCL_BACKEND = 'ccl' +MPI_BACKEND = 'mpi' +GLOO_BACKEND = 'gloo' +SCCL_BACKEND = 'sccl' +HCCL_BACKEND = 'hccl' + +DEFAULT_AML_MASTER_PORT = "54965" +DEFAULT_AML_NCCL_SOCKET_IFNAME = "^docker0,lo" + +######################################### +# Comms Logger +######################################### +# Comms Logger. By default, this feature is not enabled. 
+# Users can configure in ds_config.json as below example: +COMMS_LOGGER_FORMAT = ''' +The Comms Logger can be specified as: +"comms_logger": { + "enabled": true, + "verbose": false, + "prof_all": true, + "debug": false, + "prof_ops": ["all_reduce", "custom_all_reduce_name"] +} +''' +COMMS_LOGGER = "comms_logger" + +# Comms logger enable signal +COMMS_LOGGER_ENABLED = "enabled" +COMMS_LOGGER_ENABLED_DEFAULT = False + +# Comms logger verbose signal +COMMS_LOGGER_VERBOSE = "verbose" +COMMS_LOGGER_VERBOSE_DEFAULT = False + +# comms logger profile all ops signal +COMMS_LOGGER_PROF_ALL = "prof_all" +COMMS_LOGGER_PROF_ALL_DEFAULT = True + +# comms logger show all ops signal +COMMS_LOGGER_DEBUG = "debug" +COMMS_LOGGER_DEBUG_DEFAULT = False + +# comms logger profile specific ops in list +COMMS_LOGGER_PROF_OPS = "prof_ops" +COMMS_LOGGER_PROF_OPS_DEFAULT = [] diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/utils.py b/venv/lib/python3.10/site-packages/deepspeed/comm/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..78bfdf4462e4f57458a1137cb24a271e09fc1b6f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/comm/utils.py @@ -0,0 +1,133 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import inspect +from deepspeed.utils import get_caller_func + + +def get_local_rank_from_launcher(): + + # DeepSpeed launcher will set it so get from there + rank = os.environ.get('LOCAL_RANK') + + if rank is None: + rank = os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK') + + # Make it a single process job and set rank to 0 + if rank is None: + rank = 0 + + return int(rank) + + +def get_world_rank_from_launcher(): + + # DeepSpeed launcher will set it so get from there + rank = os.environ.get('RANK') + + if rank is None: + rank = os.environ.get('OMPI_COMM_WORLD_RANK') + + # Make it a single process job and set rank to 0 + if rank is None: + rank = 0 + + return int(rank) + + +def get_world_size_from_launcher(): + # DeepSpeed launcher will set it so get from there + size = os.environ.get('WORLD_SIZE') + rank = os.environ.get('RANK') + + if size is None: + size = os.environ.get('OMPI_COMM_WORLD_SIZE') + + # Make it a single process job and set size to 1 + if size is None: + size = 1 + + if rank == 0: + print(f"set world size to {size}") + + return int(size) + + +def get_default_args(func): + signature = inspect.signature(func) + return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} + + +# We need this hacky function since torch doesn't consistently name or place the input tensor args +def get_tensor_position(func): + sig_params = inspect.signature(func).parameters + arg = None + # most colls + if 'tensor' in sig_params: + arg = 'tensor' + # all_reduce_coalesced coll + elif 'tensors' in sig_params: + arg = 'tensors' + # reduce scatter coll + elif 'input_list' in sig_params: + arg = 'input_list' + # all_to_all and torch multiGPU colls + elif 'input_tensor_list' in sig_params: + arg = 'input_tensor_list' + if arg is None: + return -1 + else: + return list(sig_params).index(arg) + + +def get_tensor_kwarg(func, kwargs): + func_args = get_default_args(func) + func_args.update(kwargs) + arg = None + + if 'tensor' in func_args: + arg = func_args['tensor'] + elif 'tensors' in func_args: + arg = func_args['tensors'] + elif 'input_list' in func_args: + arg = func_args['input_list'] + elif 'input_tensor_list' in func_args: + arg = func_args['input_tensor_list'] + return 
arg + + +def get_msg_size_from_args(func, *args, **kwargs): + # 3 cases: + # - tensor arg is in args + # - tensor arg is in kwargs + # - tensor arg is not present (e.g. barrier) + tensor_arg_position = -1 + tensor_arg = None + # check if tensor arg is in args + if len(args) > 0: + tensor_arg_position = get_tensor_position(func) + if tensor_arg_position > -1: + tensor_arg = args[get_tensor_position(func)] + # check if tensor arg is in kwargs + if tensor_arg is None and len(kwargs) > 0: + tensor_arg = get_tensor_kwarg(func, kwargs) + # if tensor arg is not present, no data is being transmitted + if tensor_arg is None: + return 0 + else: + # Sum of tensor sizes for list colls such as torch's all_to_all + # NOTE: msg_size for list colls will not be the actual size transmitted by a given MPI/NCCL call within the coll op. Instead, it's the total amount of data transmitted. + if type(tensor_arg) is list: + return sum(x.element_size() * x.nelement() for x in tensor_arg) + else: + return tensor_arg.element_size() * tensor_arg.nelement() + + +def get_debug_log_name(func_args, debug): + if debug: + return func_args['log_name'] + ' | [Caller Func: ' + get_caller_func() + ']' + else: + return func_args['log_name'] diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ac2c2706fe0e8240901f847519feba7d302cb2d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e5c3fb1098347480c5d4017062f24fe060ffa28 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03cde6c49084ddf8021667f5d277e76273601532 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/engine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/base_engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/base_engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50c32f4bd5087737559057a4a0980a0e7bfb0912 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/base_engine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/base_moe.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/base_moe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68a67d62eb81d9a54d077109696e048d43d62936 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/base_moe.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/opt.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/opt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c700171727b88b4cf9599b999cbf762a10a6667 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/opt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/moe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/moe/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ac46f5c883e4631d11b157649213447d6acb3ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/experts.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/experts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2327df7fb7fbdfff49f6655161f52007e2eb7ff Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/experts.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/layer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a683c18f2bc8c3e13b81e67080d42ce5c39a11c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/layer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/mappings.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/mappings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0464cd108be0514b8b1396920f0a343b07915560 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/mappings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/sharded_moe.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/sharded_moe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6db1f66da0717f7f86e0c367d8a09c6eddbe689a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/sharded_moe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..078ebd8fd0f4d4a7ab14bd97d601b62609d733a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/experts.py 
b/venv/lib/python3.10/site-packages/deepspeed/moe/experts.py new file mode 100644 index 0000000000000000000000000000000000000000..0863221d7edf93770af126e3fa1a78be3c7f4289 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/moe/experts.py @@ -0,0 +1,38 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import copy +from typing import List, Optional + +import torch +from torch import nn + + +class Experts(nn.Module): + + def __init__(self, expert: nn.Module, num_local_experts: int = 1, expert_group_name: Optional[str] = None) -> None: + super(Experts, self).__init__() + + self.deepspeed_experts = nn.ModuleList([copy.deepcopy(expert) for _ in range(num_local_experts)]) + self.num_local_experts = num_local_experts + + # TODO: revisit allreduce for moe.gate... + for expert in self.deepspeed_experts: + # TODO: Create param groups to handle expert + data case (e.g. param.group = moe_group) + for param in expert.parameters(): + param.allreduce = False + param.group_name = expert_group_name + + def forward(self, inputs: torch.Tensor) -> torch.Tensor: + chunks = inputs.chunk(self.num_local_experts, dim=1) + expert_outputs: List[torch.Tensor] = [] + + for chunk, expert in zip(chunks, self.deepspeed_experts): + out = expert(chunk) + if isinstance(out, tuple): + out = out[0] # Ignore the bias term for now + expert_outputs += [out] + + return torch.cat(expert_outputs, dim=1) diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/layer.py b/venv/lib/python3.10/site-packages/deepspeed/moe/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..6777788ab885a618544ced9ed9448e08c5159751 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/moe/layer.py @@ -0,0 +1,132 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional, Tuple + +import torch +from torch import nn +from torch.nn import functional as F + +from deepspeed.utils import groups, log_dist +from .experts import Experts +from .sharded_moe import MOELayer, TopKGate + + +class MoE(nn.Module): + """Initialize an MoE layer. + + Arguments: + hidden_size (int): the hidden dimension of the model, importantly this is also the input and output dimension. + expert (nn.Module): the torch module that defines the expert (e.g., MLP, torch.linear). + num_experts (int, optional): default=1, the total number of experts per layer. + ep_size (int, optional): default=1, number of ranks in the expert parallel world or group. + k (int, optional): default=1, top-k gating value, only supports k=1 or k=2. + capacity_factor (float, optional): default=1.0, the capacity of the expert at training time. + eval_capacity_factor (float, optional): default=1.0, the capacity of the expert at eval time. + min_capacity (int, optional): default=4, the minimum capacity per expert regardless of the capacity_factor. + use_residual (bool, optional): default=False, make this MoE layer a Residual MoE (https://arxiv.org/abs/2201.05596) layer. + noisy_gate_policy (str, optional): default=None, noisy gate policy, valid options are 'Jitter', 'RSample' or 'None'. + drop_tokens (bool, optional): default=True, whether to drop tokens - (setting to False is equivalent to infinite capacity). + use_rts (bool, optional): default=True, whether to use Random Token Selection. + use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed). 
+ enable_expert_tensor_parallelism (bool, optional): default=False, whether to use tensor parallelism for experts + top2_2nd_expert_sampling (bool, optional): default=True, whether to perform sampling for 2nd expert + """ + + def __init__(self, + hidden_size: int, + expert: nn.Module, + num_experts: int = 1, + ep_size: int = 1, + k: int = 1, + capacity_factor: float = 1.0, + eval_capacity_factor: float = 1.0, + min_capacity: int = 4, + use_residual: bool = False, + noisy_gate_policy: Optional[str] = None, + drop_tokens: bool = True, + use_rts: bool = True, + use_tutel: bool = False, + enable_expert_tensor_parallelism: bool = False, + top2_2nd_expert_sampling: bool = True) -> None: + + super(MoE, self).__init__() + + self.use_residual = use_residual + self.enable_expert_tensor_parallelism = enable_expert_tensor_parallelism + assert num_experts % ep_size == 0, f"Number of experts ({num_experts}) should be divisible by expert parallel size ({ep_size})" + self.ep_size = ep_size + self.expert_group_name = f"ep_size_{self.ep_size}" + self.num_experts = num_experts + self.num_local_experts = num_experts // self.ep_size + + log_dist( + f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}', + [0]) + + assert noisy_gate_policy is None or noisy_gate_policy in ['None', 'Jitter', 'RSample'], \ + 'Unsupported noisy_gate_policy: ' + noisy_gate_policy + + experts = Experts(expert, self.num_local_experts, self.expert_group_name) + self.deepspeed_moe = MOELayer(TopKGate(hidden_size, num_experts, k, capacity_factor, eval_capacity_factor, + min_capacity, noisy_gate_policy, drop_tokens, use_rts, None, + top2_2nd_expert_sampling), + experts, + self.expert_group_name, + self.ep_size, + self.num_local_experts, + use_tutel=use_tutel) + if self.use_residual: + self.mlp = expert + # coefficient is used for weighted sum of the output of expert and mlp + self.coefficient = nn.Linear(hidden_size, 2) + + def set_deepspeed_parallelism(self, use_data_before_expert_parallel_: bool = False) -> None: + self._create_process_groups(use_data_before_expert_parallel_=use_data_before_expert_parallel_) + + def _create_process_groups(self, use_data_before_expert_parallel_: bool = False) -> None: + # Create process group for a layer if needed + if self.expert_group_name not in groups._get_expert_parallel_group_dict(): + print(f"No existing process group found, creating a new group named: {self.expert_group_name}") + if (groups.mpu is None) or (not self.enable_expert_tensor_parallelism): + # Condition 1 - no groups.mpu means no tensor parallelism + # Condition 2 - disabling expert tensor parallelism on purpose + groups._create_expert_and_data_parallel( + self.ep_size, use_data_before_expert_parallel_=use_data_before_expert_parallel_) + else: + # expert tensor parallelism is enabled + groups._create_expert_data_and_model_parallel( + self.ep_size, mpu=groups.mpu, use_data_before_expert_parallel_=use_data_before_expert_parallel_) + # Set the group handle for the MOELayer (deepspeed_moe) object + self.deepspeed_moe._set_ep_group(groups._get_expert_parallel_group(self.expert_group_name)) + + def forward(self, + hidden_states: torch.Tensor, + used_token: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ MoE forward + + Arguments: + hidden_states (Tensor): input to the layer + used_token (Tensor, optional): default: None, mask only used tokens + + Returns: + A tuple including output, gate loss, and expert count. 
+ + * output (Tensor): output of the model + + * l_aux (Tensor): gate loss value + + * exp_counts (Tensor): expert count + """ + output = self.deepspeed_moe(hidden_states, used_token) + if self.use_residual: + # Residual MoE + output_mlp = self.mlp(hidden_states) + if isinstance(output_mlp, tuple): + output_mlp = output_mlp[0] # Ignore the bias term for now + coef = self.coefficient(hidden_states) + coef = F.softmax(coef, dim=-1) + output = output * coef[..., 0:1] + output_mlp * coef[..., 1:] + return output, self.deepspeed_moe.l_aux, self.deepspeed_moe.exp_counts diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/mappings.py b/venv/lib/python3.10/site-packages/deepspeed/moe/mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..b8a06274343a14a8a3912ff83128488a731fda72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/moe/mappings.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# The file has been adapted from the following Megatron-LM file: +# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/mpu/mappings.py +# Git commit hash: 9dc3c42a84aa656f583703cf8b6b4f79f712b796 +# We retain the following copyright from the original files: + +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import deepspeed +from deepspeed.utils.bwc import (bwc_tensor_model_parallel_world_size, bwc_tensor_model_parallel_rank, + bwc_tensor_model_parallel_group) + + +def _gather_tokens(input_, dim=0): + """Gather tensors and concatenate them along a dimension""" + mpu = deepspeed.utils.groups.mpu + + input_ = input_.contiguous() + # Size and dimension. + rank = bwc_tensor_model_parallel_rank(mpu) + + tensor_list = [torch.empty_like(input_) for _ in range(bwc_tensor_model_parallel_world_size(mpu))] + tensor_list[rank] = input_ + deepspeed.comm.all_gather(tensor_list, input_, group=bwc_tensor_model_parallel_group(mpu)) + + # Note: torch.cat already creates a contiguous tensor. 
+ output = torch.cat(tensor_list, dim=dim).contiguous() + + return output + + +def _drop_tokens(input_, dim=0): + """Divide a tensor among the tensor parallel ranks""" + mpu = deepspeed.utils.groups.mpu + + total_chunks = bwc_tensor_model_parallel_world_size(mpu) + this_chunk = bwc_tensor_model_parallel_rank(mpu) + assert input_.shape[ + dim] % total_chunks == 0, f"input dimension {dim} ({input_.shape[dim]}) is not divisible by tensor parallel world size ({total_chunks})" + chunk_size = input_.shape[dim] // total_chunks + + return torch.narrow(input_, dim, this_chunk * chunk_size, chunk_size) + + +class _GatherTokens(torch.autograd.Function): + """All gather tokens among the tensor parallel ranks""" + + @staticmethod + def symbolic(graph, input_, dim): + return _gather_tokens(input_, dim) + + @staticmethod + def forward(ctx, input_, dim): + ctx.dim = dim + return _gather_tokens(input_, dim) + + @staticmethod + def backward(ctx, grad_output): + return _drop_tokens(grad_output, ctx.dim), None + + +class _DropTokens(torch.autograd.Function): + "Divide tokens equally among the tensor parallel ranks" + + @staticmethod + def symbolic(graph, input_, dim): + return _drop_tokens(input_, dim) + + @staticmethod + def forward(ctx, input_, dim): + ctx.dim = dim + return _drop_tokens(input_, dim) + + @staticmethod + def backward(ctx, input_): + return _gather_tokens(input_, ctx.dim), None + + +def gather_tokens(input_, dim=0): + mpu = deepspeed.utils.groups.mpu + if mpu is None or bwc_tensor_model_parallel_world_size(mpu) == 1: + # no tensor parallelism for non-experts + return input_ + return _GatherTokens.apply(input_, dim) + + +def drop_tokens(input_, dim=0): + mpu = deepspeed.utils.groups.mpu + if mpu is None or bwc_tensor_model_parallel_world_size(mpu) == 1: + # no tensor parallelism for non-experts + return input_ + return _DropTokens.apply(input_, dim) diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/sharded_moe.py b/venv/lib/python3.10/site-packages/deepspeed/moe/sharded_moe.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2782279c01abb21793dbeb3e18681571569e83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/moe/sharded_moe.py @@ -0,0 +1,585 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +The file has been adapted from two fairscale files: + (1) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/moe_layer.py + (2) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/top2gate.py + Git commit hash: 34df606902a240567a0d898037ece55c2f1336cf + We retain the following license from the original files: +""" + +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
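# Illustrative aside (not part of the diffed DeepSpeed sources): the gather_tokens /
# drop_tokens helpers in mappings.py above keep one equal chunk of a tensor per
# tensor-parallel rank and later all-gather the chunks back. A single-process sketch of
# that round trip using only plain torch.narrow / torch.cat (rank count and dim below
# are made-up example values):
import torch

def keep_chunk(x: torch.Tensor, rank: int, world_size: int, dim: int = 0) -> torch.Tensor:
    # mirrors the divisibility assumption asserted in _drop_tokens
    chunk = x.shape[dim] // world_size
    return torch.narrow(x, dim, rank * chunk, chunk)

full = torch.arange(32, dtype=torch.float32).reshape(2, 16)
shards = [keep_chunk(full, r, world_size=4, dim=1) for r in range(4)]  # "drop" on each rank
restored = torch.cat(shards, dim=1)                                    # "gather" across ranks
assert torch.equal(restored, full)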
+ +from deepspeed.utils.timer import SynchronizedWallClockTimer +from deepspeed.utils import logger +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size +from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple, Union + +import torch +from torch import Tensor +from torch.nn import Module +import torch.nn.functional as F +from deepspeed.utils import groups +from .mappings import drop_tokens, gather_tokens + +if TYPE_CHECKING: + Base = Module[Tensor] +else: + Base = Module + +TOPK_GATE_TIMER = 'topk_gate' +MOE_TIMER = 'moe' +FIRST_ALLTOALL_TIMER = '1st_a2a' +SECOND_ALLTOALL_TIMER = '2nd_a2a' + +uniform_map: Dict[torch.device, Callable] = {} +gumbel_map: Dict[torch.device, Callable] = {} +exp_selection_uniform_map: Dict[torch.device, Callable] = {} + +try: + # To enable Tutel MoE optimizations: + # python3 -m pip install --user --upgrade git+https://github.com/microsoft/tutel@v0.1.x + from tutel import moe as tutel_moe + TUTEL_INSTALLED = True +except: + # Fail silently so we don't spam logs unnecessarily if user isn't using tutel + TUTEL_INSTALLED = False + pass + + +def multiplicative_jitter(x, device: torch.device, epsilon=1e-2): + """ + Modified from switch transformer paper. mesh transformers + Multiply values by a random number between 1-epsilon and 1+epsilon. + Makes models more resilient to rounding errors introduced by bfloat16. + This seems particularly important for logits. + Args: + x: a torch.tensor + device: torch.device + epsilon: a floating point value + Returns: + a jittered x. + """ + if epsilon == 0: + return x + uniform = uniform_map.get(device) + if uniform is None: + uniform = torch.distributions.uniform.Uniform(low=torch.tensor(1.0 - epsilon, device=device), + high=torch.tensor(1.0 + epsilon, + device=device)).rsample # type: ignore + uniform_map[device] = uniform + return x * uniform(x.shape) + + +def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor: + gumbel = gumbel_map.get(device) + if gumbel is None: + one = torch.tensor(1.0, device=device) + zero = torch.tensor(0.0, device=device) + gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore + gumbel_map[device] = gumbel + return gumbel(shape) + + +from deepspeed import comm as dist + +# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity +# See https://arxiv.org/pdf/2006.16668.pdf for details. + + +# Based on https://github.com/pytorch/pytorch/pull/40762 +class _AllToAll(torch.autograd.Function): + + @staticmethod + def forward(ctx: Any, group: dist.ProcessGroup, input: Tensor) -> Tensor: # type: ignore + ctx.group = group + input = input.contiguous() + output = torch.empty_like(input) + dist.all_to_all_single(output, input, group=group) + return output + + @staticmethod + def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]: + return (None, _AllToAll.apply(ctx.group, *grad_output)) + + +# einsum rewrites are on par or more performant +# switch can be bubbled up in future +USE_EINSUM = True + + +# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity +# See https://arxiv.org/pdf/2006.16668.pdf for details. 
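# Illustrative note (annotation, not from the original file): under the dimension legend
# above (S tokens, E experts, C capacity, M model dim), the dispatch/combine rules used
# later in this file correspond to these shapes:
#   "se,sc->sec" : gate probs [S, E] x capacity one-hots [S, C] -> combine weights [S, E, C]
#   "sec,sm->ecm": dispatch  -- mask/weights [S, E, C] x tokens [S, M] -> expert slots [E, C, M]
#   "sec,ecm->sm": combine   -- weights [S, E, C] x expert outputs [E, C, M] -> tokens [S, M]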
+def einsum(rule, a, b): + if USE_EINSUM: + return torch.einsum(rule, a, b) + elif rule == 's,se->se': + return a.reshape(a.shape[0], -1) * b + elif rule == 'se,sc->sec': + return a.unsqueeze(2) * b.unsqueeze(1) + elif rule == 'se,se->s': + return torch.bmm(a.unsqueeze(1), b.unsqueeze(2)).reshape(-1) + elif rule == 'sec,sm->ecm': + s = a.shape[0] + e = a.shape[1] + c = a.shape[2] + m = b.shape[1] + return torch.matmul(a.reshape(s, -1).t(), b).reshape(e, c, m) + elif rule == 'sec,ecm->sm': + return torch.matmul(a.reshape(a.shape[0], -1), b.reshape(-1, b.shape[-1])) + elif rule == 'ks,ksm->sm': + k = b.shape[0] + s = b.shape[1] + m = b.shape[2] + # [k, s] -> [s, k] -> [s, 1, k] + a = a.t().unsqueeze(1) + # [k,s,m] -> [k, sm] -> [sm, k] -> [s, m, k] + b = b.reshape(k, -1).t().reshape(s, m, k) + # bmm([s, 1, k], [s, m, k]^t) -> [s, m, 1] + return torch.bmm(a, b.transpose(1, 2)).squeeze(2) + else: + return torch.einsum(rule, a, b) + + +# The following functions are extracted and scripted +# because otherwise during a torch.jit.trace, the non-Tensor +# values used in the calculations get recorded as constants. +# torch.jit.script coerces them into Tensors and preserves +# their dynamic shapes. This enables ONNX export. +# We can't script the entire top1gating function because it +# includes stateful caching logic which is incompatible with ONNX. + + +@torch.jit.script +def _capacity(gates: Tensor, capacity_factor: Tensor, min_capacity: Tensor) -> Tensor: + # gates has shape of SE + num_tokens = gates.shape[0] + num_experts = gates.shape[1] + # to(torch.int64) works around a bug in torch.onnx.export: + # it should cast k to int64 when converting torch.topk but it doesn't. + capacity = torch.ceil((num_tokens / num_experts) * capacity_factor).to(torch.int64) + if capacity < min_capacity: + capacity = min_capacity.to(torch.int64) + return capacity + + +@torch.jit.script +def _top_idx(source, k): + return torch.topk(source, k=k, dim=0)[1] + + +@torch.jit.script +def _one_hot_to_float(x, num_classes): + return F.one_hot(x, num_classes=num_classes).float() + + +def top1gating(logits: Tensor, + capacity_factor: float, + min_capacity: int, + used_token: Tensor = None, + noisy_gate_policy: Optional[str] = None, + drop_tokens: bool = True, + use_rts: bool = True, + ep_group: Union[torch.distributed.ProcessGroup, None] = None, + use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + """Implements Top1Gating on logits.""" + if noisy_gate_policy == 'RSample': + logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) + # everything is in fp32 in this function + gates = F.softmax(logits, dim=1) + + capacity = _capacity(gates, torch.tensor(capacity_factor), torch.tensor(min_capacity)) + + # Create a mask for 1st's expert per token + # noisy gating + indices1_s = torch.argmax(logits_w_noise if noisy_gate_policy == 'RSample' else gates, dim=1) + num_experts = int(gates.shape[1]) + mask1 = F.one_hot(indices1_s, num_classes=num_experts) + + # mask only used tokens + if used_token is not None: + mask1 = einsum("s,se->se", used_token, mask1) + + # gating decisions + exp_counts = torch.sum(mask1, dim=0).detach().to('cpu') + + # if we don't want to drop any tokens + if not drop_tokens: + new_capacity = torch.max(exp_counts).to(logits.device) + # Communicate across expert processes to pick the maximum capacity. 
+ if ep_group is not None: + dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=ep_group) + if groups._get_expert_model_parallel_world_size() == 1: + # If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'. + # This is since we are going to activate drop_tokens() to drop duplicate tokens. + tp = 1 if groups.mpu is None else bwc_tensor_model_parallel_world_size(mpu=groups.mpu) + new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype) + # Make sure the capacity value does not exceed the number of tokens. + capacity = min(new_capacity, torch.tensor(mask1.size(0))) + + # Compute l_aux + me = torch.mean(gates, dim=0) + ce = torch.mean(mask1.float(), dim=0) + l_aux = torch.sum(me * ce) * num_experts + + # Random Token Selection + if use_rts: + uniform = exp_selection_uniform_map.get(logits.device) + if uniform is None: + uniform = torch.distributions.uniform.Uniform(low=torch.tensor(0.0, device=logits.device), + high=torch.tensor(1.0, device=logits.device)).rsample + exp_selection_uniform_map[logits.device] = uniform + + mask1_rand = mask1 * uniform(mask1.shape) + else: + mask1_rand = mask1 + + assert logits.shape[ + 0] >= min_capacity, "No. of tokens (batch-size) should be greater than min_capacity. Either set min_capacity to 0 or increase your batch size." + + top_idx = _top_idx(mask1_rand, capacity) + + new_mask1 = mask1 * torch.zeros_like(mask1).scatter_(0, top_idx, 1) + mask1 = new_mask1 + + if use_tutel: + # Tutel doesn't support index values masked with zero + # so we need to replace masked indices with -1 + indices_mask = mask1.sum(dim=1) * num_experts - 1 + indices1_s = torch.min(indices1_s, indices_mask) + + # Compute locations in capacity buffer + if use_tutel: + locations1 = tutel_moe.fast_cumsum_sub_one(mask1) + else: + locations1 = torch.cumsum(mask1, dim=0) - 1 + + if use_tutel: + gates1_s = (gates * mask1).sum(dim=1) + locations1_s = torch.sum(locations1 * mask1, dim=1) + return l_aux, capacity, num_experts, [ + indices1_s, + ], [ + locations1_s, + ], [ + gates1_s, + ], exp_counts + + # Store the capacity location for each token + locations1_s = torch.sum(locations1 * mask1, dim=1) + + # Normalize gate probabilities + mask1_float = mask1.float() + gates = gates * mask1_float + + locations1_sc = _one_hot_to_float(locations1_s, capacity) + combine_weights = einsum("se,sc->sec", gates, locations1_sc) + + dispatch_mask = combine_weights.bool() + + return l_aux, combine_weights, dispatch_mask, exp_counts + + +def top2gating(logits: Tensor, + capacity_factor: float, + min_capacity: int, + drop_tokens: bool = True, + ep_group: Union[torch.distributed.ProcessGroup, None] = None, + top2_2nd_expert_sampling: bool = True) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + """Implements Top2Gating on logits.""" + # everything is in fp32 in this function + gates = F.softmax(logits, dim=1) + + # Create a mask for 1st's expert per token + indices1_s = torch.argmax(gates, dim=1) + num_experts = int(gates.shape[1]) + mask1 = F.one_hot(indices1_s, num_classes=num_experts) + + if top2_2nd_expert_sampling: + # Create a mask for 2nd's expert per token using Gumbel-max trick + # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ + logits += gumbel_rsample(logits.shape, device=logits.device) + + # Replace top-expert with min value + logits_except1 = logits.masked_fill(mask1.bool(), float("-inf")) + indices2_s = torch.argmax(logits_except1, dim=1) + mask2 = F.one_hot(indices2_s, num_classes=num_experts) + + # Compute locations in capacity buffer + 
locations1 = torch.cumsum(mask1, dim=0) - 1 + locations2 = torch.cumsum(mask2, dim=0) - 1 + # Update 2nd's location by accounting for locations of 1st + locations2 += torch.sum(mask1, dim=0, keepdim=True) + + # Compute l_aux + me = torch.mean(gates, dim=0) + ce = torch.mean(mask1.float(), dim=0) + l_aux = torch.mean(me * ce) * num_experts * num_experts + + # gating decisions + exp_counts = torch.sum(mask1 + mask2, dim=0) + + if drop_tokens: + # Calculate configured capacity and remove locations outside capacity from mask + capacity = _capacity(gates, torch.tensor(capacity_factor * 2), torch.tensor(min_capacity)) + mask1 *= torch.lt(locations1, capacity) + mask2 *= torch.lt(locations2, capacity) + else: + # Do not drop tokens - set capacity according to current expert assignments + new_capacity = torch.max(exp_counts) + if ep_group is not None: + dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=ep_group) + if groups._get_expert_model_parallel_world_size() == 1: + # If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'. + # This is since we are going to activate drop_tokens() to drop duplicate tokens. + tp = 1 if groups.mpu is None else bwc_tensor_model_parallel_world_size(mpu=groups.mpu) + new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype) + capacity = new_capacity + + # Store the capacity location for each token + locations1_s = torch.sum(locations1 * mask1, dim=1) + locations2_s = torch.sum(locations2 * mask2, dim=1) + + # Normalize gate probabilities + mask1_float = mask1.float() + mask2_float = mask2.float() + gates1_s = einsum("se,se->s", gates, mask1_float) + gates2_s = einsum("se,se->s", gates, mask2_float) + denom_s = gates1_s + gates2_s + # Avoid divide-by-zero + denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) + gates1_s /= denom_s + gates2_s /= denom_s + + # Calculate combine_weights and dispatch_mask + gates1 = einsum("s,se->se", gates1_s, mask1_float) + gates2 = einsum("s,se->se", gates2_s, mask2_float) + locations1_sc = _one_hot_to_float(locations1_s, capacity) + locations2_sc = _one_hot_to_float(locations2_s, capacity) + combine1_sec = einsum("se,sc->sec", gates1, locations1_sc) + combine2_sec = einsum("se,sc->sec", gates2, locations2_sc) + combine_weights = combine1_sec + combine2_sec + dispatch_mask = combine_weights.bool() + + return l_aux, combine_weights, dispatch_mask, exp_counts.detach().to('cpu') + + +class TopKGate(Module): + """Gate module which implements Top2Gating as described in Gshard_. + :: + + gate = TopKGate(model_dim, num_experts) + l_aux, combine_weights, dispatch_mask = gate(input) + + .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf + + Args: + model_dim (int): + size of model embedding dimension + num_experts (int): + number of experts in model + """ + + wg: torch.nn.Linear + + def __init__(self, + model_dim: int, + num_experts: int, + k: int = 1, + capacity_factor: float = 1.0, + eval_capacity_factor: float = 1.0, + min_capacity: int = 8, + noisy_gate_policy: Optional[str] = None, + drop_tokens: bool = True, + use_rts: bool = True, + ep_group: Union[torch.distributed.ProcessGroup, None] = None, + top2_2nd_expert_sampling: bool = True) -> None: + super().__init__() + + # Only top-1 and top-2 are supported at the moment. 
+ if k != 1 and k != 2: + raise ValueError('Only top-1 and top-2 gatings are supported.') + self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) + self.ep_group = ep_group + self.k = k + self.capacity_factor = capacity_factor + self.eval_capacity_factor = eval_capacity_factor + self.min_capacity = min_capacity + self.noisy_gate_policy = noisy_gate_policy + self.timers = SynchronizedWallClockTimer() + self.wall_clock_breakdown = False + self.gate_time = 0.0 + self.drop_tokens = drop_tokens + self.use_rts = use_rts + self.top2_2nd_expert_sampling = top2_2nd_expert_sampling + + def _set_ep_group(self, ep_group): + assert self.ep_group is None, f'Attempting to override an existing ep_group' + self.ep_group = ep_group + + def forward(self, + input: torch.Tensor, + used_token: torch.Tensor = None, + use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore + + if self.wall_clock_breakdown: + self.timers(TOPK_GATE_TIMER).start() + + input_fp32 = input.float() + # input jittering + if self.noisy_gate_policy == 'Jitter' and self.training: + input_fp32 = multiplicative_jitter(input_fp32, device=input.device) + logits = torch.nn.functional.linear(input_fp32, weight=self.wg.weight.float(), bias=None) + + if self.k == 1: + gate_output = top1gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor, + self.min_capacity, used_token, self.noisy_gate_policy if self.training else None, + self.drop_tokens, self.use_rts, self.ep_group, use_tutel) + + else: + gate_output = top2gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor, + self.min_capacity, self.drop_tokens, self.ep_group, self.top2_2nd_expert_sampling) + + if self.wall_clock_breakdown: + self.timers(TOPK_GATE_TIMER).stop() + self.gate_time = self.timers(TOPK_GATE_TIMER).elapsed(reset=False) + + return gate_output + + +class MOELayer(Base): + """MOELayer module which implements MixtureOfExperts as described in Gshard_. + :: + + gate = TopKGate(model_dim, num_experts) + moe = MOELayer(gate, expert) + output = moe(input) + l_aux = moe.l_aux + + .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf + + Args: + gate (torch.nn.Module): + gate network + expert (torch.nn.Module): + expert network + """ + + def __init__(self, + gate: Module, + experts: Module, + ep_group_name, + ep_size, + num_local_experts: int, + use_tutel: bool = False) -> None: + super().__init__() + self.gate = gate + self.experts = experts + self.ep_group = None + self.ep_size = ep_size + self.ep_group_name = ep_group_name + self.num_local_experts = num_local_experts + self.time_falltoall = 0.0 + self.time_salltoall = 0.0 + self.time_moe = 0.0 + self.timers = SynchronizedWallClockTimer() + self.wall_clock_breakdown = False + + self.use_tutel = use_tutel and TUTEL_INSTALLED and gate.k == 1 + + if self.use_tutel: + logger.info('Using Tutel optimizations.') + elif use_tutel and not TUTEL_INSTALLED: + logger.warning("Tutel optimization requested but not installed. " + "Proceeding without Tutel.") + elif use_tutel and TUTEL_INSTALLED and gate.k != 1: + logger.warning("To enable Tutel optimization, use top-1 instead of top-2 gate. " + "Proceeding without Tutel.") + + def _set_ep_group(self, ep_group): + self.ep_group = ep_group + self.gate._set_ep_group(ep_group) + + def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: + + if self.wall_clock_breakdown: + self.timers(MOE_TIMER).start() + + # Implement Algorithm 2 from GShard paper. 
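# Rough shape walk-through for the non-Tutel path below (annotation, not original code;
# S = number of tokens, E = total experts, C = capacity, M = d_model; C may shrink if
# drop_tokens is applied under tensor parallelism):
#   reshaped_input                        : [S, M]
#   gate -> combine_weights/dispatch_mask : [S, E, C]
#   einsum("sec,sm->ecm")                 : [E, C, M]  tokens routed into per-expert slots
#   all-to-all over ep_group, reshape     : [ep_size, num_local_experts, C, M]
#   experts(...)                          : same shape, second all-to-all back to [E, C, M]
#   einsum("sec,ecm->sm")                 : [S, M], reshaped to the original input shape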
+ d_model = input[0].shape[-1] + + # Initial implementation -> Reshape into S tokens by dropping sequence dimension. + # Reshape into G groups so that each group can distribute tokens equally + # group_size = kwargs['group_size'] if 'group_size' in kwargs.keys() else 1 + reshaped_input = input[0].reshape(-1, d_model) + + if self.use_tutel: + self.l_aux, C, E, indices_, locations_, gates_, self.exp_counts = self.gate(reshaped_input, input[1], True) + S, M = reshaped_input.size(0), reshaped_input.size(1) + + if not hasattr(self, '_tutel_dispatcher'): + self._tutel_dispatcher = tutel_moe.fast_dispatcher(E, C, M, dispatch_dtype=reshaped_input.dtype) + self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C) + dispatched_input = self._tutel_dispatcher.encode(reshaped_input) + else: + self.l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1]) + dispatched_input = einsum("sec,sm->ecm", dispatch_mask.type_as(input[0]), reshaped_input) + + if self.wall_clock_breakdown: + self.timers(FIRST_ALLTOALL_TIMER).start() + + if groups._get_expert_model_parallel_world_size() == 1: + # If the non-expert is tensor-parallel, it will create + # duplicate tokens on the tensor-parallel ranks. + # Since our experts are not tensor-parallel, these duplicates + # need to be dropped to ensure correctness. + # this also doubles up as a communication optimization as we are + # reducing the all-to-all communication volume. + dispatched_input = drop_tokens(dispatched_input, dim=1) + + dispatched_input = _AllToAll.apply(self.ep_group, dispatched_input) + + if self.wall_clock_breakdown: + self.timers(FIRST_ALLTOALL_TIMER).stop() + self.time_falltoall = self.timers(FIRST_ALLTOALL_TIMER).elapsed(reset=False) + + # Re-shape after all-to-all: ecm -> gecm + dispatched_input = dispatched_input.reshape(self.ep_size, self.num_local_experts, -1, d_model) + + expert_output = self.experts(dispatched_input) + + if self.wall_clock_breakdown: + self.timers(SECOND_ALLTOALL_TIMER).start() + + expert_output = _AllToAll.apply(self.ep_group, expert_output) + + if self.wall_clock_breakdown: + self.timers(SECOND_ALLTOALL_TIMER).stop() + self.time_salltoall = self.timers(SECOND_ALLTOALL_TIMER).elapsed(reset=False) + + # Re-shape back: gecm -> ecm + expert_output = expert_output.reshape(self.ep_size * self.num_local_experts, -1, d_model) + + if groups._get_expert_model_parallel_world_size() == 1: + # the dropped duplicate tokens need to be gathered on each + # tensor parallel rank again for the tensor-parallel + # non-expert of the next layer. + expert_output = gather_tokens(expert_output, dim=1) + + if self.use_tutel: + combined_output = self._tutel_dispatcher.decode(expert_output.view(E * C, M)) + else: + combined_output = einsum("sec,ecm->sm", combine_weights.type_as(input[0]), expert_output) + + a = combined_output.reshape(input[0].shape) + + if self.wall_clock_breakdown: + self.timers(MOE_TIMER).stop() + self.time_moe = self.timers(MOE_TIMER).elapsed(reset=False) + + return a diff --git a/venv/lib/python3.10/site-packages/deepspeed/moe/utils.py b/venv/lib/python3.10/site-packages/deepspeed/moe/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..20866378efac72c96a7e2f56bdf97e5b7b4effb9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/moe/utils.py @@ -0,0 +1,182 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from collections import defaultdict +from typing import Any, Dict, List, Set, Tuple, Union, cast + +import torch +from torch import nn + +from .layer import MoE + + +def has_moe_layers(m: nn.Module) -> Tuple[bool, int]: + has_moe = False + num_experts = 0 + + for module in m.modules(): + if isinstance(module, MoE): + has_moe = True + num_experts = module.num_experts + break + return has_moe, num_experts + + +def is_moe_param(param: torch.Tensor) -> bool: + if hasattr(param, "allreduce") and not param.allreduce: + return True + return False + + +def split_params_into_shared_and_expert_params( + params: List[torch.nn.Parameter]) -> Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]: + shared_params: List[nn.Parameter] = [] + expert_params: List[nn.Parameter] = [] + + for p in params: + if is_moe_param(p): + expert_params.append(p) + else: + shared_params.append(p) + return shared_params, expert_params + + +def split_params_grads_into_shared_and_expert_params( + group: List[torch.nn.Parameter]) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: + """Split grad of parameters into grads of non-expert params + and grads of expert params. This is useful while computing + grad-norms for clipping and overflow detection + + group (List[torch.nn.Parameter]): + Args: + The group of parameters to split + + Returns: + Tuple[List[torch.Tensor], List[torch.Tensor]]: + list of gradients for non MoE params, list of gradients of MoE params + """ + expert_grads: List[torch.Tensor] = [] + shared_grads: List[torch.Tensor] = [] + + for p in group: + if p.grad is not None: + if is_moe_param(p): + expert_grads.append(p.grad.to(p.dtype)) + else: + shared_grads.append(p.grad.to(p.dtype)) + return shared_grads, expert_grads + + +def split_params_into_different_moe_groups_for_optimizer( + param_groups: Union[Dict[str, Any], Tuple[Dict[str, Any], ...], List[Dict[str, Any]]], + max_group_size: Union[int, float] = 178956971) -> List[Dict[str, Any]]: + """Split parameters into different MoE groups for optimizer + + Args: + param_groups (Union[Dict[str, Any], Tuple[Dict[str, Any], ...], List[Dict[str, Any]]]) + The list of parameter groups to split + + Returns: + List[Dict[str, Any]]: + list of MoE/non-MoE groups for optimizer + """ + if isinstance(param_groups, tuple): + param_groups = list(param_groups) # Tuple cannot be modified + elif isinstance(param_groups, dict): + param_groups = [param_groups] + elif not isinstance(param_groups, list): + raise ValueError(f"Unknown param group type of {type(param_groups)}") + + # gather all data parallel group names + data_parallel_group_names: Set[str] = set() + for param_group in param_groups: + for param in cast(List[nn.Parameter], param_group["params"]): + if is_moe_param(param): + data_parallel_group_names.add(param.group_name) + + # Create the param MoE groups, leave param assign to next step + group_moe: Dict[str, Dict[str, Dict[str, Any]]] = defaultdict(lambda: defaultdict(dict)) + for param_group in param_groups: + for key in data_parallel_group_names: + group_moe[param_group['name']][key] = { + **param_group, + 'name': key, + 'moe': True, + 'params': [], + } + + # Assign param + for param_group in param_groups: + new_params: List[nn.Parameter] = [] + + for param in cast(List[nn.Parameter], param_group['params']): + if is_moe_param(param): + group_moe[param_group['name']][param.group_name]['params'].append(param) + else: + new_params.append(param) + param_group['params'] = new_params + + # Flatten the moe 
groups + if max_group_size is not None: + for moe_group in group_moe.values(): + for param_group in moe_group.values(): + cur_group: List[nn.Parameter] = [] + all_groups: List[List[nn.Parameter]] = [] + size_of_cur_group = 0 + + for param in cast(List[nn.Parameter], param_group['params']): + if size_of_cur_group + param.numel() <= max_group_size: + cur_group.append(param) + size_of_cur_group += param.numel() + else: + all_groups.append(cur_group) + cur_group = [param] + size_of_cur_group = param.numel() + + if cur_group: + all_groups.append(cur_group) + + for group in all_groups: + param_groups.append({**param_group, 'params': group}) + else: + for moe_group in group_moe.values(): + for param_group in moe_group.values(): + param_groups.append(param_group) + + return param_groups + + +def is_moe_param_group(param_group): + return param_group.get('moe', False) + + +def configure_moe_param_groups(model_parameters: List): + assert isinstance(model_parameters, list), "model_parameters must be a list" + + for p in model_parameters: + # match torch.optim.Optimizer expectations, + # see: https://github.com/pytorch/pytorch/blob/2ffab6e663b9c6951048b8c8ba82d2cc5ca5c2fc/torch/optim/optimizer.py#L270-L272 + if not isinstance(p, (torch.Tensor, dict)): + raise TypeError("param argument that would be given to the optimizer should be " + f"an iterable of Tensors or dicts, but got {type(p)}") + + # peak at the first element to determine how to proceed + first = model_parameters[0] + + # Case 1: model_parameters is a list of torch.nn.Parameter + # -> need to create moe compatible param groups + if isinstance(first, torch.nn.Parameter): + param_group = {'params': model_parameters, 'name': 'dense-params'} + return split_params_into_different_moe_groups_for_optimizer(param_group) + + # Case 2: model_parameters is a list of param groups List[dict] + # -> moe compatible param groups might already exist, if not create them + elif isinstance(first, dict): + #there are no moe groups created + if not any(['moe' in param_group for param_group in model_parameters]): + return split_params_into_different_moe_groups_for_optimizer(model_parameters) + else: + # moe groups exist, nothing to do + return model_parameters diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..034c0eba53cf5348b4b398c5c8dbac5b86cc32ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6caa13eb66c66dbfa2a2d1e25cfd39dd7fc6bb89 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5670e181be44ac474a95ff2f3c15b6380c1a7da Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06b5df82e44516251734690684654f4e9130c8a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dc78bcfce77f86a9a398b5dfd2d07a274887da5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad62abc46c33fc491cf55e19c70eca8d4a710ae6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7c0d5560f120c1416e9cda8ebb4cbba76124a03 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b404ebd32c2806937d2f1c2bef13f0d6326d0c3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..346d06efe24d6643155dbd1a1a4ef99fea919c95 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14b90355fa3d26a81067b226c755e41c86f3ed47 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ce3d3d6375e74fb3e69710444f256c80fec9430 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43051105fe11444c79bfd922f655ad53ffc9dc4c Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff36ed10004a88e225bece0123f7039d7655cb27 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b76275ffbd096bc78cd9391d63b9d0ed08d7d6f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2513706c5e9ed6cf3c2580a25eb457c06749f32d Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0553efa96852aea49dd474d462e0c63a8e3679c9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84d502fb0ee924dcd594b1ed36b697a57f97c7f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5203c773c5af703831a752b97c47213500072b29 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52f90865812396f8467abb7480e95fdacac583a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7e041f629b610601a6f7c51666d6fb3940285c2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9827f2c7880bd3ced5b3f44bcf832f22b624dada Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0a91e9258735ef11e56109aa7aed156c468bf02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/__init__.py b/venv/lib/python3.10/site-packages/joblib/test/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4fa05f72ee9fd605e0b487d8a9bbd23cd4bfde6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc b/venv/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8152bbe66e231d6129f9625423f63be9af0bc4f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py b/venv/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..ba903d6cc2cd75879eed60ff31ecdf7ffe230d45 --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py @@ -0,0 +1,95 @@ +""" +This script is used to generate test data for joblib/test/test_numpy_pickle.py +""" + +import sys +import re + +# pytest needs to be able to import this module even when numpy is +# not installed +try: + import numpy as np +except ImportError: + np = None + +import joblib + + +def get_joblib_version(joblib_version=joblib.__version__): + """Normalize joblib version by removing suffix. + + >>> get_joblib_version('0.8.4') + '0.8.4' + >>> get_joblib_version('0.8.4b1') + '0.8.4' + >>> get_joblib_version('0.9.dev0') + '0.9' + """ + matches = [re.match(r'(\d+).*', each) + for each in joblib_version.split('.')] + return '.'.join([m.group(1) for m in matches if m is not None]) + + +def write_test_pickle(to_pickle, args): + kwargs = {} + compress = args.compress + method = args.method + joblib_version = get_joblib_version() + py_version = '{0[0]}{0[1]}'.format(sys.version_info) + numpy_version = ''.join(np.__version__.split('.')[:2]) + + # The game here is to generate the right filename according to the options. 
+ body = '_compressed' if (compress and method == 'zlib') else '' + if compress: + if method == 'zlib': + kwargs['compress'] = True + extension = '.gz' + else: + kwargs['compress'] = (method, 3) + extension = '.pkl.{}'.format(method) + if args.cache_size: + kwargs['cache_size'] = 0 + body += '_cache_size' + else: + extension = '.pkl' + + pickle_filename = 'joblib_{}{}_pickle_py{}_np{}{}'.format( + joblib_version, body, py_version, numpy_version, extension) + + try: + joblib.dump(to_pickle, pickle_filename, **kwargs) + except Exception as e: + # With old python version (=< 3.3.), we can arrive there when + # dumping compressed pickle with LzmaFile. + print("Error: cannot generate file '{}' with arguments '{}'. " + "Error was: {}".format(pickle_filename, kwargs, e)) + else: + print("File '{}' generated successfully.".format(pickle_filename)) + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser(description="Joblib pickle data " + "generator.") + parser.add_argument('--cache_size', action="store_true", + help="Force creation of companion numpy " + "files for pickled arrays.") + parser.add_argument('--compress', action="store_true", + help="Generate compress pickles.") + parser.add_argument('--method', type=str, default='zlib', + choices=['zlib', 'gzip', 'bz2', 'xz', 'lzma', 'lz4'], + help="Set compression method.") + # We need to be specific about dtypes in particular endianness + # because the pickles can be generated on one architecture and + # the tests run on another one. See + # https://github.com/joblib/joblib/issues/279. + to_pickle = [np.arange(5, dtype=np.dtype('