Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +3 -0
- venv/bin/accelerate-config +8 -0
- venv/bin/accelerate-launch +8 -0
- venv/bin/activate +69 -0
- venv/bin/activate.csh +26 -0
- venv/bin/chardetect +8 -0
- venv/bin/ds_bench +17 -0
- venv/bin/ds_report +6 -0
- venv/bin/hjson +8 -0
- venv/bin/isympy +8 -0
- venv/bin/lm_eval +8 -0
- venv/bin/nltk +8 -0
- venv/bin/pip +8 -0
- venv/bin/torchrun +8 -0
- venv/bin/tqdm +8 -0
- venv/bin/transformers-cli +8 -0
- venv/bin/undill +22 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/comm.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/ccl.py +193 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/comm.py +771 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/config.py +33 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/constants.py +51 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/utils.py +133 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/config.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/engine.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/base_engine.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/base_moe.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/opt.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/__init__.py +5 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/experts.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/layer.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/mappings.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/sharded_moe.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/experts.py +38 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/layer.py +132 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/mappings.py +108 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/sharded_moe.py +585 -0
- venv/lib/python3.10/site-packages/deepspeed/moe/utils.py +182 -0
- venv/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc +0 -0
.gitattributes
CHANGED
@@ -142,3 +142,6 @@ venv/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux
 venv/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/torch/lib/libc10.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/ptxas filter=lfs diff=lfs merge=lfs -text
venv/bin/accelerate-config
ADDED
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from accelerate.commands.config import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
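
Note: the venv/bin entries in this commit are the standard console-script shims that setuptools generates for a package's entry points. As a minimal sketch (package and function names here are illustrative, not taken from this repository), a declaration like the following is what produces a wrapper of the shape shown above:

    # setup.py sketch (hypothetical names, for illustration only)
    from setuptools import setup

    setup(
        name="example-tool",
        entry_points={
            "console_scripts": [
                # "command = module.path:function" yields a bin/ wrapper script
                # that imports the function and calls sys.exit(function()).
                "example-tool = example_tool.cli:main",
            ],
        },
    )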
venv/bin/accelerate-launch
ADDED
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from accelerate.commands.launch import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
venv/bin/activate
ADDED
@@ -0,0 +1,69 @@
+# This file must be used with "source bin/activate" *from bash*
+# you cannot run it directly
+
+deactivate () {
+    # reset old environment variables
+    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
+        PATH="${_OLD_VIRTUAL_PATH:-}"
+        export PATH
+        unset _OLD_VIRTUAL_PATH
+    fi
+    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
+        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
+        export PYTHONHOME
+        unset _OLD_VIRTUAL_PYTHONHOME
+    fi
+
+    # This should detect bash and zsh, which have a hash command that must
+    # be called to get it to forget past commands. Without forgetting
+    # past commands the $PATH changes we made may not be respected
+    if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
+        hash -r 2> /dev/null
+    fi
+
+    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
+        PS1="${_OLD_VIRTUAL_PS1:-}"
+        export PS1
+        unset _OLD_VIRTUAL_PS1
+    fi
+
+    unset VIRTUAL_ENV
+    unset VIRTUAL_ENV_PROMPT
+    if [ ! "${1:-}" = "nondestructive" ] ; then
+        # Self destruct!
+        unset -f deactivate
+    fi
+}
+
+# unset irrelevant variables
+deactivate nondestructive
+
+VIRTUAL_ENV="/mnt/weka/peacock/cronscript/venv"
+export VIRTUAL_ENV
+
+_OLD_VIRTUAL_PATH="$PATH"
+PATH="$VIRTUAL_ENV/bin:$PATH"
+export PATH
+
+# unset PYTHONHOME if set
+# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
+# could use `if (set -u; : $PYTHONHOME) ;` in bash
+if [ -n "${PYTHONHOME:-}" ] ; then
+    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
+    unset PYTHONHOME
+fi
+
+if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
+    _OLD_VIRTUAL_PS1="${PS1:-}"
+    PS1="(venv) ${PS1:-}"
+    export PS1
+    VIRTUAL_ENV_PROMPT="(venv) "
+    export VIRTUAL_ENV_PROMPT
+fi
+
+# This should detect bash and zsh, which have a hash command that must
+# be called to get it to forget past commands. Without forgetting
+# past commands the $PATH changes we made may not be respected
+if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
+    hash -r 2> /dev/null
+fi
venv/bin/activate.csh
ADDED
@@ -0,0 +1,26 @@
+# This file must be used with "source bin/activate.csh" *from csh*.
+# You cannot run it directly.
+# Created by Davide Di Blasi <[email protected]>.
+# Ported to Python 3.3 venv by Andrew Svetlov <[email protected]>
+
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+setenv VIRTUAL_ENV "/mnt/weka/peacock/cronscript/venv"
+
+set _OLD_VIRTUAL_PATH="$PATH"
+setenv PATH "$VIRTUAL_ENV/bin:$PATH"
+
+
+set _OLD_VIRTUAL_PROMPT="$prompt"
+
+if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
+    set prompt = "(venv) $prompt"
+    setenv VIRTUAL_ENV_PROMPT "(venv) "
+endif
+
+alias pydoc python -m pydoc
+
+rehash
venv/bin/chardetect
ADDED
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from chardet.cli.chardetect import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
venv/bin/ds_bench
ADDED
@@ -0,0 +1,17 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+
+from benchmarks.communication.run_all import main
+from benchmarks.communication.constants import *
+from benchmarks.communication.utils import *
+import os
+import sys
+
+# Run the same file with deepspeed launcher. This is required since setuptools will auto-detect python files and insert a python shebang for both 'scripts' and 'entry_points', and this benchmarks require the DS launcher
+required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
+if not all(map(lambda v: v in os.environ, required_env)):
+    import subprocess
+    subprocess.run("deepspeed $(which ds_bench) " + " ".join(sys.argv[1:]), shell=True)
+else:
+    args = benchmark_parser().parse_args()
+    rank = args.local_rank
+    main(args, rank)
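
The ds_bench script above relaunches itself under the DeepSpeed launcher whenever the usual torch.distributed environment variables are missing. A minimal standalone sketch of that guard (the environment variable names are copied from the script; the relaunch command and messages are illustrative):

    import os
    import subprocess
    import sys

    REQUIRED = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]

    if not all(v in os.environ for v in REQUIRED):
        # Not started by a distributed launcher: re-run this script under `deepspeed`.
        subprocess.run(["deepspeed", sys.argv[0], *sys.argv[1:]], check=False)
    else:
        # Launched with a distributed environment; proceed with the benchmark.
        print("distributed environment detected")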
venv/bin/ds_report
ADDED
@@ -0,0 +1,6 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+
+from deepspeed.env_report import cli_main
+
+if __name__ == '__main__':
+    cli_main()
venv/bin/hjson
ADDED
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from hjson.tool import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
venv/bin/isympy
ADDED
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from isympy import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
venv/bin/lm_eval
ADDED
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from lm_eval.__main__ import cli_evaluate
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(cli_evaluate())
venv/bin/nltk
ADDED
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from nltk.cli import cli
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(cli())
venv/bin/pip
ADDED
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from pip._internal.cli.main import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
venv/bin/torchrun
ADDED
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from torch.distributed.run import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
venv/bin/tqdm
ADDED
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from tqdm.cli import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
venv/bin/transformers-cli
ADDED
@@ -0,0 +1,8 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from transformers.commands.transformers_cli import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
venv/bin/undill
ADDED
@@ -0,0 +1,22 @@
+#!/mnt/weka/peacock/cronscript/venv/bin/python3
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+#  - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+unpickle the contents of a pickled object file
+
+Examples::
+
+    $ undill hello.pkl
+    ['hello', 'world']
+"""
+
+if __name__ == '__main__':
+    import sys
+    import dill
+    for file in sys.argv[1:]:
+        print (dill.load(open(file,'rb')))
+
venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/comm.cpython-310.pyc
ADDED
Binary file (20 kB)
venv/lib/python3.10/site-packages/deepspeed/comm/ccl.py
ADDED
@@ -0,0 +1,193 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+'''
+Copyright 2021 The Microsoft DeepSpeed Team
+'''
+
+import torch
+from deepspeed.accelerator import get_accelerator
+from .reduce_op import ReduceOp
+from .torch import TorchBackend
+
+
+def build_ccl_op():
+    builder = get_accelerator().create_op_builder("CCLCommBuilder")
+    if builder is None:
+        return None
+    ccl_cpp_module = builder.load()
+    print(f'DeepSpeed {builder.absolute_name()} built successfully')
+    return ccl_cpp_module
+
+
+class CCLHandler():
+
+    def __init__(self, ccl_comm_op=None):
+        self.ccl_comm_op = ccl_comm_op
+
+    def wait(self):
+        # backend covered it
+        pass
+
+
+class CCLBackend(TorchBackend):
+
+    def __init__(self, name='ccl', rank=-1, world_size=-1, mpu=None, timeout=None, init_method=None):
+        self.ccl_comm_op = build_ccl_op()
+        if self.ccl_comm_op is None:
+            # set CCLBackend to uninitialized state if CCLCommBuilder cannot be loaded
+            self.initialized = False
+            return
+        super(CCLBackend, self).__init__(backend='ccl',
+                                         name='torch',
+                                         rank=rank,
+                                         world_size=world_size,
+                                         timeout=timeout,
+                                         init_method=init_method)
+        self.name = 'ccl'
+        size = self.get_world_size()
+        rank = self.get_rank()
+        main_kvs = self.ccl_comm_op.get_kvs_addr(rank)
+        main_kvs = torch.tensor(main_kvs).to(torch.uint8).to(get_accelerator().current_device_name())
+        super(CCLBackend, self).broadcast(main_kvs, 0)
+        self.ccl_comm_op.initialize(size, rank, main_kvs)
+        self.initialized = True
+        self.groups = [tuple(range(self.get_world_size()))]
+        self.available_coll = self.ccl_comm_op.get_available_coll()
+
+    def is_initialized(self):
+        return self.initialized
+
+    def run_collective(self, name, **kwargs):
+        if name in self.available_coll:
+            if 'group' in kwargs:
+                kwargs['group'] = self.get_all_ranks_from_group(kwargs['group'])
+            if 'dst' in kwargs:
+                kwargs['dst'] = kwargs['group'].index(kwargs['dst'])
+            if 'src' in kwargs:
+                kwargs['src'] = kwargs['group'].index(kwargs['src'])
+            func = "self.ccl_comm_op." + name
+            eval(func)(*(kwargs.values()))
+            return CCLHandler(self.ccl_comm_op)
+        else:
+            func = "super(CCLBackend, self)." + name
+            eval(func)(*(kwargs.values()))
+            return CCLHandler(self.ccl_comm_op)
+
+    def all_reduce(self, tensor, op=ReduceOp.SUM, group=None, async_op=False):
+        use_caching = False
+        if use_caching:
+            match_id = f"{tensor.size()}-{op}"
+            name = "all_reduce_caching"
+            if name in self.available_coll:
+                group = self.get_all_ranks_from_group(group)
+                return self.ccl_comm_op.all_reduce_caching(tensor, op, match_id, group, async_op)
+            else:
+                return self.run_collective(name=name,
+                                           tensor=tensor,
+                                           op=op,
+                                           match_id=match_id,
+                                           group=group,
+                                           async_op=async_op)
+        else:
+            name = "all_reduce"
+            if name in self.available_coll:
+                group = self.get_all_ranks_from_group(group)
+                return self.ccl_comm_op.all_reduce(tensor, op, group, async_op)
+            else:
+                return self.run_collective(name=name, tensor=tensor, op=op, group=group, async_op=async_op)
+
+    def inference_all_reduce(self, tensor, op=ReduceOp.SUM, group=None):
+        name = "inference_all_reduce"
+        if name in self.available_coll:
+            return self.ccl_comm_op.inference_all_reduce(tensor, op)
+        else:
+            return self.run_collective(name=name, tensor=tensor, op=op, group=None, async_op=False)
+
+    def broadcast(self, tensor, src, group=None, async_op=False):
+        return self.run_collective(name="broadcast", tensor=tensor, src=src, group=group, async_op=async_op)
+
+    def all_gather(self, tensor_list, tensor, group=None, async_op=False):
+        return self.run_collective(name="all_gather",
+                                   tensor_list=tensor_list,
+                                   tensor=tensor,
+                                   group=group,
+                                   async_op=async_op)
+
+    def reduce_scatter_tensor(self, output_tensor, input_tensor, op, group=None, async_op=False):
+        return self.run_collective(name="reduce_scatter_tensor",
+                                   output_tensor=output_tensor,
+                                   input_tensor=input_tensor,
+                                   op=op,
+                                   group=group)
+
+    def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False):
+        return self.run_collective(name="all_gather_into_tensor",
+                                   output_tensor=output_tensor,
+                                   input_tensor=input_tensor,
+                                   group=group)
+
+    def all_to_all_single(self, output, input, output_split_sizes, input_split_sizes, group=None, async_op=False):
+        return self.run_collective(name="all_to_all_single",
+                                   output=output,
+                                   input=input,
+                                   output_split_sizes=output_split_sizes,
+                                   input_split_sizes=input_split_sizes,
+                                   group=group)
+
+    def send(self, tensor, dst, group=None, tag=0):
+        return self.run_collective(name="send", tensor=tensor, dst=dst, group=group, tag=tag)
+
+    def recv(self, tensor, src, group=None, tag=0):
+        return self.run_collective(name="recv", tensor=tensor, src=src, group=group, tag=tag)
+
+    def gather(self, tensor, gather_list, dst, group=None, async_op=False):
+        return self.run_collective(name="gather", tensor=tensor, gather_list=gather_list, dst=dst, group=group)
+
+    def scatter(self, tensor, gather_list, dst, group=None, async_op=False):
+        return self.run_collective(name="scatter", tensor=tensor, gather_list=gather_list, dst=dst, group=group)
+
+    def barrier(self, group=None, async_op=False):
+        return self.run_collective(name="barrier", group=group, async_op=async_op)
+
+    def monitored_barrier(self, group=None, timeout=None, wait_all_ranks=False):
+        return self.run_collective(name="monitored_barrier", group=group)
+
+    def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_op=False):
+        return self.run_collective(name="reduce_scatter",
+                                   output=output,
+                                   input_list=input_list,
+                                   op=op,
+                                   group=group,
+                                   async_op=async_op)
+
+    def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
+        return self.run_collective(name="reduce", tensor=tensor, dst=dst, op=op, group=group, async_op=async_op)
+
+    def new_group(self, ranks):
+        return super(CCLBackend, self).new_group(ranks)
+
+    def _new_group(self, ranks, group):
+        size = len(ranks)
+        rank = self.get_rank()
+        sub_main_kvs = self.ccl_comm_op.get_sub_kvs_addr(rank == ranks[0])
+        sub_main_kvs = torch.tensor(sub_main_kvs).to(torch.uint8).to(get_accelerator().current_device_name())
+        super(CCLBackend, self).broadcast(sub_main_kvs, ranks[0], group)
+        self.ccl_comm_op.initialize_sub_comm(size, ranks.index(rank), sub_main_kvs, ranks)
+        self.groups.append(tuple(ranks))
+
+    def get_all_ranks_from_group(self, group):
+        if group is None:
+            return list(range(self.get_world_size()))
+        rank = 0
+        results = []
+        try:
+            while True:
+                results.append(super(CCLBackend, self).get_global_rank(group, rank))
+                rank += 1
+        except (ValueError, RuntimeError):
+            pass
+        if tuple(results) not in self.groups:
+            self._new_group(results, group)
+        return results
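
CCLBackend.run_collective above routes each call by building a string such as "self.ccl_comm_op.<name>" and evaluating it. As an illustration only (this is not part of the DeepSpeed source added in this commit), the same name-based dispatch can be written with getattr:

    # Illustrative sketch of run_collective-style routing without eval;
    # `backend` is assumed to be any object exposing collective methods by name.
    class Dispatcher:
        def __init__(self, backend):
            self.backend = backend

        def run(self, name, **kwargs):
            func = getattr(self.backend, name)  # resolve the collective by name
            return func(**kwargs)               # forward keyword arguments as-is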
venv/lib/python3.10/site-packages/deepspeed/comm/comm.py
ADDED
@@ -0,0 +1,771 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# SPDX-License-Identifier: Apache-2.0
|
3 |
+
|
4 |
+
# DeepSpeed Team
|
5 |
+
"""
|
6 |
+
DeepSpeed Communication Package: deepspeed.comm
|
7 |
+
deepspeed.comm
|
8 |
+
-- import and use deepspeed.ops.comm
|
9 |
+
-- use torch.distributed directly if both this package and torch.distributed use the same NCCL version
|
10 |
+
-- use custom collectives
|
11 |
+
-- can either use torch.dist or ds.ops.comm?
|
12 |
+
Note: the old 1-bit compressed allreduce variants that resided in deepspeed.runtime.comm will be moved here as well.
|
13 |
+
deepspeed.comm API
|
14 |
+
-- must be kept fully compatible (same signatures) as torch.dist API to ensure backward/cross-framework compatibility.
|
15 |
+
-- e.g. if a client code used
|
16 |
+
from deepspeed import comm as dist
|
17 |
+
instead of
|
18 |
+
import torch.distributed as dist
|
19 |
+
The code should work without breaking any of the public torch.distributed functionality
|
20 |
+
Future:
|
21 |
+
-- deepspeed groups API should be brought into ds.comm
|
22 |
+
"""
|
23 |
+
|
24 |
+
import torch
|
25 |
+
import os
|
26 |
+
|
27 |
+
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
|
28 |
+
from .constants import *
|
29 |
+
from .reduce_op import *
|
30 |
+
from deepspeed.accelerator import get_accelerator
|
31 |
+
from deepspeed.comm.ccl import CCLBackend
|
32 |
+
|
33 |
+
from deepspeed.utils.comms_logging import CommsLogger
|
34 |
+
from deepspeed.utils import timer, get_caller_func
|
35 |
+
from deepspeed.comm.torch import TorchBackend
|
36 |
+
|
37 |
+
from deepspeed import utils
|
38 |
+
from datetime import timedelta
|
39 |
+
|
40 |
+
# Current deepspeed.comm backend (cdb) global object for simple access by client code
|
41 |
+
cdb = None
|
42 |
+
|
43 |
+
# Create global timer for ops
|
44 |
+
timers = timer.SynchronizedWallClockTimer()
|
45 |
+
timer_summary = {}
|
46 |
+
|
47 |
+
comms_logger = CommsLogger()
|
48 |
+
|
49 |
+
# Maintain objects of all initialized ds backends and assign them using the API functions in this file
|
50 |
+
nccl_backend = None
|
51 |
+
mpi_backend = None
|
52 |
+
ccl_backend = None
|
53 |
+
hccl_backend = None
|
54 |
+
|
55 |
+
# This should be set here so all rank/size information from the launcher can be propagated
|
56 |
+
from deepspeed.comm.utils import *
|
57 |
+
|
58 |
+
|
59 |
+
class ProcessGroup():
|
60 |
+
|
61 |
+
def __init__(self, comm_id, ranks=[]):
|
62 |
+
self.ranks = ranks
|
63 |
+
self.comm_id = comm_id
|
64 |
+
self.size = len(ranks)
|
65 |
+
|
66 |
+
|
67 |
+
def _configure_using_config_file(config):
|
68 |
+
if config.comms_logger_enabled:
|
69 |
+
comms_logger.configure(config)
|
70 |
+
|
71 |
+
|
72 |
+
def configure(
|
73 |
+
deepspeed_config=None,
|
74 |
+
enabled=None,
|
75 |
+
prof_all=None,
|
76 |
+
prof_ops=None,
|
77 |
+
verbose=None,
|
78 |
+
debug=None,
|
79 |
+
):
|
80 |
+
|
81 |
+
if deepspeed_config is not None:
|
82 |
+
_configure_using_config_file(deepspeed_config.comms_config)
|
83 |
+
|
84 |
+
if enabled is not None:
|
85 |
+
comms_logger.enabled = enabled
|
86 |
+
|
87 |
+
if prof_all is not None:
|
88 |
+
comms_logger.prof_all = prof_all
|
89 |
+
|
90 |
+
if prof_ops is not None:
|
91 |
+
comms_logger.prof_ops = prof_ops
|
92 |
+
|
93 |
+
if verbose is not None:
|
94 |
+
comms_logger.verbose = verbose
|
95 |
+
|
96 |
+
if debug is not None:
|
97 |
+
comms_logger.debug = debug
|
98 |
+
|
99 |
+
|
100 |
+
# Logging wrapper for timing ops
|
101 |
+
def timed_op(func):
|
102 |
+
|
103 |
+
def log_wrapper(*args, **kwargs):
|
104 |
+
# Add enabled flag so that overhead to each comm op is two if conditions at most
|
105 |
+
if comms_logger.enabled:
|
106 |
+
if ('prof' in kwargs
|
107 |
+
and kwargs['prof']) or comms_logger.prof_all or ('log_name' in kwargs
|
108 |
+
and kwargs['log_name'] in comms_logger.prof_ops):
|
109 |
+
# Need func args for their defaults
|
110 |
+
func_args = get_default_args(func)
|
111 |
+
func_args.update(kwargs)
|
112 |
+
msg_size = get_msg_size_from_args(func, *args, **kwargs)
|
113 |
+
log_name = get_debug_log_name(func_args, comms_logger.debug)
|
114 |
+
timers(log_name).start()
|
115 |
+
# Return the op, then stop the op's timer
|
116 |
+
try:
|
117 |
+
return func(*args, **kwargs)
|
118 |
+
finally:
|
119 |
+
if comms_logger.enabled:
|
120 |
+
# Need to make op blocking for accurate logging
|
121 |
+
get_accelerator().synchronize()
|
122 |
+
# If we're using MPI, we can't simply sync the stream
|
123 |
+
if cdb.using_mpi:
|
124 |
+
cdb.barrier()
|
125 |
+
if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or (
|
126 |
+
'log_name' in kwargs and kwargs['log_name'] in comms_logger.prof_ops):
|
127 |
+
log_name = get_debug_log_name(func_args, comms_logger.debug)
|
128 |
+
raw_name = func.__name__
|
129 |
+
timers(log_name).stop()
|
130 |
+
# need temp var since 'elapsed' resets events
|
131 |
+
time_elapsed = timers(log_name).elapsed(reset=False)
|
132 |
+
comms_logger.append(raw_name, log_name, time_elapsed, msg_size)
|
133 |
+
|
134 |
+
return log_wrapper
|
135 |
+
|
136 |
+
|
137 |
+
# For compatibility with torch distributed's init_process_group, we shall retain the signature from PyTorch code.
|
138 |
+
# DeepSpeed NCCL/MPI backend may not need all these params as we will have our own implementation.
|
139 |
+
# Please read full torch.distributed API docs from https://pytorch.org/docs/stable/distributed.html
|
140 |
+
|
141 |
+
|
142 |
+
# UNUSED: Future helper function to initialize DS backends
|
143 |
+
def init_deepspeed_backend(ds_backend, timeout, init_method):
|
144 |
+
global cdb
|
145 |
+
global nccl_backend
|
146 |
+
global mpi_backend
|
147 |
+
global ccl_backend
|
148 |
+
global hccl_backend
|
149 |
+
|
150 |
+
rank = int(os.getenv('RANK', '-1'))
|
151 |
+
size = int(os.getenv('WORLD_SIZE', '-1'))
|
152 |
+
|
153 |
+
if ds_backend == NCCL_BACKEND:
|
154 |
+
utils.logger.debug("NCCL backend in DeepSpeed not yet implemented")
|
155 |
+
elif ds_backend == MPI_BACKEND:
|
156 |
+
utils.logger.debug("MPI backend in DeepSpeed not yet implemented")
|
157 |
+
elif ds_backend == GLOO_BACKEND:
|
158 |
+
utils.logger.debug("Gloo backend in DeepSpeed not yet implemented")
|
159 |
+
elif ds_backend == CCL_BACKEND:
|
160 |
+
ccl_backend = CCLBackend(rank=rank, world_size=size, timeout=timeout, init_method=init_method)
|
161 |
+
utils.logger.info(f"Initialize {ds_backend} backend")
|
162 |
+
elif ds_backend == HCCL_BACKEND:
|
163 |
+
utils.logger.debug("HCCL backend in DeepSpeed not yet implemented")
|
164 |
+
else:
|
165 |
+
utils.logger.debug(f"DeepSpeed does not support {ds_backend} backend")
|
166 |
+
|
167 |
+
|
168 |
+
def is_initialized():
|
169 |
+
#assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
170 |
+
if cdb is None:
|
171 |
+
return False
|
172 |
+
else:
|
173 |
+
return cdb.is_initialized()
|
174 |
+
|
175 |
+
|
176 |
+
def destroy_process_group(group=None):
|
177 |
+
global cdb
|
178 |
+
return cdb.destroy_process_group(group=group)
|
179 |
+
|
180 |
+
|
181 |
+
def new_group(ranks):
|
182 |
+
global cdb
|
183 |
+
assert cdb is not None and cdb.is_initialized(
|
184 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
185 |
+
return cdb.new_group(ranks)
|
186 |
+
|
187 |
+
|
188 |
+
def is_available() -> bool:
|
189 |
+
|
190 |
+
# Returns ``True`` if the deepspeed comm package is available.
|
191 |
+
|
192 |
+
# TODO: load other ops. Clients including deepspeed itself should use deepspeed.comm to import
|
193 |
+
# any communication related primitives from this package.
|
194 |
+
# use hasattr(deepspeed.csrc.ops, "_comm") or something
|
195 |
+
return True
|
196 |
+
|
197 |
+
|
198 |
+
def set_backend():
|
199 |
+
global cdb
|
200 |
+
global nccl_backend
|
201 |
+
global mpi_backend
|
202 |
+
global ccl_backend
|
203 |
+
global hccl_backend
|
204 |
+
|
205 |
+
backend_name = get_accelerator().communication_backend_name()
|
206 |
+
|
207 |
+
if backend_name == NCCL_BACKEND:
|
208 |
+
if nccl_backend is not None and nccl_backend.is_initialized():
|
209 |
+
cdb = nccl_backend
|
210 |
+
elif backend_name == MPI_BACKEND:
|
211 |
+
if mpi_backend is not None and mpi_backend.is_initialized():
|
212 |
+
cdb = mpi_backend
|
213 |
+
elif backend_name == CCL_BACKEND:
|
214 |
+
if ccl_backend is not None and ccl_backend.is_initialized():
|
215 |
+
cdb = ccl_backend
|
216 |
+
elif backend_name == HCCL_BACKEND:
|
217 |
+
if hccl_backend is not None and hccl_backend.is_initialized():
|
218 |
+
cdb = hccl_backend
|
219 |
+
|
220 |
+
|
221 |
+
@timed_op
|
222 |
+
def broadcast(tensor, src, group=None, async_op=False, prof=False, log_name='broadcast', debug=get_caller_func()):
|
223 |
+
global cdb
|
224 |
+
return cdb.broadcast(tensor=tensor, src=src, group=group, async_op=async_op)
|
225 |
+
|
226 |
+
|
227 |
+
@timed_op
|
228 |
+
def all_gather(tensor_list,
|
229 |
+
tensor,
|
230 |
+
group=None,
|
231 |
+
async_op=False,
|
232 |
+
prof=False,
|
233 |
+
log_name='all_gather',
|
234 |
+
debug=get_caller_func()):
|
235 |
+
global cdb
|
236 |
+
return cdb.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op)
|
237 |
+
|
238 |
+
|
239 |
+
def has_reduce_scatter_tensor():
|
240 |
+
global cdb
|
241 |
+
assert cdb is not None and cdb.is_initialized(
|
242 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
243 |
+
return cdb.has_reduce_scatter_tensor()
|
244 |
+
|
245 |
+
|
246 |
+
def reduce_scatter_fn(output_tensor,
|
247 |
+
tensor,
|
248 |
+
op=ReduceOp.SUM,
|
249 |
+
group=None,
|
250 |
+
async_op=False,
|
251 |
+
prof=False,
|
252 |
+
debug=get_caller_func()):
|
253 |
+
global cdb
|
254 |
+
assert cdb is not None and cdb.is_initialized(
|
255 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
256 |
+
if cdb.has_reduce_scatter_tensor():
|
257 |
+
return reduce_scatter_tensor(output_tensor,
|
258 |
+
tensor,
|
259 |
+
op=op,
|
260 |
+
group=group,
|
261 |
+
async_op=async_op,
|
262 |
+
prof=prof,
|
263 |
+
debug=debug)
|
264 |
+
else:
|
265 |
+
if get_rank() == 0:
|
266 |
+
utils.logger.warning_once("unable to find torch.distributed.reduce_scatter_tensor. will fall back to "
|
267 |
+
"torch.distributed.reduce_scatter which will result in suboptimal performance. "
|
268 |
+
"please consider upgrading your pytorch installation.")
|
269 |
+
input_tensor_lst = list(torch.chunk(tensor, cdb.get_world_size(group)))
|
270 |
+
return reduce_scatter(output_tensor,
|
271 |
+
input_tensor_lst,
|
272 |
+
op=op,
|
273 |
+
group=group,
|
274 |
+
async_op=async_op,
|
275 |
+
prof=prof,
|
276 |
+
debug=debug)
|
277 |
+
|
278 |
+
|
279 |
+
@timed_op
|
280 |
+
def reduce_scatter_tensor(output_tensor,
|
281 |
+
tensor,
|
282 |
+
op=ReduceOp.SUM,
|
283 |
+
group=None,
|
284 |
+
async_op=False,
|
285 |
+
prof=False,
|
286 |
+
log_name='reduce_scatter_tensor',
|
287 |
+
debug=get_caller_func()):
|
288 |
+
global cdb
|
289 |
+
return cdb.reduce_scatter_tensor(output_tensor=output_tensor,
|
290 |
+
input_tensor=tensor,
|
291 |
+
op=op,
|
292 |
+
group=group,
|
293 |
+
async_op=async_op)
|
294 |
+
|
295 |
+
|
296 |
+
@timed_op
|
297 |
+
def all_gather_into_tensor(output_tensor,
|
298 |
+
tensor,
|
299 |
+
group=None,
|
300 |
+
async_op=False,
|
301 |
+
prof=False,
|
302 |
+
log_name='all_gather_into_tensor',
|
303 |
+
debug=get_caller_func()):
|
304 |
+
global cdb
|
305 |
+
return cdb.all_gather_into_tensor(output_tensor=output_tensor, input_tensor=tensor, group=group, async_op=async_op)
|
306 |
+
|
307 |
+
|
308 |
+
def has_all_gather_into_tensor():
|
309 |
+
global cdb
|
310 |
+
assert cdb is not None and cdb.is_initialized(
|
311 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
312 |
+
return cdb.has_all_gather_into_tensor()
|
313 |
+
|
314 |
+
|
315 |
+
def allgather_fn(output_tensor, input_tensor, group=None, async_op=False, debug=get_caller_func()):
|
316 |
+
global cdb
|
317 |
+
assert cdb is not None and cdb.is_initialized(
|
318 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
319 |
+
if cdb.has_all_gather_into_tensor():
|
320 |
+
return all_gather_into_tensor(output_tensor, input_tensor, group=group, async_op=async_op, debug=debug)
|
321 |
+
else:
|
322 |
+
if get_rank() == 0:
|
323 |
+
utils.logger.warning_once("unable to find torch.distributed.all_gather_into_tensor. will fall back to "
|
324 |
+
"torch.distributed.all_gather which will result in suboptimal performance. "
|
325 |
+
"please consider upgrading your pytorch installation.")
|
326 |
+
output_tensors = list(torch.chunk(output_tensor, cdb.get_world_size(group)))
|
327 |
+
return all_gather(output_tensors, input_tensor, group=group, async_op=async_op, debug=debug)
|
328 |
+
|
329 |
+
|
330 |
+
@timed_op
|
331 |
+
def all_to_all_single(output,
|
332 |
+
tensor,
|
333 |
+
output_split_sizes=None,
|
334 |
+
input_split_sizes=None,
|
335 |
+
group=None,
|
336 |
+
async_op=False,
|
337 |
+
prof=False,
|
338 |
+
log_name='all_to_all_single',
|
339 |
+
debug=get_caller_func()):
|
340 |
+
global cdb
|
341 |
+
return cdb.all_to_all_single(output=output,
|
342 |
+
input=tensor,
|
343 |
+
output_split_sizes=output_split_sizes,
|
344 |
+
input_split_sizes=input_split_sizes,
|
345 |
+
group=group,
|
346 |
+
async_op=async_op)
|
347 |
+
|
348 |
+
|
349 |
+
@timed_op
|
350 |
+
def all_to_all(output_tensor_list, input_tensor_list, group=None, async_op=False):
|
351 |
+
global cdb
|
352 |
+
return cdb.all_to_all(output_tensor_list, input_tensor_list, group=group, async_op=async_op)
|
353 |
+
|
354 |
+
|
355 |
+
@timed_op
|
356 |
+
def send(tensor, dst, group=None, tag=0, prof=False, log_name='send', debug=get_caller_func()):
|
357 |
+
global cdb
|
358 |
+
return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag)
|
359 |
+
|
360 |
+
|
361 |
+
@timed_op
|
362 |
+
def recv(tensor, src=None, group=None, tag=0, prof=False, log_name='recv', debug=get_caller_func()):
|
363 |
+
global cdb
|
364 |
+
return cdb.recv(tensor=tensor, src=src, group=group, tag=tag)
|
365 |
+
|
366 |
+
|
367 |
+
@timed_op
|
368 |
+
def isend(tensor, dst, group=None, tag=0, prof=False, log_name='isend', debug=get_caller_func()):
|
369 |
+
global cdb
|
370 |
+
return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag)
|
371 |
+
|
372 |
+
|
373 |
+
@timed_op
|
374 |
+
def irecv(tensor, src=None, group=None, tag=0, prof=False, log_name='irecv', debug=get_caller_func()):
|
375 |
+
global cdb
|
376 |
+
return cdb.recv(tensor=tensor, src=src, group=group, tag=tag)
|
377 |
+
|
378 |
+
|
379 |
+
@timed_op
|
380 |
+
def gather(tensor,
|
381 |
+
gather_list=None,
|
382 |
+
dst=0,
|
383 |
+
group=None,
|
384 |
+
async_op=False,
|
385 |
+
prof=False,
|
386 |
+
log_name='gather',
|
387 |
+
debug=get_caller_func()):
|
388 |
+
global cdb
|
389 |
+
return cdb.gather(tensor=tensor, gather_list=gather_list, dst=dst, group=group, async_op=async_op)
|
390 |
+
|
391 |
+
|
392 |
+
@timed_op
|
393 |
+
def scatter(tensor,
|
394 |
+
scatter_list=None,
|
395 |
+
src=0,
|
396 |
+
group=None,
|
397 |
+
async_op=False,
|
398 |
+
prof=False,
|
399 |
+
log_name='scatter',
|
400 |
+
debug=get_caller_func()):
|
401 |
+
global cdb
|
402 |
+
return cdb.scatter(tensor=tensor, scatter_list=scatter_list, src=src, group=group, async_op=async_op)
|
403 |
+
|
404 |
+
|
405 |
+
@timed_op
|
406 |
+
def barrier(group=None, async_op=False, device_ids=None, prof=False, log_name='barrier', debug=get_caller_func()):
|
407 |
+
global cdb
|
408 |
+
return cdb.barrier(group=group, async_op=async_op)
|
409 |
+
|
410 |
+
|
411 |
+
@timed_op
|
412 |
+
def monitored_barrier(group=None,
|
413 |
+
timeout=None,
|
414 |
+
wait_all_ranks=False,
|
415 |
+
prof=False,
|
416 |
+
log_name='monitored_barrier',
|
417 |
+
debug=get_caller_func()):
|
418 |
+
global cdb
|
419 |
+
return cdb.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks)
|
420 |
+
|
421 |
+
|
422 |
+
def log_summary(show_straggler=False):
|
423 |
+
global cdb
|
424 |
+
barrier(log_name='log_summary_barrier')
|
425 |
+
if cdb.get_rank() == 0:
|
426 |
+
comms_logger.log_all(print_log=True, show_straggler=show_straggler)
|
427 |
+
else:
|
428 |
+
comms_logger.log_all(print_log=False, show_straggler=show_straggler)
|
429 |
+
barrier(log_name='log_summary_barrier')
|
430 |
+
|
431 |
+
|
432 |
+
@timed_op
|
433 |
+
def reduce(tensor,
|
434 |
+
dst,
|
435 |
+
op=ReduceOp.SUM,
|
436 |
+
group=None,
|
437 |
+
async_op=False,
|
438 |
+
prof=False,
|
439 |
+
log_name='reduce',
|
440 |
+
debug=get_caller_func()):
|
441 |
+
global cdb
|
442 |
+
return cdb.reduce(tensor=tensor, dst=dst, op=op, group=group, async_op=async_op)
|
443 |
+
|
444 |
+
|
445 |
+
@timed_op
|
446 |
+
def reduce_scatter(output,
|
447 |
+
input_list,
|
448 |
+
op=ReduceOp.SUM,
|
449 |
+
group=None,
|
450 |
+
async_op=False,
|
451 |
+
prof=False,
|
452 |
+
log_name='reduce_scatter',
|
453 |
+
debug=get_caller_func()):
|
454 |
+
global cdb
|
455 |
+
return cdb.reduce_scatter(output=output, input_list=input_list, op=op, group=group, async_op=async_op)
|
456 |
+
|
457 |
+
|
458 |
+
def has_all_reduce_coalesced():
|
459 |
+
""""""
|
460 |
+
global cdb
|
461 |
+
assert cdb is not None and cdb.is_initialized(
|
462 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
463 |
+
assert cdb.has_all_reduce_coalesced is not None, 'has_all_reduce_coalesced is not yet defined'
|
464 |
+
return cdb.has_all_reduce_coalesced
|
465 |
+
|
466 |
+
|
467 |
+
def has_coalescing_manager():
|
468 |
+
global cdb
|
469 |
+
assert cdb is not None and cdb.is_initialized(
|
470 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
471 |
+
assert cdb.has_coalescing_manager is not None, 'has_coalescing_manager is not yet defined'
|
472 |
+
return cdb.has_coalescing_manager
|
473 |
+
|
474 |
+
|
475 |
+
def all_gather_coalesced(output_tensors, input_tensors, group=None, async_op=False):
|
476 |
+
global cdb
|
477 |
+
assert cdb is not None and cdb.is_initialized(
|
478 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
479 |
+
return cdb.all_gather_coalesced(output_tensors, input_tensors, group=group, async_op=async_op)
|
480 |
+
|
481 |
+
|
482 |
+
@timed_op
|
483 |
+
def all_reduce(tensor,
|
484 |
+
op=ReduceOp.SUM,
|
485 |
+
group=None,
|
486 |
+
async_op=False,
|
487 |
+
prof=False,
|
488 |
+
log_name='all_reduce',
|
489 |
+
debug=get_caller_func()):
|
490 |
+
#if profile_comm:
|
491 |
+
# context of the timers?
|
492 |
+
# timers.start()
|
493 |
+
# TensorBoard logging for comm calls.?
|
494 |
+
global cdb
|
495 |
+
#print(f'op = {op}, cdb= {cdb.name}')
|
496 |
+
return cdb.all_reduce(tensor, op, group, async_op)
|
497 |
+
|
498 |
+
|
499 |
+
@timed_op
|
500 |
+
def inference_all_reduce(tensor,
|
501 |
+
op=ReduceOp.SUM,
|
502 |
+
group=None,
|
503 |
+
async_op=False,
|
504 |
+
prof=False,
|
505 |
+
log_name='all_reduce',
|
506 |
+
debug=get_caller_func()):
|
507 |
+
global cdb
|
508 |
+
return cdb.inference_all_reduce(tensor, op, group)
|
509 |
+
|
510 |
+
|
511 |
+
@timed_op
|
512 |
+
def all_reduce_coalesced(tensors,
|
513 |
+
op=ReduceOp.SUM,
|
514 |
+
group=None,
|
515 |
+
async_op=False,
|
516 |
+
prof=False,
|
517 |
+
log_name='all_reduce',
|
518 |
+
debug=get_caller_func()):
|
519 |
+
global cdb
|
520 |
+
return cdb.all_reduce_coalesced(tensors, op, group, async_op)
|
521 |
+
|
522 |
+
|
523 |
+
def get_world_group():
|
524 |
+
global cdb
|
525 |
+
assert cdb is not None and cdb.is_initialized(
|
526 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
527 |
+
return cdb.get_world_group()
|
528 |
+
|
529 |
+
|
530 |
+
def get_world_size(group=None) -> int:
|
531 |
+
"""
|
532 |
+
Returns the number of processes in the current process group
|
533 |
+
Args:
|
534 |
+
group (ProcessGroup, optional): The process group to work on. If None,
|
535 |
+
the default process group will be used.
|
536 |
+
Returns:
|
537 |
+
The world size of the process group
|
538 |
+
-1, if not part of the group
|
539 |
+
"""
|
540 |
+
global cdb
|
541 |
+
|
542 |
+
assert cdb is not None and cdb.is_initialized(
|
543 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
544 |
+
return cdb.get_world_size(group)
|
545 |
+
|
546 |
+
|
547 |
+
def get_rank(group=None):
|
548 |
+
"""
|
549 |
+
Returns the rank of the current process in the provided ``group`` or the
|
550 |
+
default group if none was provided.
|
551 |
+
Rank is a unique identifier assigned to each process within a distributed
|
552 |
+
process group. They are always consecutive integers ranging from 0 to
|
553 |
+
``world_size``.
|
554 |
+
Args:
|
555 |
+
group (ProcessGroup, optional): The process group to work on. If None,
|
556 |
+
the default process group will be used.
|
557 |
+
Returns:
|
558 |
+
The rank of the process group
|
559 |
+
-1, if not part of the group
|
560 |
+
"""
|
561 |
+
global cdb
|
562 |
+
assert cdb is not None and cdb.is_initialized(
|
563 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
564 |
+
return cdb.get_rank(group)
|
565 |
+
|
566 |
+
|
567 |
+
def get_local_rank():
|
568 |
+
"""
|
569 |
+
Helper function to get local rank after a backend has been set and initialized
|
570 |
+
Args:
|
571 |
+
None
|
572 |
+
Returns:
|
573 |
+
local rank (= GPU device ID)
|
574 |
+
"""
|
575 |
+
global cdb
|
576 |
+
assert cdb is not None and cdb.is_initialized(
|
577 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
578 |
+
return get_local_rank_from_launcher()
|
579 |
+
|
580 |
+
|
581 |
+
def get_global_rank(group=None, group_rank=0):
|
582 |
+
global cdb
|
583 |
+
assert cdb is not None and cdb.is_initialized(
|
584 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
585 |
+
return cdb.get_global_rank(group, group_rank)
|
586 |
+
|
587 |
+
|
588 |
+
def get_all_ranks_from_group(group=None):
|
589 |
+
global cdb
|
590 |
+
assert cdb is not None and cdb.is_initialized(
|
591 |
+
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
|
592 |
+
rank = 0
|
593 |
+
group_ranks = []
|
594 |
+
try:
|
595 |
+
while True:
|
596 |
+
group_ranks.append(cdb.get_global_rank(group, rank))
|
597 |
+
rank += 1
|
598 |
+
except (RuntimeError, ValueError):
|
599 |
+
pass
|
600 |
+
return group_ranks
|
601 |
+
|
602 |
+
|
603 |
+
# Main DeepSpeed Comms. public API.
|
604 |
+
def init_distributed(dist_backend=None,
|
605 |
+
auto_mpi_discovery=True,
|
606 |
+
distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT,
|
607 |
+
verbose=True,
|
608 |
+
timeout=default_pg_timeout,
|
609 |
+
init_method=None,
|
610 |
+
dist_init_required=None,
|
611 |
+
config=None,
|
612 |
+
rank=-1,
|
613 |
+
world_size=-1):
|
614 |
+
''' Initialize dist backend, potentially performing MPI discovery if needed
|
615 |
+
|
616 |
+
Arguments:
|
617 |
+
dist_backend: Optional (str). torch distributed backend, e.g., nccl, mpi, gloo, hccl
|
618 |
+
auto_mpi_discovery Optional (bool). if distributed environment variables are not set, attempt to discover them from MPI
|
619 |
+
distributed_port: Optional (int). torch distributed backend port
|
620 |
+
verbose: Optional (bool). verbose logging
|
621 |
+
timeout: Optional (timedelta). Timeout for operations executed against the process group. Default value equals 30 minutes.
|
622 |
+
init_method: Optional (string). Torch distributed, URL specifying how to initialize the process group. Default is “env://” if no init_method or store is specified.
|
623 |
+
config: Optional (dict). DeepSpeed configuration for setting up comms options (e.g. Comms profiling)
|
624 |
+
rank: Optional (int). The current manually specified rank. Some init_method like “tcp://” need the rank and world_size as well (see: https://pytorch.org/docs/stable/distributed.html#tcp-initialization)
|
625 |
+
world_size: Optional (int). Desired world_size for the TCP or Shared file-system initialization.
|
626 |
+
'''
|
627 |
+
global cdb
|
628 |
+
|
629 |
+
configure(deepspeed_config=config)
|
630 |
+
|
631 |
+
if dist_init_required is None:
|
632 |
+
dist_init_required = cdb is None or not cdb.is_initialized()
|
633 |
+
|
634 |
+
if cdb is None:
|
635 |
+
init_deepspeed_backend(get_accelerator().communication_backend_name(), timeout, init_method)
|
636 |
+
set_backend()
|
637 |
+
utils.logger.info(f'cdb={cdb}')
|
638 |
+
if cdb is None and torch.distributed.is_initialized():
|
639 |
+
# The user initialized torch.dist themselves, create cdb and short-circuit
|
640 |
+
cdb = TorchBackend(dist_backend, timeout, init_method)
|
641 |
+
return
|
642 |
+
|
643 |
+
if dist_init_required is False:
|
644 |
+
assert (
|
645 |
+
cdb is not None and cdb.is_initialized() is True
|
646 |
+
), "Distributed backend is not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()"
|
647 |
+
else:
|
648 |
+
# Initialize torch distributed if needed
|
649 |
+
required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
|
650 |
+
if auto_mpi_discovery and not all(map(lambda v: v in os.environ, required_env)):
|
651 |
+
if verbose:
|
652 |
+
utils.logger.info("Not using the DeepSpeed or dist launchers, attempting to detect MPI environment...")
|
653 |
+
if in_aml() and not in_dlts():
|
654 |
+
patch_aml_env_for_torch_nccl_backend(verbose=verbose)
|
655 |
+
elif in_aws_sm():
|
656 |
+
patch_aws_sm_env_for_torch_nccl_backend(verbose=verbose)
|
657 |
+
else:
|
658 |
+
mpi_discovery(distributed_port=distributed_port, verbose=verbose)
|
659 |
+
|
660 |
+
if cdb is not None and cdb.is_initialized():
|
661 |
+
if int(os.getenv('RANK', '0')) == 0:
|
662 |
+
utils.logger.info('Distributed backend already initialized')
|
+    else:
+        assert isinstance(timeout, timedelta)
+        if dist_backend is None:
+            dist_backend = get_accelerator().communication_backend_name()
+        if int(os.getenv('RANK', '0')) == 0:
+            utils.logger.info('Initializing TorchBackend in DeepSpeed with backend {}'.format(dist_backend))
+        # Create a torch backend object, initialize torch distributed, and assign to cdb
+        cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size)
+
+
+def mpi_discovery(distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, verbose=True):
+    '''
+    Discovery MPI environment via mpi4py and map to relevant dist state
+    '''
+    from mpi4py import MPI
+    import subprocess
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+    world_size = comm.Get_size()
+
+    master_addr = None
+    if rank == 0:
+        hostname_cmd = ["hostname -I"]
+        result = subprocess.check_output(hostname_cmd, shell=True)
+        master_addr = result.decode('utf-8').split()[0]
+    master_addr = comm.bcast(master_addr, root=0)
+
+    # Determine local rank by assuming hostnames are unique
+    proc_name = MPI.Get_processor_name()
+    all_procs = comm.allgather(proc_name)
+    local_rank = sum([i == proc_name for i in all_procs[:rank]])
+
+    os.environ['RANK'] = str(rank)
+    os.environ['WORLD_SIZE'] = str(world_size)
+    os.environ['LOCAL_RANK'] = str(local_rank)
+    os.environ['MASTER_ADDR'] = master_addr
+    os.environ['MASTER_PORT'] = str(distributed_port)
+
+    if verbose:
+        utils.logger.info(
+            "Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}".
+            format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
+                   os.environ['MASTER_PORT']))
+
+    if cdb is not None and cdb.is_initialized():
+        assert cdb.get_rank() == rank, "MPI rank {} does not match torch rank {}".format(rank, cdb.get_rank())
+        assert cdb.get_world_size() == world_size, "MPI world size {} does not match torch world size {}".format(
+            world_size, cdb.get_world_size())
+
+
+def in_aml():
+    # Are we running inside an Azure Machine Learning (AML) environment?
+    return 'AZUREML_EXPERIMENT_ID' in os.environ
+
+
+def in_aws_sm():
+    # Are we running inside an AWS SageMaker environment?
+    return 'SM_TRAINING_ENV' in os.environ
+
+
+def in_dlts():
+    # Are we running on a DLTS cluster?
+    return 'DLTS_JOB_ID' in os.environ
+
+
+def patch_aml_env_for_torch_nccl_backend(master_port=6105, verbose=True):
+    """Helper routine to get and set environment variables.
+    This is adapted from Azure ML's documentation available from:
+    https://azure.github.io/azureml-web/docs/cheatsheet/distributed-training/#environment-variables-from-openmpi
+    """
+    os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
+    os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
+    single_node = int(os.environ["OMPI_COMM_WORLD_LOCAL_SIZE"]) == int(os.environ["WORLD_SIZE"])
+
+    if not single_node:
+        master_node_params = os.environ["AZ_BATCH_MASTER_NODE"].split(":")
+        os.environ["MASTER_ADDR"] = master_node_params[0]
+        # Do not overwrite master port with that defined in AZ_BATCH_MASTER_NODE
+        if "MASTER_PORT" not in os.environ:
+            os.environ["MASTER_PORT"] = str(master_port)
+    else:
+        os.environ["MASTER_ADDR"] = os.environ["AZ_BATCHAI_MPI_MASTER_NODE"]
+        os.environ["MASTER_PORT"] = DEFAULT_AML_MASTER_PORT
+
+    if verbose:
+        utils.logger.info("NCCL_SOCKET_IFNAME original value = {}".format(os.environ["NCCL_SOCKET_IFNAME"]))
+
+    os.environ["NCCL_SOCKET_IFNAME"] = DEFAULT_AML_NCCL_SOCKET_IFNAME
+    os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
+
+    if verbose:
+        utils.logger.info(
+            "Discovered AzureML settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
+            .format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
+                    os.environ['MASTER_PORT']))
+
+
+def patch_aws_sm_env_for_torch_nccl_backend(verbose=True):
+    """Helper routine to get and set environment variables when running inside an AWS SageMaker environment.
+    """
+    os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
+    os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
+    os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
+
+    if verbose:
+        utils.logger.info(
+            "Discovered AWS SageMaker settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
+            .format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
+                    os.environ['MASTER_PORT']))
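The helpers above only export the standard `torch.distributed` environment variables (RANK, WORLD_SIZE, LOCAL_RANK, MASTER_ADDR, MASTER_PORT). As a rough illustration (not part of the uploaded file), a process bootstrapped this way can then be initialized with the `env://` method; the single-process values below are placeholders.

```python
# Minimal sketch: consume the env vars populated by mpi_discovery()/patch_*_env helpers.
import os
import torch.distributed as dist

os.environ.setdefault('RANK', '0')            # illustrative single-process defaults
os.environ.setdefault('WORLD_SIZE', '1')
os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
os.environ.setdefault('MASTER_PORT', '29500')

dist.init_process_group(backend='gloo', init_method='env://')
print(dist.get_rank(), dist.get_world_size())
dist.destroy_process_group()
```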
venv/lib/python3.10/site-packages/deepspeed/comm/config.py
ADDED
@@ -0,0 +1,33 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .constants import *
+from ..pydantic_v1 import BaseModel
+
+
+class CommsConfig(BaseModel):
+
+    class Config:
+        validate_all = True
+        validate_assignment = True
+        use_enum_values = True
+        extra = 'forbid'
+
+
+class CommsLoggerConfig(CommsConfig):
+    enabled: bool = COMMS_LOGGER_ENABLED_DEFAULT
+    prof_all: bool = COMMS_LOGGER_PROF_ALL_DEFAULT
+    prof_ops: list = COMMS_LOGGER_PROF_OPS_DEFAULT
+    verbose: bool = COMMS_LOGGER_VERBOSE_DEFAULT
+    debug: bool = COMMS_LOGGER_DEBUG_DEFAULT
+
+
+class DeepSpeedCommsConfig:
+
+    def __init__(self, ds_config):
+        self.comms_logger_enabled = 'comms_logger' in ds_config
+
+        if self.comms_logger_enabled:
+            self.comms_logger = CommsLoggerConfig(**ds_config['comms_logger'])
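As a quick sketch of how this config class is consumed (assuming a working deepspeed install), `DeepSpeedCommsConfig` only looks at the `comms_logger` key of the DeepSpeed config dict and validates it through `CommsLoggerConfig`; unset fields fall back to the defaults from `constants.py`.

```python
# Sketch: validate a comms_logger section of a ds_config dict.
from deepspeed.comm.config import DeepSpeedCommsConfig

ds_config = {
    "comms_logger": {
        "enabled": True,
        "verbose": False,
        "prof_all": True,
        "debug": False,
    }
}
comms_config = DeepSpeedCommsConfig(ds_config)
print(comms_config.comms_logger_enabled)   # True
print(comms_config.comms_logger.prof_ops)  # [] (default)
```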
venv/lib/python3.10/site-packages/deepspeed/comm/constants.py
ADDED
@@ -0,0 +1,51 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+NCCL_BACKEND = 'nccl'
+CCL_BACKEND = 'ccl'
+MPI_BACKEND = 'mpi'
+GLOO_BACKEND = 'gloo'
+SCCL_BACKEND = 'sccl'
+HCCL_BACKEND = 'hccl'
+
+DEFAULT_AML_MASTER_PORT = "54965"
+DEFAULT_AML_NCCL_SOCKET_IFNAME = "^docker0,lo"
+
+#########################################
+# Comms Logger
+#########################################
+# Comms Logger. By default, this feature is not enabled.
+# Users can configure in ds_config.json as below example:
+COMMS_LOGGER_FORMAT = '''
+The Comms Logger can be specified as:
+"comms_logger": {
+  "enabled": true,
+  "verbose": false,
+  "prof_all": true,
+  "debug": false,
+  "prof_ops": ["all_reduce", "custom_all_reduce_name"]
+}
+'''
+COMMS_LOGGER = "comms_logger"
+
+# Comms logger enable signal
+COMMS_LOGGER_ENABLED = "enabled"
+COMMS_LOGGER_ENABLED_DEFAULT = False
+
+# Comms logger verbose signal
+COMMS_LOGGER_VERBOSE = "verbose"
+COMMS_LOGGER_VERBOSE_DEFAULT = False
+
+# comms logger profile all ops signal
+COMMS_LOGGER_PROF_ALL = "prof_all"
+COMMS_LOGGER_PROF_ALL_DEFAULT = True
+
+# comms logger show all ops signal
+COMMS_LOGGER_DEBUG = "debug"
+COMMS_LOGGER_DEBUG_DEFAULT = False
+
+# comms logger profile specific ops in list
+COMMS_LOGGER_PROF_OPS = "prof_ops"
+COMMS_LOGGER_PROF_OPS_DEFAULT = []
venv/lib/python3.10/site-packages/deepspeed/comm/utils.py
ADDED
@@ -0,0 +1,133 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import os
+import inspect
+from deepspeed.utils import get_caller_func
+
+
+def get_local_rank_from_launcher():
+
+    # DeepSpeed launcher will set it so get from there
+    rank = os.environ.get('LOCAL_RANK')
+
+    if rank is None:
+        rank = os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK')
+
+    # Make it a single process job and set rank to 0
+    if rank is None:
+        rank = 0
+
+    return int(rank)
+
+
+def get_world_rank_from_launcher():
+
+    # DeepSpeed launcher will set it so get from there
+    rank = os.environ.get('RANK')
+
+    if rank is None:
+        rank = os.environ.get('OMPI_COMM_WORLD_RANK')
+
+    # Make it a single process job and set rank to 0
+    if rank is None:
+        rank = 0
+
+    return int(rank)
+
+
+def get_world_size_from_launcher():
+    # DeepSpeed launcher will set it so get from there
+    size = os.environ.get('WORLD_SIZE')
+    rank = os.environ.get('RANK')
+
+    if size is None:
+        size = os.environ.get('OMPI_COMM_WORLD_SIZE')
+
+    # Make it a single process job and set size to 1
+    if size is None:
+        size = 1
+
+    if rank == 0:
+        print(f"set world size to {size}")
+
+    return int(size)
+
+
+def get_default_args(func):
+    signature = inspect.signature(func)
+    return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
+
+
+# We need this hacky function since torch doesn't consistently name or place the input tensor args
+def get_tensor_position(func):
+    sig_params = inspect.signature(func).parameters
+    arg = None
+    # most colls
+    if 'tensor' in sig_params:
+        arg = 'tensor'
+    # all_reduce_coalesced coll
+    elif 'tensors' in sig_params:
+        arg = 'tensors'
+    # reduce scatter coll
+    elif 'input_list' in sig_params:
+        arg = 'input_list'
+    # all_to_all and torch multiGPU colls
+    elif 'input_tensor_list' in sig_params:
+        arg = 'input_tensor_list'
+    if arg is None:
+        return -1
+    else:
+        return list(sig_params).index(arg)
+
+
+def get_tensor_kwarg(func, kwargs):
+    func_args = get_default_args(func)
+    func_args.update(kwargs)
+    arg = None
+
+    if 'tensor' in func_args:
+        arg = func_args['tensor']
+    elif 'tensors' in func_args:
+        arg = func_args['tensors']
+    elif 'input_list' in func_args:
+        arg = func_args['input_list']
+    elif 'input_tensor_list' in func_args:
+        arg = func_args['input_tensor_list']
+    return arg
+
+
+def get_msg_size_from_args(func, *args, **kwargs):
+    # 3 cases:
+    #  - tensor arg is in args
+    #  - tensor arg is in kwargs
+    #  - tensor arg is not present (e.g. barrier)
+    tensor_arg_position = -1
+    tensor_arg = None
+    # check if tensor arg is in args
+    if len(args) > 0:
+        tensor_arg_position = get_tensor_position(func)
+        if tensor_arg_position > -1:
+            tensor_arg = args[get_tensor_position(func)]
+    # check if tensor arg is in kwargs
+    if tensor_arg is None and len(kwargs) > 0:
+        tensor_arg = get_tensor_kwarg(func, kwargs)
+    # if tensor arg is not present, no data is being transmitted
+    if tensor_arg is None:
+        return 0
+    else:
+        # Sum of tensor sizes for list colls such as torch's all_to_all
+        # NOTE: msg_size for list colls will not be the actual size transmitted by a given MPI/NCCL call within the coll op. Instead, it's the total amount of data transmitted.
+        if type(tensor_arg) is list:
+            return sum(x.element_size() * x.nelement() for x in tensor_arg)
+        else:
+            return tensor_arg.element_size() * tensor_arg.nelement()
+
+
+def get_debug_log_name(func_args, debug):
+    if debug:
+        return func_args['log_name'] + ' | [Caller Func: ' + get_caller_func() + ']'
+    else:
+        return func_args['log_name']
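As an illustration (assuming torch and deepspeed are installed), the comms logger estimates message volume by locating the tensor argument of a collective via its signature and summing element bytes; `fake_all_reduce` below is a hypothetical stand-in, not a DeepSpeed API.

```python
# Sketch: measure the message size of a torch-style collective with a 'tensor' argument.
import torch
from deepspeed.comm.utils import get_msg_size_from_args

def fake_all_reduce(tensor, group=None, async_op=False):
    """Hypothetical collective used only to demonstrate the signature inspection."""
    return tensor

x = torch.zeros(1024, dtype=torch.float32)
print(get_msg_size_from_args(fake_all_reduce, x))  # 4096 bytes (1024 elements * 4 bytes)
```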
venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (403 Bytes)
venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/config.cpython-310.pyc
ADDED
Binary file (6.89 kB)
venv/lib/python3.10/site-packages/deepspeed/inference/__pycache__/engine.cpython-310.pyc
ADDED
Binary file (19.8 kB)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/base_engine.cpython-310.pyc
ADDED
Binary file (1.73 kB)
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/base_moe.cpython-310.pyc
ADDED
Binary file (4.99 kB)
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/opt.cpython-310.pyc
ADDED
Binary file (5.52 kB)
venv/lib/python3.10/site-packages/deepspeed/moe/__init__.py
ADDED
@@ -0,0 +1,5 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+'''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (231 Bytes)
venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/experts.cpython-310.pyc
ADDED
Binary file (1.47 kB)
venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/layer.cpython-310.pyc
ADDED
Binary file (5.44 kB)
venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/mappings.cpython-310.pyc
ADDED
Binary file (3.01 kB)
venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/sharded_moe.cpython-310.pyc
ADDED
Binary file (13.6 kB)
venv/lib/python3.10/site-packages/deepspeed/moe/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (4.58 kB)
venv/lib/python3.10/site-packages/deepspeed/moe/experts.py
ADDED
@@ -0,0 +1,38 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import copy
+from typing import List, Optional
+
+import torch
+from torch import nn
+
+
+class Experts(nn.Module):
+
+    def __init__(self, expert: nn.Module, num_local_experts: int = 1, expert_group_name: Optional[str] = None) -> None:
+        super(Experts, self).__init__()
+
+        self.deepspeed_experts = nn.ModuleList([copy.deepcopy(expert) for _ in range(num_local_experts)])
+        self.num_local_experts = num_local_experts
+
+        # TODO: revisit allreduce for moe.gate...
+        for expert in self.deepspeed_experts:
+            # TODO: Create param groups to handle expert + data case (e.g. param.group = moe_group)
+            for param in expert.parameters():
+                param.allreduce = False
+                param.group_name = expert_group_name
+
+    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+        chunks = inputs.chunk(self.num_local_experts, dim=1)
+        expert_outputs: List[torch.Tensor] = []
+
+        for chunk, expert in zip(chunks, self.deepspeed_experts):
+            out = expert(chunk)
+            if isinstance(out, tuple):
+                out = out[0]  # Ignore the bias term for now
+            expert_outputs += [out]
+
+        return torch.cat(expert_outputs, dim=1)
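For orientation (shapes below are illustrative only): `Experts` deep-copies one expert module per local expert, splits the dispatched input along dim=1 into one chunk per local expert, and concatenates the per-expert outputs back along the same dimension.

```python
# Sketch of the chunk/concat behaviour with a tiny Linear expert.
import torch
from torch import nn
from deepspeed.moe.experts import Experts

expert = nn.Linear(8, 8)
experts = Experts(expert, num_local_experts=2, expert_group_name="ep_size_1")

# (ep_size, num_local_experts * capacity, d_model) -> chunked along dim=1
dispatched = torch.randn(1, 4, 8)
out = experts(dispatched)
print(out.shape)  # torch.Size([1, 4, 8])
```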
venv/lib/python3.10/site-packages/deepspeed/moe/layer.py
ADDED
@@ -0,0 +1,132 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from typing import Optional, Tuple
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from deepspeed.utils import groups, log_dist
+from .experts import Experts
+from .sharded_moe import MOELayer, TopKGate
+
+
+class MoE(nn.Module):
+    """Initialize an MoE layer.
+
+    Arguments:
+        hidden_size (int): the hidden dimension of the model, importantly this is also the input and output dimension.
+        expert (nn.Module): the torch module that defines the expert (e.g., MLP, torch.linear).
+        num_experts (int, optional): default=1, the total number of experts per layer.
+        ep_size (int, optional): default=1, number of ranks in the expert parallel world or group.
+        k (int, optional): default=1, top-k gating value, only supports k=1 or k=2.
+        capacity_factor (float, optional): default=1.0, the capacity of the expert at training time.
+        eval_capacity_factor (float, optional): default=1.0, the capacity of the expert at eval time.
+        min_capacity (int, optional): default=4, the minimum capacity per expert regardless of the capacity_factor.
+        use_residual (bool, optional): default=False, make this MoE layer a Residual MoE (https://arxiv.org/abs/2201.05596) layer.
+        noisy_gate_policy (str, optional): default=None, noisy gate policy, valid options are 'Jitter', 'RSample' or 'None'.
+        drop_tokens (bool, optional): default=True, whether to drop tokens - (setting to False is equivalent to infinite capacity).
+        use_rts (bool, optional): default=True, whether to use Random Token Selection.
+        use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed).
+        enable_expert_tensor_parallelism (bool, optional): default=False, whether to use tensor parallelism for experts
+        top2_2nd_expert_sampling (bool, optional): default=True, whether to perform sampling for 2nd expert
+    """
+
+    def __init__(self,
+                 hidden_size: int,
+                 expert: nn.Module,
+                 num_experts: int = 1,
+                 ep_size: int = 1,
+                 k: int = 1,
+                 capacity_factor: float = 1.0,
+                 eval_capacity_factor: float = 1.0,
+                 min_capacity: int = 4,
+                 use_residual: bool = False,
+                 noisy_gate_policy: Optional[str] = None,
+                 drop_tokens: bool = True,
+                 use_rts: bool = True,
+                 use_tutel: bool = False,
+                 enable_expert_tensor_parallelism: bool = False,
+                 top2_2nd_expert_sampling: bool = True) -> None:
+
+        super(MoE, self).__init__()
+
+        self.use_residual = use_residual
+        self.enable_expert_tensor_parallelism = enable_expert_tensor_parallelism
+        assert num_experts % ep_size == 0, f"Number of experts ({num_experts}) should be divisible by expert parallel size ({ep_size})"
+        self.ep_size = ep_size
+        self.expert_group_name = f"ep_size_{self.ep_size}"
+        self.num_experts = num_experts
+        self.num_local_experts = num_experts // self.ep_size
+
+        log_dist(
+            f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}',
+            [0])
+
+        assert noisy_gate_policy is None or noisy_gate_policy in ['None', 'Jitter', 'RSample'], \
+            'Unsupported noisy_gate_policy: ' + noisy_gate_policy
+
+        experts = Experts(expert, self.num_local_experts, self.expert_group_name)
+        self.deepspeed_moe = MOELayer(TopKGate(hidden_size, num_experts, k, capacity_factor, eval_capacity_factor,
+                                               min_capacity, noisy_gate_policy, drop_tokens, use_rts, None,
+                                               top2_2nd_expert_sampling),
+                                      experts,
+                                      self.expert_group_name,
+                                      self.ep_size,
+                                      self.num_local_experts,
+                                      use_tutel=use_tutel)
+        if self.use_residual:
+            self.mlp = expert
+            # coefficient is used for weighted sum of the output of expert and mlp
+            self.coefficient = nn.Linear(hidden_size, 2)
+
+    def set_deepspeed_parallelism(self, use_data_before_expert_parallel_: bool = False) -> None:
+        self._create_process_groups(use_data_before_expert_parallel_=use_data_before_expert_parallel_)
+
+    def _create_process_groups(self, use_data_before_expert_parallel_: bool = False) -> None:
+        # Create process group for a layer if needed
+        if self.expert_group_name not in groups._get_expert_parallel_group_dict():
+            print(f"No existing process group found, creating a new group named: {self.expert_group_name}")
+            if (groups.mpu is None) or (not self.enable_expert_tensor_parallelism):
+                # Condition 1 - no groups.mpu means no tensor parallelism
+                # Condition 2 - disabling expert tensor parallelism on purpose
+                groups._create_expert_and_data_parallel(
+                    self.ep_size, use_data_before_expert_parallel_=use_data_before_expert_parallel_)
+            else:
+                # expert tensor parallelism is enabled
+                groups._create_expert_data_and_model_parallel(
+                    self.ep_size, mpu=groups.mpu, use_data_before_expert_parallel_=use_data_before_expert_parallel_)
+        # Set the group handle for the MOELayer (deepspeed_moe) object
+        self.deepspeed_moe._set_ep_group(groups._get_expert_parallel_group(self.expert_group_name))
+
+    def forward(self,
+                hidden_states: torch.Tensor,
+                used_token: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """ MoE forward
+
+        Arguments:
+            hidden_states (Tensor): input to the layer
+            used_token (Tensor, optional): default: None, mask only used tokens
+
+        Returns:
+            A tuple including output, gate loss, and expert count.
+
+            * output (Tensor): output of the model
+
+            * l_aux (Tensor): gate loss value
+
+            * exp_counts (Tensor): expert count
+        """
+        output = self.deepspeed_moe(hidden_states, used_token)
+        if self.use_residual:
+            # Residual MoE
+            output_mlp = self.mlp(hidden_states)
+            if isinstance(output_mlp, tuple):
+                output_mlp = output_mlp[0]  # Ignore the bias term for now
+            coef = self.coefficient(hidden_states)
+            coef = F.softmax(coef, dim=-1)
+            output = output * coef[..., 0:1] + output_mlp * coef[..., 1:]
+        return output, self.deepspeed_moe.l_aux, self.deepspeed_moe.exp_counts
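A minimal usage sketch, assuming the script is started under a DeepSpeed/torch distributed launch (e.g. `deepspeed --num_gpus=1 script.py`) so that the communication backend and expert/data-parallel groups can be created; the model dimensions are placeholders.

```python
# Sketch: wrap a plain MLP expert in the MoE layer; forward returns (output, l_aux, exp_counts).
import torch
from torch import nn
import deepspeed
from deepspeed.moe.layer import MoE

deepspeed.init_distributed()  # MoE needs the distributed backend initialized

hidden = 64
expert = nn.Sequential(nn.Linear(hidden, 4 * hidden), nn.ReLU(), nn.Linear(4 * hidden, hidden))
moe = MoE(hidden_size=hidden, expert=expert, num_experts=2, ep_size=1, k=1)
moe.set_deepspeed_parallelism()  # create the expert/data parallel process groups

x = torch.randn(4, 16, hidden)
out, l_aux, exp_counts = moe(x)
print(out.shape, float(l_aux))
```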
venv/lib/python3.10/site-packages/deepspeed/moe/mappings.py
ADDED
@@ -0,0 +1,108 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# The file has been adapted from the following Megatron-LM file:
+# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/mpu/mappings.py
+# Git commit hash: 9dc3c42a84aa656f583703cf8b6b4f79f712b796
+# We retain the following copyright from the original files:
+
+# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import deepspeed
+from deepspeed.utils.bwc import (bwc_tensor_model_parallel_world_size, bwc_tensor_model_parallel_rank,
+                                 bwc_tensor_model_parallel_group)
+
+
+def _gather_tokens(input_, dim=0):
+    """Gather tensors and concatenate them along a dimension"""
+    mpu = deepspeed.utils.groups.mpu
+
+    input_ = input_.contiguous()
+    # Size and dimension.
+    rank = bwc_tensor_model_parallel_rank(mpu)
+
+    tensor_list = [torch.empty_like(input_) for _ in range(bwc_tensor_model_parallel_world_size(mpu))]
+    tensor_list[rank] = input_
+    deepspeed.comm.all_gather(tensor_list, input_, group=bwc_tensor_model_parallel_group(mpu))
+
+    # Note: torch.cat already creates a contiguous tensor.
+    output = torch.cat(tensor_list, dim=dim).contiguous()
+
+    return output
+
+
+def _drop_tokens(input_, dim=0):
+    """Divide a tensor among the tensor parallel ranks"""
+    mpu = deepspeed.utils.groups.mpu
+
+    total_chunks = bwc_tensor_model_parallel_world_size(mpu)
+    this_chunk = bwc_tensor_model_parallel_rank(mpu)
+    assert input_.shape[
+        dim] % total_chunks == 0, f"input dimension {dim} ({input_.shape[dim]}) is not divisible by tensor parallel world size ({total_chunks})"
+    chunk_size = input_.shape[dim] // total_chunks
+
+    return torch.narrow(input_, dim, this_chunk * chunk_size, chunk_size)
+
+
+class _GatherTokens(torch.autograd.Function):
+    """All gather tokens among the tensor parallel ranks"""
+
+    @staticmethod
+    def symbolic(graph, input_, dim):
+        return _gather_tokens(input_, dim)
+
+    @staticmethod
+    def forward(ctx, input_, dim):
+        ctx.dim = dim
+        return _gather_tokens(input_, dim)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        return _drop_tokens(grad_output, ctx.dim), None
+
+
+class _DropTokens(torch.autograd.Function):
+    "Divide tokens equally among the tensor parallel ranks"
+
+    @staticmethod
+    def symbolic(graph, input_, dim):
+        return _drop_tokens(input_, dim)
+
+    @staticmethod
+    def forward(ctx, input_, dim):
+        ctx.dim = dim
+        return _drop_tokens(input_, dim)
+
+    @staticmethod
+    def backward(ctx, input_):
+        return _gather_tokens(input_, ctx.dim), None
+
+
+def gather_tokens(input_, dim=0):
+    mpu = deepspeed.utils.groups.mpu
+    if mpu is None or bwc_tensor_model_parallel_world_size(mpu) == 1:
+        # no tensor parallelism for non-experts
+        return input_
+    return _GatherTokens.apply(input_, dim)
+
+
+def drop_tokens(input_, dim=0):
+    mpu = deepspeed.utils.groups.mpu
+    if mpu is None or bwc_tensor_model_parallel_world_size(mpu) == 1:
+        # no tensor parallelism for non-experts
+        return input_
+    return _DropTokens.apply(input_, dim)
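To make the slicing concrete without any process groups, here is a small sketch of the arithmetic `_drop_tokens` performs for a single tensor-parallel rank; `drop_for_rank` is a hypothetical helper written only for illustration.

```python
# Sketch: each rank keeps one contiguous chunk of the token dimension.
import torch

def drop_for_rank(tensor, rank, world_size, dim=0):
    assert tensor.shape[dim] % world_size == 0
    chunk = tensor.shape[dim] // world_size
    return torch.narrow(tensor, dim, rank * chunk, chunk)

x = torch.arange(8).reshape(4, 2)
print(drop_for_rank(x, rank=0, world_size=2))  # rows 0-1
print(drop_for_rank(x, rank=1, world_size=2))  # rows 2-3
```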
venv/lib/python3.10/site-packages/deepspeed/moe/sharded_moe.py
ADDED
@@ -0,0 +1,585 @@
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# SPDX-License-Identifier: Apache-2.0
|
3 |
+
|
4 |
+
# DeepSpeed Team
|
5 |
+
"""
|
6 |
+
The file has been adapted from two fairscale files:
|
7 |
+
(1) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/moe_layer.py
|
8 |
+
(2) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/top2gate.py
|
9 |
+
Git commit hash: 34df606902a240567a0d898037ece55c2f1336cf
|
10 |
+
We retain the following license from the original files:
|
11 |
+
"""
|
12 |
+
|
13 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
14 |
+
#
|
15 |
+
# This source code is licensed under the BSD license found in the
|
16 |
+
# LICENSE file in the root directory of this source tree.
|
17 |
+
|
18 |
+
from deepspeed.utils.timer import SynchronizedWallClockTimer
|
19 |
+
from deepspeed.utils import logger
|
20 |
+
from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size
|
21 |
+
from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple, Union
|
22 |
+
|
23 |
+
import torch
|
24 |
+
from torch import Tensor
|
25 |
+
from torch.nn import Module
|
26 |
+
import torch.nn.functional as F
|
27 |
+
from deepspeed.utils import groups
|
28 |
+
from .mappings import drop_tokens, gather_tokens
|
29 |
+
|
30 |
+
if TYPE_CHECKING:
|
31 |
+
Base = Module[Tensor]
|
32 |
+
else:
|
33 |
+
Base = Module
|
34 |
+
|
35 |
+
TOPK_GATE_TIMER = 'topk_gate'
|
36 |
+
MOE_TIMER = 'moe'
|
37 |
+
FIRST_ALLTOALL_TIMER = '1st_a2a'
|
38 |
+
SECOND_ALLTOALL_TIMER = '2nd_a2a'
|
39 |
+
|
40 |
+
uniform_map: Dict[torch.device, Callable] = {}
|
41 |
+
gumbel_map: Dict[torch.device, Callable] = {}
|
42 |
+
exp_selection_uniform_map: Dict[torch.device, Callable] = {}
|
43 |
+
|
44 |
+
try:
|
45 |
+
# To enable Tutel MoE optimizations:
|
46 |
+
# python3 -m pip install --user --upgrade git+https://github.com/microsoft/[email protected]
|
47 |
+
from tutel import moe as tutel_moe
|
48 |
+
TUTEL_INSTALLED = True
|
49 |
+
except:
|
50 |
+
# Fail silently so we don't spam logs unnecessarily if user isn't using tutel
|
51 |
+
TUTEL_INSTALLED = False
|
52 |
+
pass
|
53 |
+
|
54 |
+
|
55 |
+
def multiplicative_jitter(x, device: torch.device, epsilon=1e-2):
|
56 |
+
"""
|
57 |
+
Modified from switch transformer paper. mesh transformers
|
58 |
+
Multiply values by a random number between 1-epsilon and 1+epsilon.
|
59 |
+
Makes models more resilient to rounding errors introduced by bfloat16.
|
60 |
+
This seems particularly important for logits.
|
61 |
+
Args:
|
62 |
+
x: a torch.tensor
|
63 |
+
device: torch.device
|
64 |
+
epsilon: a floating point value
|
65 |
+
Returns:
|
66 |
+
a jittered x.
|
67 |
+
"""
|
68 |
+
if epsilon == 0:
|
69 |
+
return x
|
70 |
+
uniform = uniform_map.get(device)
|
71 |
+
if uniform is None:
|
72 |
+
uniform = torch.distributions.uniform.Uniform(low=torch.tensor(1.0 - epsilon, device=device),
|
73 |
+
high=torch.tensor(1.0 + epsilon,
|
74 |
+
device=device)).rsample # type: ignore
|
75 |
+
uniform_map[device] = uniform
|
76 |
+
return x * uniform(x.shape)
|
77 |
+
|
78 |
+
|
79 |
+
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
|
80 |
+
gumbel = gumbel_map.get(device)
|
81 |
+
if gumbel is None:
|
82 |
+
one = torch.tensor(1.0, device=device)
|
83 |
+
zero = torch.tensor(0.0, device=device)
|
84 |
+
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
|
85 |
+
gumbel_map[device] = gumbel
|
86 |
+
return gumbel(shape)
|
87 |
+
|
88 |
+
|
89 |
+
from deepspeed import comm as dist
|
90 |
+
|
91 |
+
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
|
92 |
+
# See https://arxiv.org/pdf/2006.16668.pdf for details.
|
93 |
+
|
94 |
+
|
95 |
+
# Based on https://github.com/pytorch/pytorch/pull/40762
|
96 |
+
class _AllToAll(torch.autograd.Function):
|
97 |
+
|
98 |
+
@staticmethod
|
99 |
+
def forward(ctx: Any, group: dist.ProcessGroup, input: Tensor) -> Tensor: # type: ignore
|
100 |
+
ctx.group = group
|
101 |
+
input = input.contiguous()
|
102 |
+
output = torch.empty_like(input)
|
103 |
+
dist.all_to_all_single(output, input, group=group)
|
104 |
+
return output
|
105 |
+
|
106 |
+
@staticmethod
|
107 |
+
def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]:
|
108 |
+
return (None, _AllToAll.apply(ctx.group, *grad_output))
|
109 |
+
|
110 |
+
|
111 |
+
# einsum rewrites are on par or more performant
|
112 |
+
# switch can be bubbled up in future
|
113 |
+
USE_EINSUM = True
|
114 |
+
|
115 |
+
|
116 |
+
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
|
117 |
+
# See https://arxiv.org/pdf/2006.16668.pdf for details.
|
118 |
+
def einsum(rule, a, b):
|
119 |
+
if USE_EINSUM:
|
120 |
+
return torch.einsum(rule, a, b)
|
121 |
+
elif rule == 's,se->se':
|
122 |
+
return a.reshape(a.shape[0], -1) * b
|
123 |
+
elif rule == 'se,sc->sec':
|
124 |
+
return a.unsqueeze(2) * b.unsqueeze(1)
|
125 |
+
elif rule == 'se,se->s':
|
126 |
+
return torch.bmm(a.unsqueeze(1), b.unsqueeze(2)).reshape(-1)
|
127 |
+
elif rule == 'sec,sm->ecm':
|
128 |
+
s = a.shape[0]
|
129 |
+
e = a.shape[1]
|
130 |
+
c = a.shape[2]
|
131 |
+
m = b.shape[1]
|
132 |
+
return torch.matmul(a.reshape(s, -1).t(), b).reshape(e, c, m)
|
133 |
+
elif rule == 'sec,ecm->sm':
|
134 |
+
return torch.matmul(a.reshape(a.shape[0], -1), b.reshape(-1, b.shape[-1]))
|
135 |
+
elif rule == 'ks,ksm->sm':
|
136 |
+
k = b.shape[0]
|
137 |
+
s = b.shape[1]
|
138 |
+
m = b.shape[2]
|
139 |
+
# [k, s] -> [s, k] -> [s, 1, k]
|
140 |
+
a = a.t().unsqueeze(1)
|
141 |
+
# [k,s,m] -> [k, sm] -> [sm, k] -> [s, m, k]
|
142 |
+
b = b.reshape(k, -1).t().reshape(s, m, k)
|
143 |
+
# bmm([s, 1, k], [s, m, k]^t) -> [s, m, 1]
|
144 |
+
return torch.bmm(a, b.transpose(1, 2)).squeeze(2)
|
145 |
+
else:
|
146 |
+
return torch.einsum(rule, a, b)
|
147 |
+
|
148 |
+
|
149 |
+
# The following functions are extracted and scripted
|
150 |
+
# because otherwise during a torch.jit.trace, the non-Tensor
|
151 |
+
# values used in the calculations get recorded as constants.
|
152 |
+
# torch.jit.script coerces them into Tensors and preserves
|
153 |
+
# their dynamic shapes. This enables ONNX export.
|
154 |
+
# We can't script the entire top1gating function because it
|
155 |
+
# includes stateful caching logic which is incompatible with ONNX.
|
156 |
+
|
157 |
+
|
158 |
+
@torch.jit.script
|
159 |
+
def _capacity(gates: Tensor, capacity_factor: Tensor, min_capacity: Tensor) -> Tensor:
|
160 |
+
# gates has shape of SE
|
161 |
+
num_tokens = gates.shape[0]
|
162 |
+
num_experts = gates.shape[1]
|
163 |
+
# to(torch.int64) works around a bug in torch.onnx.export:
|
164 |
+
# it should cast k to int64 when converting torch.topk but it doesn't.
|
165 |
+
capacity = torch.ceil((num_tokens / num_experts) * capacity_factor).to(torch.int64)
|
166 |
+
if capacity < min_capacity:
|
167 |
+
capacity = min_capacity.to(torch.int64)
|
168 |
+
return capacity
|
169 |
+
|
170 |
+
|
171 |
+
@torch.jit.script
|
172 |
+
def _top_idx(source, k):
|
173 |
+
return torch.topk(source, k=k, dim=0)[1]
|
174 |
+
|
175 |
+
|
176 |
+
@torch.jit.script
|
177 |
+
def _one_hot_to_float(x, num_classes):
|
178 |
+
return F.one_hot(x, num_classes=num_classes).float()
|
179 |
+
|
180 |
+
|
181 |
+
def top1gating(logits: Tensor,
|
182 |
+
capacity_factor: float,
|
183 |
+
min_capacity: int,
|
184 |
+
used_token: Tensor = None,
|
185 |
+
noisy_gate_policy: Optional[str] = None,
|
186 |
+
drop_tokens: bool = True,
|
187 |
+
use_rts: bool = True,
|
188 |
+
ep_group: Union[torch.distributed.ProcessGroup, None] = None,
|
189 |
+
use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
|
190 |
+
"""Implements Top1Gating on logits."""
|
191 |
+
if noisy_gate_policy == 'RSample':
|
192 |
+
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
|
193 |
+
# everything is in fp32 in this function
|
194 |
+
gates = F.softmax(logits, dim=1)
|
195 |
+
|
196 |
+
capacity = _capacity(gates, torch.tensor(capacity_factor), torch.tensor(min_capacity))
|
197 |
+
|
198 |
+
# Create a mask for 1st's expert per token
|
199 |
+
# noisy gating
|
200 |
+
indices1_s = torch.argmax(logits_w_noise if noisy_gate_policy == 'RSample' else gates, dim=1)
|
201 |
+
num_experts = int(gates.shape[1])
|
202 |
+
mask1 = F.one_hot(indices1_s, num_classes=num_experts)
|
203 |
+
|
204 |
+
# mask only used tokens
|
205 |
+
if used_token is not None:
|
206 |
+
mask1 = einsum("s,se->se", used_token, mask1)
|
207 |
+
|
208 |
+
# gating decisions
|
209 |
+
exp_counts = torch.sum(mask1, dim=0).detach().to('cpu')
|
210 |
+
|
211 |
+
# if we don't want to drop any tokens
|
212 |
+
if not drop_tokens:
|
213 |
+
new_capacity = torch.max(exp_counts).to(logits.device)
|
214 |
+
# Communicate across expert processes to pick the maximum capacity.
|
215 |
+
if ep_group is not None:
|
216 |
+
dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=ep_group)
|
217 |
+
if groups._get_expert_model_parallel_world_size() == 1:
|
218 |
+
# If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'.
|
219 |
+
# This is since we are going to activate drop_tokens() to drop duplicate tokens.
|
220 |
+
tp = 1 if groups.mpu is None else bwc_tensor_model_parallel_world_size(mpu=groups.mpu)
|
221 |
+
new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype)
|
222 |
+
# Make sure the capacity value does not exceed the number of tokens.
|
223 |
+
capacity = min(new_capacity, torch.tensor(mask1.size(0)))
|
224 |
+
|
225 |
+
# Compute l_aux
|
226 |
+
me = torch.mean(gates, dim=0)
|
227 |
+
ce = torch.mean(mask1.float(), dim=0)
|
228 |
+
l_aux = torch.sum(me * ce) * num_experts
|
229 |
+
|
230 |
+
# Random Token Selection
|
231 |
+
if use_rts:
|
232 |
+
uniform = exp_selection_uniform_map.get(logits.device)
|
233 |
+
if uniform is None:
|
234 |
+
uniform = torch.distributions.uniform.Uniform(low=torch.tensor(0.0, device=logits.device),
|
235 |
+
high=torch.tensor(1.0, device=logits.device)).rsample
|
236 |
+
exp_selection_uniform_map[logits.device] = uniform
|
237 |
+
|
238 |
+
mask1_rand = mask1 * uniform(mask1.shape)
|
239 |
+
else:
|
240 |
+
mask1_rand = mask1
|
241 |
+
|
242 |
+
assert logits.shape[
|
243 |
+
0] >= min_capacity, "No. of tokens (batch-size) should be greater than min_capacity. Either set min_capacity to 0 or increase your batch size."
|
244 |
+
|
245 |
+
top_idx = _top_idx(mask1_rand, capacity)
|
246 |
+
|
247 |
+
new_mask1 = mask1 * torch.zeros_like(mask1).scatter_(0, top_idx, 1)
|
248 |
+
mask1 = new_mask1
|
249 |
+
|
250 |
+
if use_tutel:
|
251 |
+
# Tutel doesn't support index values masked with zero
|
252 |
+
# so we need to replace masked indices with -1
|
253 |
+
indices_mask = mask1.sum(dim=1) * num_experts - 1
|
254 |
+
indices1_s = torch.min(indices1_s, indices_mask)
|
255 |
+
|
256 |
+
# Compute locations in capacity buffer
|
257 |
+
if use_tutel:
|
258 |
+
locations1 = tutel_moe.fast_cumsum_sub_one(mask1)
|
259 |
+
else:
|
260 |
+
locations1 = torch.cumsum(mask1, dim=0) - 1
|
261 |
+
|
262 |
+
if use_tutel:
|
263 |
+
gates1_s = (gates * mask1).sum(dim=1)
|
264 |
+
locations1_s = torch.sum(locations1 * mask1, dim=1)
|
265 |
+
return l_aux, capacity, num_experts, [
|
266 |
+
indices1_s,
|
267 |
+
], [
|
268 |
+
locations1_s,
|
269 |
+
], [
|
270 |
+
gates1_s,
|
271 |
+
], exp_counts
|
272 |
+
|
273 |
+
# Store the capacity location for each token
|
274 |
+
locations1_s = torch.sum(locations1 * mask1, dim=1)
|
275 |
+
|
276 |
+
# Normalize gate probabilities
|
277 |
+
mask1_float = mask1.float()
|
278 |
+
gates = gates * mask1_float
|
279 |
+
|
280 |
+
locations1_sc = _one_hot_to_float(locations1_s, capacity)
|
281 |
+
combine_weights = einsum("se,sc->sec", gates, locations1_sc)
|
282 |
+
|
283 |
+
dispatch_mask = combine_weights.bool()
|
284 |
+
|
285 |
+
return l_aux, combine_weights, dispatch_mask, exp_counts
|
286 |
+
|
287 |
+
|
288 |
+
def top2gating(logits: Tensor,
|
289 |
+
capacity_factor: float,
|
290 |
+
min_capacity: int,
|
291 |
+
drop_tokens: bool = True,
|
292 |
+
ep_group: Union[torch.distributed.ProcessGroup, None] = None,
|
293 |
+
top2_2nd_expert_sampling: bool = True) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
|
294 |
+
"""Implements Top2Gating on logits."""
|
295 |
+
# everything is in fp32 in this function
|
296 |
+
gates = F.softmax(logits, dim=1)
|
297 |
+
|
298 |
+
# Create a mask for 1st's expert per token
|
299 |
+
indices1_s = torch.argmax(gates, dim=1)
|
300 |
+
num_experts = int(gates.shape[1])
|
301 |
+
mask1 = F.one_hot(indices1_s, num_classes=num_experts)
|
302 |
+
|
303 |
+
if top2_2nd_expert_sampling:
|
304 |
+
# Create a mask for 2nd's expert per token using Gumbel-max trick
|
305 |
+
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
|
306 |
+
logits += gumbel_rsample(logits.shape, device=logits.device)
|
307 |
+
|
308 |
+
# Replace top-expert with min value
|
309 |
+
logits_except1 = logits.masked_fill(mask1.bool(), float("-inf"))
|
310 |
+
indices2_s = torch.argmax(logits_except1, dim=1)
|
311 |
+
mask2 = F.one_hot(indices2_s, num_classes=num_experts)
|
312 |
+
|
313 |
+
# Compute locations in capacity buffer
|
314 |
+
locations1 = torch.cumsum(mask1, dim=0) - 1
|
315 |
+
locations2 = torch.cumsum(mask2, dim=0) - 1
|
316 |
+
# Update 2nd's location by accounting for locations of 1st
|
317 |
+
locations2 += torch.sum(mask1, dim=0, keepdim=True)
|
318 |
+
|
319 |
+
# Compute l_aux
|
320 |
+
me = torch.mean(gates, dim=0)
|
321 |
+
ce = torch.mean(mask1.float(), dim=0)
|
322 |
+
l_aux = torch.mean(me * ce) * num_experts * num_experts
|
323 |
+
|
324 |
+
# gating decisions
|
325 |
+
exp_counts = torch.sum(mask1 + mask2, dim=0)
|
326 |
+
|
327 |
+
if drop_tokens:
|
328 |
+
# Calculate configured capacity and remove locations outside capacity from mask
|
329 |
+
capacity = _capacity(gates, torch.tensor(capacity_factor * 2), torch.tensor(min_capacity))
|
330 |
+
mask1 *= torch.lt(locations1, capacity)
|
331 |
+
mask2 *= torch.lt(locations2, capacity)
|
332 |
+
else:
|
333 |
+
# Do not drop tokens - set capacity according to current expert assignments
|
334 |
+
new_capacity = torch.max(exp_counts)
|
335 |
+
if ep_group is not None:
|
336 |
+
dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=ep_group)
|
337 |
+
if groups._get_expert_model_parallel_world_size() == 1:
|
338 |
+
# If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'.
|
339 |
+
# This is since we are going to activate drop_tokens() to drop duplicate tokens.
|
340 |
+
tp = 1 if groups.mpu is None else bwc_tensor_model_parallel_world_size(mpu=groups.mpu)
|
341 |
+
new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype)
|
342 |
+
capacity = new_capacity
|
343 |
+
|
344 |
+
# Store the capacity location for each token
|
345 |
+
locations1_s = torch.sum(locations1 * mask1, dim=1)
|
346 |
+
locations2_s = torch.sum(locations2 * mask2, dim=1)
|
347 |
+
|
348 |
+
# Normalize gate probabilities
|
349 |
+
mask1_float = mask1.float()
|
350 |
+
mask2_float = mask2.float()
|
351 |
+
gates1_s = einsum("se,se->s", gates, mask1_float)
|
352 |
+
gates2_s = einsum("se,se->s", gates, mask2_float)
|
353 |
+
denom_s = gates1_s + gates2_s
|
354 |
+
# Avoid divide-by-zero
|
355 |
+
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
|
356 |
+
gates1_s /= denom_s
|
357 |
+
gates2_s /= denom_s
|
358 |
+
|
359 |
+
# Calculate combine_weights and dispatch_mask
|
360 |
+
gates1 = einsum("s,se->se", gates1_s, mask1_float)
|
361 |
+
gates2 = einsum("s,se->se", gates2_s, mask2_float)
|
362 |
+
locations1_sc = _one_hot_to_float(locations1_s, capacity)
|
363 |
+
locations2_sc = _one_hot_to_float(locations2_s, capacity)
|
364 |
+
combine1_sec = einsum("se,sc->sec", gates1, locations1_sc)
|
365 |
+
combine2_sec = einsum("se,sc->sec", gates2, locations2_sc)
|
366 |
+
combine_weights = combine1_sec + combine2_sec
|
367 |
+
dispatch_mask = combine_weights.bool()
|
368 |
+
|
369 |
+
return l_aux, combine_weights, dispatch_mask, exp_counts.detach().to('cpu')
|
370 |
+
|
371 |
+
|
372 |
+
class TopKGate(Module):
|
373 |
+
"""Gate module which implements Top2Gating as described in Gshard_.
|
374 |
+
::
|
375 |
+
|
376 |
+
gate = TopKGate(model_dim, num_experts)
|
377 |
+
l_aux, combine_weights, dispatch_mask = gate(input)
|
378 |
+
|
379 |
+
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
|
380 |
+
|
381 |
+
Args:
|
382 |
+
model_dim (int):
|
383 |
+
size of model embedding dimension
|
384 |
+
num_experts (int):
|
385 |
+
number of experts in model
|
386 |
+
"""
|
387 |
+
|
388 |
+
wg: torch.nn.Linear
|
389 |
+
|
390 |
+
def __init__(self,
|
391 |
+
model_dim: int,
|
392 |
+
num_experts: int,
|
393 |
+
k: int = 1,
|
394 |
+
capacity_factor: float = 1.0,
|
395 |
+
eval_capacity_factor: float = 1.0,
|
396 |
+
min_capacity: int = 8,
|
397 |
+
noisy_gate_policy: Optional[str] = None,
|
398 |
+
drop_tokens: bool = True,
|
399 |
+
use_rts: bool = True,
|
400 |
+
ep_group: Union[torch.distributed.ProcessGroup, None] = None,
|
401 |
+
top2_2nd_expert_sampling: bool = True) -> None:
|
402 |
+
super().__init__()
|
403 |
+
|
404 |
+
# Only top-1 and top-2 are supported at the moment.
|
405 |
+
if k != 1 and k != 2:
|
406 |
+
raise ValueError('Only top-1 and top-2 gatings are supported.')
|
407 |
+
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)
|
408 |
+
self.ep_group = ep_group
|
409 |
+
self.k = k
|
410 |
+
self.capacity_factor = capacity_factor
|
411 |
+
self.eval_capacity_factor = eval_capacity_factor
|
412 |
+
self.min_capacity = min_capacity
|
413 |
+
self.noisy_gate_policy = noisy_gate_policy
|
414 |
+
self.timers = SynchronizedWallClockTimer()
|
415 |
+
self.wall_clock_breakdown = False
|
416 |
+
self.gate_time = 0.0
|
417 |
+
self.drop_tokens = drop_tokens
|
418 |
+
self.use_rts = use_rts
|
419 |
+
self.top2_2nd_expert_sampling = top2_2nd_expert_sampling
|
420 |
+
|
421 |
+
def _set_ep_group(self, ep_group):
|
422 |
+
assert self.ep_group is None, f'Attempting to override an existing ep_group'
|
423 |
+
self.ep_group = ep_group
|
424 |
+
|
425 |
+
def forward(self,
|
426 |
+
input: torch.Tensor,
|
427 |
+
used_token: torch.Tensor = None,
|
428 |
+
use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore
|
429 |
+
|
430 |
+
if self.wall_clock_breakdown:
|
431 |
+
self.timers(TOPK_GATE_TIMER).start()
|
432 |
+
|
433 |
+
input_fp32 = input.float()
|
434 |
+
# input jittering
|
435 |
+
if self.noisy_gate_policy == 'Jitter' and self.training:
|
436 |
+
input_fp32 = multiplicative_jitter(input_fp32, device=input.device)
|
437 |
+
logits = torch.nn.functional.linear(input_fp32, weight=self.wg.weight.float(), bias=None)
|
438 |
+
|
439 |
+
if self.k == 1:
|
440 |
+
gate_output = top1gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor,
|
441 |
+
self.min_capacity, used_token, self.noisy_gate_policy if self.training else None,
|
442 |
+
self.drop_tokens, self.use_rts, self.ep_group, use_tutel)
|
443 |
+
|
444 |
+
else:
|
445 |
+
gate_output = top2gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor,
|
446 |
+
self.min_capacity, self.drop_tokens, self.ep_group, self.top2_2nd_expert_sampling)
|
447 |
+
|
448 |
+
if self.wall_clock_breakdown:
|
449 |
+
self.timers(TOPK_GATE_TIMER).stop()
|
450 |
+
self.gate_time = self.timers(TOPK_GATE_TIMER).elapsed(reset=False)
|
451 |
+
|
452 |
+
return gate_output
|
453 |
+
|
454 |
+
|
455 |
+
class MOELayer(Base):
|
456 |
+
"""MOELayer module which implements MixtureOfExperts as described in Gshard_.
|
457 |
+
::
|
458 |
+
|
459 |
+
gate = TopKGate(model_dim, num_experts)
|
460 |
+
moe = MOELayer(gate, expert)
|
461 |
+
output = moe(input)
|
462 |
+
l_aux = moe.l_aux
|
463 |
+
|
464 |
+
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
|
465 |
+
|
466 |
+
Args:
|
467 |
+
gate (torch.nn.Module):
|
468 |
+
gate network
|
469 |
+
expert (torch.nn.Module):
|
470 |
+
expert network
|
471 |
+
"""
|
472 |
+
|
473 |
+
def __init__(self,
|
474 |
+
gate: Module,
|
475 |
+
experts: Module,
|
476 |
+
ep_group_name,
|
477 |
+
ep_size,
|
478 |
+
num_local_experts: int,
|
479 |
+
use_tutel: bool = False) -> None:
|
480 |
+
super().__init__()
|
481 |
+
self.gate = gate
|
482 |
+
self.experts = experts
|
483 |
+
self.ep_group = None
|
484 |
+
self.ep_size = ep_size
|
485 |
+
self.ep_group_name = ep_group_name
|
486 |
+
self.num_local_experts = num_local_experts
|
487 |
+
self.time_falltoall = 0.0
|
488 |
+
self.time_salltoall = 0.0
|
489 |
+
self.time_moe = 0.0
|
490 |
+
self.timers = SynchronizedWallClockTimer()
|
491 |
+
self.wall_clock_breakdown = False
|
492 |
+
|
493 |
+
self.use_tutel = use_tutel and TUTEL_INSTALLED and gate.k == 1
|
494 |
+
|
495 |
+
if self.use_tutel:
|
496 |
+
logger.info('Using Tutel optimizations.')
|
497 |
+
elif use_tutel and not TUTEL_INSTALLED:
|
498 |
+
logger.warning("Tutel optimization requested but not installed. "
|
499 |
+
"Proceeding without Tutel.")
|
500 |
+
elif use_tutel and TUTEL_INSTALLED and gate.k != 1:
|
501 |
+
logger.warning("To enable Tutel optimization, use top-1 instead of top-2 gate. "
|
502 |
+
"Proceeding without Tutel.")
|
503 |
+
|
504 |
+
def _set_ep_group(self, ep_group):
|
505 |
+
self.ep_group = ep_group
|
506 |
+
self.gate._set_ep_group(ep_group)
|
507 |
+
|
508 |
+
def forward(self, *input: Tensor, **kwargs: Any) -> Tensor:
|
509 |
+
|
510 |
+
if self.wall_clock_breakdown:
|
511 |
+
self.timers(MOE_TIMER).start()
|
512 |
+
|
513 |
+
# Implement Algorithm 2 from GShard paper.
|
514 |
+
d_model = input[0].shape[-1]
|
515 |
+
|
516 |
+
# Initial implementation -> Reshape into S tokens by dropping sequence dimension.
|
517 |
+
# Reshape into G groups so that each group can distribute tokens equally
|
518 |
+
# group_size = kwargs['group_size'] if 'group_size' in kwargs.keys() else 1
|
519 |
+
reshaped_input = input[0].reshape(-1, d_model)
|
520 |
+
|
521 |
+
if self.use_tutel:
|
522 |
+
self.l_aux, C, E, indices_, locations_, gates_, self.exp_counts = self.gate(reshaped_input, input[1], True)
|
523 |
+
S, M = reshaped_input.size(0), reshaped_input.size(1)
|
524 |
+
|
525 |
+
if not hasattr(self, '_tutel_dispatcher'):
|
526 |
+
self._tutel_dispatcher = tutel_moe.fast_dispatcher(E, C, M, dispatch_dtype=reshaped_input.dtype)
|
527 |
+
self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C)
|
528 |
+
dispatched_input = self._tutel_dispatcher.encode(reshaped_input)
|
529 |
+
else:
|
530 |
+
self.l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1])
|
531 |
+
dispatched_input = einsum("sec,sm->ecm", dispatch_mask.type_as(input[0]), reshaped_input)
|
532 |
+
|
533 |
+
if self.wall_clock_breakdown:
|
534 |
+
self.timers(FIRST_ALLTOALL_TIMER).start()
|
535 |
+
|
536 |
+
if groups._get_expert_model_parallel_world_size() == 1:
|
537 |
+
# If the non-expert is tensor-parallel, it will create
|
538 |
+
# duplicate tokens on the tensor-parallel ranks.
|
539 |
+
# Since our experts are not tensor-parallel, these duplicates
|
540 |
+
# need to be dropped to ensure correctness.
|
541 |
+
# this also doubles up as a communication optimization as we are
|
542 |
+
# reducing the all-to-all communication volume.
|
543 |
+
dispatched_input = drop_tokens(dispatched_input, dim=1)
|
544 |
+
|
545 |
+
dispatched_input = _AllToAll.apply(self.ep_group, dispatched_input)
|
546 |
+
|
547 |
+
if self.wall_clock_breakdown:
|
548 |
+
self.timers(FIRST_ALLTOALL_TIMER).stop()
|
549 |
+
self.time_falltoall = self.timers(FIRST_ALLTOALL_TIMER).elapsed(reset=False)
|
550 |
+
|
551 |
+
# Re-shape after all-to-all: ecm -> gecm
|
552 |
+
dispatched_input = dispatched_input.reshape(self.ep_size, self.num_local_experts, -1, d_model)
|
553 |
+
|
554 |
+
expert_output = self.experts(dispatched_input)
|
555 |
+
|
556 |
+
if self.wall_clock_breakdown:
|
557 |
+
self.timers(SECOND_ALLTOALL_TIMER).start()
|
558 |
+
|
559 |
+
expert_output = _AllToAll.apply(self.ep_group, expert_output)
|
560 |
+
|
561 |
+
if self.wall_clock_breakdown:
|
562 |
+
self.timers(SECOND_ALLTOALL_TIMER).stop()
|
563 |
+
self.time_salltoall = self.timers(SECOND_ALLTOALL_TIMER).elapsed(reset=False)
|
564 |
+
|
565 |
+
# Re-shape back: gecm -> ecm
|
566 |
+
expert_output = expert_output.reshape(self.ep_size * self.num_local_experts, -1, d_model)
|
567 |
+
|
568 |
+
if groups._get_expert_model_parallel_world_size() == 1:
|
569 |
+
# the dropped duplicate tokens need to be gathered on each
|
570 |
+
# tensor parallel rank again for the tensor-parallel
|
571 |
+
# non-expert of the next layer.
|
572 |
+
expert_output = gather_tokens(expert_output, dim=1)
|
573 |
+
|
574 |
+
if self.use_tutel:
|
575 |
+
combined_output = self._tutel_dispatcher.decode(expert_output.view(E * C, M))
|
576 |
+
else:
|
577 |
+
combined_output = einsum("sec,ecm->sm", combine_weights.type_as(input[0]), expert_output)
|
578 |
+
|
579 |
+
a = combined_output.reshape(input[0].shape)
|
580 |
+
|
581 |
+
if self.wall_clock_breakdown:
|
582 |
+
self.timers(MOE_TIMER).stop()
|
583 |
+
self.time_moe = self.timers(MOE_TIMER).elapsed(reset=False)
|
584 |
+
|
585 |
+
return a
|
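For reference, the per-expert capacity rule used by `_capacity()` in the gating code above can be written in plain Python as below; this is only a sketch of the formula, not DeepSpeed code.

```python
# Sketch: capacity = ceil(tokens_per_expert * capacity_factor), never below min_capacity.
import math

def expert_capacity(num_tokens: int, num_experts: int, capacity_factor: float, min_capacity: int) -> int:
    capacity = math.ceil((num_tokens / num_experts) * capacity_factor)
    return max(capacity, min_capacity)

print(expert_capacity(num_tokens=64, num_experts=8, capacity_factor=1.0, min_capacity=4))  # 8
print(expert_capacity(num_tokens=16, num_experts=8, capacity_factor=1.0, min_capacity=4))  # 4
```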
venv/lib/python3.10/site-packages/deepspeed/moe/utils.py
ADDED
@@ -0,0 +1,182 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from collections import defaultdict
+from typing import Any, Dict, List, Set, Tuple, Union, cast
+
+import torch
+from torch import nn
+
+from .layer import MoE
+
+
+def has_moe_layers(m: nn.Module) -> Tuple[bool, int]:
+    has_moe = False
+    num_experts = 0
+
+    for module in m.modules():
+        if isinstance(module, MoE):
+            has_moe = True
+            num_experts = module.num_experts
+            break
+    return has_moe, num_experts
+
+
+def is_moe_param(param: torch.Tensor) -> bool:
+    if hasattr(param, "allreduce") and not param.allreduce:
+        return True
+    return False
+
+
+def split_params_into_shared_and_expert_params(
+        params: List[torch.nn.Parameter]) -> Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]:
+    shared_params: List[nn.Parameter] = []
+    expert_params: List[nn.Parameter] = []
+
+    for p in params:
+        if is_moe_param(p):
+            expert_params.append(p)
+        else:
+            shared_params.append(p)
+    return shared_params, expert_params
+
+
+def split_params_grads_into_shared_and_expert_params(
+        group: List[torch.nn.Parameter]) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
+    """Split grads of parameters into grads of non-expert params
+    and grads of expert params. This is useful while computing
+    grad-norms for clipping and overflow detection.
+
+    Args:
+        group (List[torch.nn.Parameter]):
+            The group of parameters to split
+
+    Returns:
+        Tuple[List[torch.Tensor], List[torch.Tensor]]:
+            list of gradients for non-MoE params, list of gradients of MoE params
+    """
+    expert_grads: List[torch.Tensor] = []
+    shared_grads: List[torch.Tensor] = []
+
+    for p in group:
+        if p.grad is not None:
+            if is_moe_param(p):
+                expert_grads.append(p.grad.to(p.dtype))
+            else:
+                shared_grads.append(p.grad.to(p.dtype))
+    return shared_grads, expert_grads
+
+
+def split_params_into_different_moe_groups_for_optimizer(
+        param_groups: Union[Dict[str, Any], Tuple[Dict[str, Any], ...], List[Dict[str, Any]]],
+        max_group_size: Union[int, float] = 178956971) -> List[Dict[str, Any]]:
+    """Split parameters into different MoE groups for optimizer
+
+    Args:
+        param_groups (Union[Dict[str, Any], Tuple[Dict[str, Any], ...], List[Dict[str, Any]]]):
+            The list of parameter groups to split
+
+    Returns:
+        List[Dict[str, Any]]:
+            list of MoE/non-MoE groups for optimizer
+    """
+    if isinstance(param_groups, tuple):
+        param_groups = list(param_groups)  # Tuple cannot be modified
+    elif isinstance(param_groups, dict):
+        param_groups = [param_groups]
+    elif not isinstance(param_groups, list):
+        raise ValueError(f"Unknown param group type of {type(param_groups)}")
+
+    # gather all data parallel group names
+    data_parallel_group_names: Set[str] = set()
+    for param_group in param_groups:
+        for param in cast(List[nn.Parameter], param_group["params"]):
+            if is_moe_param(param):
+                data_parallel_group_names.add(param.group_name)
+
+    # Create the param MoE groups, leave param assignment to the next step
+    group_moe: Dict[str, Dict[str, Dict[str, Any]]] = defaultdict(lambda: defaultdict(dict))
+    for param_group in param_groups:
+        for key in data_parallel_group_names:
+            group_moe[param_group['name']][key] = {
+                **param_group,
+                'name': key,
+                'moe': True,
+                'params': [],
+            }
+
+    # Assign params
+    for param_group in param_groups:
+        new_params: List[nn.Parameter] = []
+
+        for param in cast(List[nn.Parameter], param_group['params']):
+            if is_moe_param(param):
+                group_moe[param_group['name']][param.group_name]['params'].append(param)
+            else:
+                new_params.append(param)
+        param_group['params'] = new_params
+
+    # Flatten the moe groups
+    if max_group_size is not None:
+        for moe_group in group_moe.values():
+            for param_group in moe_group.values():
+                cur_group: List[nn.Parameter] = []
+                all_groups: List[List[nn.Parameter]] = []
+                size_of_cur_group = 0
+
+                for param in cast(List[nn.Parameter], param_group['params']):
+                    if size_of_cur_group + param.numel() <= max_group_size:
+                        cur_group.append(param)
+                        size_of_cur_group += param.numel()
+                    else:
+                        all_groups.append(cur_group)
+                        cur_group = [param]
+                        size_of_cur_group = param.numel()
+
+                if cur_group:
+                    all_groups.append(cur_group)
+
+                for group in all_groups:
+                    param_groups.append({**param_group, 'params': group})
+    else:
+        for moe_group in group_moe.values():
+            for param_group in moe_group.values():
+                param_groups.append(param_group)
+
+    return param_groups
+
+
+def is_moe_param_group(param_group):
+    return param_group.get('moe', False)
+
+
+def configure_moe_param_groups(model_parameters: List):
+    assert isinstance(model_parameters, list), "model_parameters must be a list"
+
+    for p in model_parameters:
+        # match torch.optim.Optimizer expectations,
+        # see: https://github.com/pytorch/pytorch/blob/2ffab6e663b9c6951048b8c8ba82d2cc5ca5c2fc/torch/optim/optimizer.py#L270-L272
+        if not isinstance(p, (torch.Tensor, dict)):
+            raise TypeError("param argument that would be given to the optimizer should be "
+                            f"an iterable of Tensors or dicts, but got {type(p)}")
+
+    # peek at the first element to determine how to proceed
+    first = model_parameters[0]
+
+    # Case 1: model_parameters is a list of torch.nn.Parameter
+    # -> need to create moe compatible param groups
+    if isinstance(first, torch.nn.Parameter):
+        param_group = {'params': model_parameters, 'name': 'dense-params'}
+        return split_params_into_different_moe_groups_for_optimizer(param_group)
+
+    # Case 2: model_parameters is a list of param groups List[dict]
+    # -> moe compatible param groups might already exist, if not create them
+    elif isinstance(first, dict):
+        # there are no moe groups created
+        if not any(['moe' in param_group for param_group in model_parameters]):
+            return split_params_into_different_moe_groups_for_optimizer(model_parameters)
+        else:
+            # moe groups exist, nothing to do
+            return model_parameters
venv/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.63 kB)
venv/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc
ADDED
Binary file (604 Bytes)
venv/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc
ADDED
Binary file (11.1 kB)
venv/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc
ADDED
Binary file (15.6 kB)
venv/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc
ADDED
Binary file (1.27 kB)
venv/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc
ADDED
Binary file (20.2 kB)
venv/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc
ADDED
Binary file (15.7 kB)
venv/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc
ADDED
Binary file (2.66 kB)
venv/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc
ADDED
Binary file (5.37 kB)