Add files using upload-large-folder tool
(This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.)
- .gitattributes +3 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/__init__.py +7 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/constants.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/reduce_op.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/backend.py +47 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/reduce_op.py +18 -0
- venv/lib/python3.10/site-packages/deepspeed/comm/torch.py +392 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/auto_tp.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/inject.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/layers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/load_checkpoint.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/policy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/replace_module.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/replace_policy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/tp_shard.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__init__.py +21 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/base.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/bert.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/bloom.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/clip.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/distil_bert.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gpt2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptj.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptneo.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptneox.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/internlm.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/llama.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/llama2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/megatron_gpt.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/megatron_gpt_moe.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/unet.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/vae.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/base.py +322 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/base_moe.py +130 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/bert.py +93 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/bloom.py +131 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/clip.py +73 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/distil_bert.py +82 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__init__.py +9 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/gated_mlp.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/hybrid_engine.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/hybrid_megatron.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/megatron.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/meta_tensor.cpython-310.pyc +0 -0
.gitattributes
CHANGED

```diff
@@ -130,3 +130,6 @@ lm-evaluation-harness/wandb/run-20240606_045127-vb760voe/run-vb760voe.wandb filt
 venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_infer.so.8 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
```
venv/lib/python3.10/site-packages/deepspeed/comm/__init__.py
ADDED (+7 lines)

```python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .utils import *
from .comm import *
```
venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/__init__.cpython-310.pyc ADDED (binary file, 220 Bytes)

venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/constants.cpython-310.pyc ADDED (binary file, 1.04 kB)

venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/reduce_op.cpython-310.pyc ADDED (binary file, 526 Bytes)

venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/utils.cpython-310.pyc ADDED (binary file, 2.72 kB)
venv/lib/python3.10/site-packages/deepspeed/comm/backend.py
ADDED (+47 lines)

```python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
"""
DeepSpeed Communication Backend.

In the future, directly use NCCL/MPI/Gloo/etc without requiring torch.distributed. Simply wrap torch.distributed for now.

# Custom DS Backends -- Direct C/Ops
 - NCCL -- [EXPERIMENTAL]
 - MPI -- [EXPERIMENTAL]
 - RCCL -- [EXPERIMENTAL]
 - GLOO -- [EXPERIMENTAL]
# DS backend wrapper for torch.distributed [DEFAULT]
 - T-NCCL -- [DEFAULT]
 - T-GLOO
 - T-MPI

# Backend is the base class
 -- NcclBackend, MpiBackend, and TorchBackend are the main subclasses. TorchBackend is the only officially supported backend for now.
"""


class Backend(object):

    def __init__(self, name='backend', rank=0, size=1):
        self.name = name
        # The world size and rank of the world process group
        self.world_group = None
        self.world_size = size
        self.world_rank = rank
        # Single process group (pg) implementation for now but keep a list for future
        self.process_groups = []
        self.initialized = False

    def is_initialized(self):
        return self.initialized

    def new_group(self):
        # create a new pg and add it to pg list
        pass

    def init_process_group(self):
        # subclasses will initialize them fully
        # - initialize a default world process group and add it to pg list
        self.initialized = True
```
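The `Backend` class above is deliberately minimal: subclasses own the actual process-group creation and collectives, and the base class only tracks world state and an `initialized` flag. As a rough illustration (not part of the commit; `DummyBackend` and the placeholder group handle are hypothetical), a concrete backend only needs to populate that state and flip the flag:

```python
# Hypothetical sketch: a minimal Backend subclass, mirroring what
# TorchBackend does through torch.distributed. Not code from the commit.
from deepspeed.comm.backend import Backend


class DummyBackend(Backend):

    def __init__(self, rank=0, size=1):
        super().__init__(name='dummy', rank=rank, size=size)

    def init_process_group(self):
        # A real subclass would create the default world process group here
        # (e.g. via torch.distributed.init_process_group) and register it.
        self.process_groups.append('world')  # placeholder stand-in for a pg handle
        self.initialized = True


backend = DummyBackend(rank=0, size=1)
backend.init_process_group()
assert backend.is_initialized()
```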
venv/lib/python3.10/site-packages/deepspeed/comm/reduce_op.py
ADDED (+18 lines)

```python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from enum import Enum


class ReduceOp(Enum):
    SUM = 0
    PRODUCT = 1
    MIN = 2
    MAX = 3
    BAND = 4
    BOR = 5
    BXOR = 6
    AVG = 7
    UNUSED = 8
```
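This enum mirrors the reduction operators of `torch.distributed` so DeepSpeed call sites can stay framework-neutral; `torch.py` below performs the translation in `TorchBackend._reduce_op`. A small hedged sketch of that mapping as a lookup table (illustrative only, not code from the commit):

```python
# Illustrative only: translate the framework-neutral enum to torch's ReduceOp,
# the same conversion TorchBackend._reduce_op performs with an if/elif chain.
import torch
from deepspeed.comm.reduce_op import ReduceOp

_TO_TORCH = {
    ReduceOp.SUM: torch.distributed.ReduceOp.SUM,
    ReduceOp.PRODUCT: torch.distributed.ReduceOp.PRODUCT,
    ReduceOp.MIN: torch.distributed.ReduceOp.MIN,
    ReduceOp.MAX: torch.distributed.ReduceOp.MAX,
}

assert _TO_TORCH[ReduceOp.SUM] is torch.distributed.ReduceOp.SUM
```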
venv/lib/python3.10/site-packages/deepspeed/comm/torch.py
ADDED (+392 lines)

```python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import deepspeed
from deepspeed import utils

from .utils import *
from .backend import *
from .comm import *
from ..runtime import compiler
from deepspeed.utils.torch import required_torch_version
import os

DS_COMM_ALL_GATHER_OFF = False
DS_COMM_REDUCE_SCATTER_OFF = False
DS_COMM_BROADCAST_OFF = False
DS_COMM_ALL_REDUCE_OFF = False
DS_COMM_REDUCE_OFF = False


def build_shm_op():
    builder = get_accelerator().create_op_builder("ShareMemCommBuilder")
    if builder is None or not deepspeed.ops.__compatible_ops__[builder.NAME]:
        return None
    shm_cpp_module = builder.load()
    print(f'DeepSpeed {builder.absolute_name()} built successfully')
    return shm_cpp_module


def has_coalescing_manager():
    has_c10d = hasattr(torch.distributed, 'distributed_c10d')
    return has_c10d and hasattr(torch.distributed.distributed_c10d, '_coalescing_manager')


def has_all_reduce_coalesced():
    return hasattr(torch.distributed, "all_reduce_coalesced") and required_torch_version(min_version=1.13)


def get_coalescing_manager(group, device, reqs, async_op):
    if required_torch_version(min_version=2.0, max_version=2.0):
        return torch.distributed.distributed_c10d._coalescing_manager(group, device=device, reqs=reqs)
    elif required_torch_version(min_version=2.1):
        return torch.distributed.distributed_c10d._coalescing_manager(group, device=device, async_ops=async_op)
    else:
        return torch.distributed.distributed_c10d._coalescing_manager(group, reqs)


##Utilities to turn comm off
##TODO: move to base comm (wrapper)
def all_gather_comm_off(flag=False):
    global DS_COMM_ALL_GATHER_OFF
    DS_COMM_ALL_GATHER_OFF = flag


def reduce_scatter_comm_off(flag=False):
    global DS_COMM_REDUCE_SCATTER_OFF
    DS_COMM_REDUCE_SCATTER_OFF = flag


def broadcast_comm_off(flag=False):
    global DS_COMM_BROADCAST_OFF
    DS_COMM_BROADCAST_OFF = flag


def all_reduce_comm_off(flag=False):
    global DS_COMM_ALL_REDUCE_OFF
    DS_COMM_ALL_REDUCE_OFF = flag


def reduce_comm_off(flag=False):
    global DS_COMM_REDUCE_OFF
    DS_COMM_REDUCE_OFF = flag


#assumption: all_gather and reduce scatter
## are what we care about
def backward_comm_off(flag=False):
    all_gather_comm_off(flag)
    reduce_scatter_comm_off(flag)


class Noop:

    def wait(self):
        return None


class TorchBackend(Backend):
    """
    A light-weight wrapper class for torch.distributed API.
    Only a subset of functions are wrapped. Once the init_process_group
    is initialized, standard torch.distributed.* can be used directly
    so no need to wrap all the functions. We can keep adding wrappers as
    needed.
    """

    def __init__(self, backend, timeout, init_method, rank=-1, world_size=-1, name='torch'):
        super(TorchBackend, self).__init__()
        self.shm_comm_op = build_shm_op()
        self.has_all_reduce_coalesced = has_all_reduce_coalesced()
        self.has_coalescing_manager = has_coalescing_manager()
        self.all_gather_function = self.get_all_gather_function()
        self.reduce_scatter_function = self.get_reduce_scatter_function()
        self.initialized = True
        self.name = name
        # Future functionality to support ds.initialize() on a single GPU
        # The idea is to fake that dist backend is initialized even when
        # it is not so we can run on a single GPU without doing any init_process_group
        self.single_gpu_mode = True
        self.init_process_group(backend, timeout, init_method, rank, world_size)
        if self.shm_comm_op != None:
            self.shm_comm_op.initialize(self.get_world_size(), self.get_rank())

    @classmethod
    @compiler.disable
    def get_all_gather_function(self):
        if hasattr(torch.distributed, "all_gather_into_tensor"):
            return torch.distributed.all_gather_into_tensor
        elif hasattr(torch.distributed, "_all_gather_base"):
            return torch.distributed._all_gather_base
        return None

    @classmethod
    @compiler.disable
    def get_reduce_scatter_function(self):
        if hasattr(torch.distributed, "reduce_scatter_tensor"):
            return torch.distributed.reduce_scatter_tensor
        elif hasattr(torch.distributed, "_reduce_scatter_base"):
            return torch.distributed._reduce_scatter_base
        return None

    def has_all_gather_into_tensor(self):
        return self.all_gather_function is not None

    def has_reduce_scatter_tensor(self):
        return self.reduce_scatter_function is not None

    def init_process_group(self, backend, timeout, init_method, rank, world_size):
        if not torch.distributed.is_initialized():
            torch.distributed.init_process_group(backend,
                                                 timeout=timeout,
                                                 init_method=init_method,
                                                 rank=rank,
                                                 world_size=world_size)
        self.using_mpi = torch.distributed.get_backend() == 'mpi'

    @compiler.disable
    def all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False):
        op = self._reduce_op(op)
        return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op)

    @compiler.disable
    def inference_all_reduce(self, tensor, op, group=None):
        if self.shm_comm_op == None or self.shm_comm_op.inference_all_reduce(tensor, op) == -1:
            op = self._reduce_op(op)
            return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=False)

    @compiler.disable
    def all_reduce_coalesced(self, tensors, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False):
        """ proxy func to torch.distributed.all_reduce_coalesced,
        which is included in PyTorch 1.13 and above
        """
        if not self.has_all_reduce_coalesced:
            raise RuntimeError(f"Current torch version does not have all_reduce_coalesced "
                               f"api (torch.__version__: {torch.__version__})")
        op = self._reduce_op(op)
        return torch.distributed.all_reduce_coalesced(tensors=tensors, op=op, group=group, async_op=async_op)

    @compiler.disable
    def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
        if DS_COMM_REDUCE_OFF:
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.warning("REDUCE is OFF")
            return Noop()
        return torch.distributed.reduce(tensor=tensor, dst=dst, op=self._reduce_op(op), group=group, async_op=async_op)

    @compiler.disable
    def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_op=False):
        if DS_COMM_REDUCE_SCATTER_OFF:
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.warning("REDUCE SCATTER is OFF")
            return Noop()
        else:
            return torch.distributed.reduce_scatter(output=output,
                                                    input_list=input_list,
                                                    op=self._reduce_op(op),
                                                    group=group,
                                                    async_op=async_op)

    @compiler.disable
    def broadcast(self, tensor, src, group=None, async_op=False):
        if DS_COMM_BROADCAST_OFF:
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.warning("BROADCAST is OFF")
            return Noop()
        else:
            return torch.distributed.broadcast(tensor=tensor, src=src, group=group, async_op=async_op)

    @compiler.disable
    def all_gather(self, tensor_list, tensor, group=None, async_op=False):
        if DS_COMM_ALL_GATHER_OFF:
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.warning("All Gather is OFF")
            return Noop()
        else:
            return torch.distributed.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op)

    @compiler.disable
    def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False):
        if self.has_all_gather_into_tensor():
            return self.all_gather_function(output_tensor=output_tensor,
                                            input_tensor=input_tensor,
                                            group=group,
                                            async_op=async_op)

    @compiler.disable
    def all_gather_base(self, output_tensor, input_tensor, group=None, async_op=False):
        if DS_COMM_ALL_GATHER_OFF:
            if int(os.getenv('RANK', '0')) == 0:
                utils.logger.warning("All Gather is OFF")
            return Noop()
        else:
            if self.has_allgather_base:
                return torch.distributed.distributed_c10d._all_gather_base(output_tensor=output_tensor,
                                                                           input_tensor=input_tensor,
                                                                           group=group,
                                                                           async_op=async_op)
            else:
                utils.logger.warning("unable to find torch.distributed._all_gather_base. will fall back to "
                                     "torch.distributed.reduce_scatter which will result in suboptimal performance. "
                                     "please consider upgrading your pytorch installation.")
                pass

    @compiler.disable
    def all_gather_coalesced(self, output_tensors, input_tensors, group=None, async_op=False):
        """"""
        assert len(output_tensors) == len(input_tensors), ""
        if hasattr(torch.distributed.distributed_c10d, '_all_gather_base_coalesced'):
            # customized PyTorch
            return torch.distributed.distributed_c10d._all_gather_base_coalesced(output_tensors,
                                                                                 input_tensors,
                                                                                 group=group,
                                                                                 async_op=async_op)
        elif has_coalescing_manager():
            reqs = []
            with get_coalescing_manager(group, input_tensors[0].device, reqs, async_op):
                for output, input in zip(output_tensors, input_tensors):
                    handle = torch.distributed.distributed_c10d.all_gather_into_tensor(output,
                                                                                       input,
                                                                                       group=group,
                                                                                       async_op=True)
                    reqs.append(handle)
            if async_op:
                return reqs[-1]
            else:
                reqs[-1].wait()

    @compiler.disable
    def reduce_scatter_tensor(self, output_tensor, input_tensor, op=ReduceOp.SUM, group=None, async_op=False):
        if self.has_reduce_scatter_tensor():
            return self.reduce_scatter_function(output_tensor,
                                                input_tensor,
                                                op=self._reduce_op(op),
                                                group=group,
                                                async_op=async_op)
        else:
            utils.logger.warning("unable to find torch.distributed.reduce_scatter_tensor. will fall back to "
                                 "torch.distributed.reduce_scatter which will result in suboptimal performance. "
                                 "please consider upgrading your pytorch installation.")
            pass

    @compiler.disable
    def all_to_all_single(self,
                          output,
                          input,
                          output_split_sizes=None,
                          input_split_sizes=None,
                          group=None,
                          async_op=False):
        return torch.distributed.all_to_all_single(output=output,
                                                   input=input,
                                                   output_split_sizes=output_split_sizes,
                                                   input_split_sizes=input_split_sizes,
                                                   group=group,
                                                   async_op=async_op)

    @compiler.disable
    def all_to_all(self, output_tensor_list, input_tensor_list, group=None, async_op=False):
        return torch.distributed.all_to_all(output_tensor_list, input_tensor_list, group=group, async_op=async_op)

    @compiler.disable
    def send(self, tensor, dst, group=None, tag=0):
        return torch.distributed.send(tensor=tensor, dst=dst, group=group, tag=tag)

    @compiler.disable
    def recv(self, tensor, src=None, group=None, tag=0):
        return torch.distributed.recv(tensor=tensor, src=src, group=group, tag=tag)

    @compiler.disable
    def isend(self, tensor, dst, group=None, tag=0):
        return torch.distributed.isend(tensor=tensor, dst=dst, group=group, tag=tag)

    @compiler.disable
    def irecv(self, tensor, src=None, group=None, tag=0):
        return torch.distributed.irecv(tensor=tensor, src=src, group=group, tag=tag)

    @compiler.disable
    def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False):
        return torch.distributed.gather(tensor=tensor,
                                        gather_list=gather_list,
                                        dst=dst,
                                        group=group,
                                        async_op=async_op)

    @compiler.disable
    def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False):
        return torch.distributed.scatter(tensor=tensor,
                                         scatter_list=scatter_list,
                                         src=src,
                                         group=group,
                                         async_op=async_op)

    @compiler.disable
    def barrier(self, group=torch.distributed.GroupMember.WORLD, async_op=False, device_ids=None):
        if group is None:
            group = torch.distributed.GroupMember.WORLD
        return torch.distributed.barrier(group=group, async_op=async_op, device_ids=device_ids)

    @compiler.disable
    def monitored_barrier(self, group=torch.distributed.GroupMember.WORLD, timeout=None, wait_all_ranks=False):
        if group is None:
            group = torch.distributed.GroupMember.WORLD
        return torch.distributed.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks)

    def get_rank(self, group=None):
        return torch.distributed.get_rank(group=group)

    def get_world_size(self, group=None):
        return torch.distributed.get_world_size(group=group)

    def is_initialized(self):
        return torch.distributed.is_initialized()

    def get_backend(self, group=None):
        return torch.distributed.get_backend(group=group)

    def new_group(self, ranks):
        return torch.distributed.new_group(ranks)

    def get_global_rank(self, group, group_rank):
        if hasattr(torch.distributed.distributed_c10d, "get_global_rank"):
            from torch.distributed.distributed_c10d import get_global_rank as _get_global_rank
        else:
            from torch.distributed.distributed_c10d import _get_global_rank
        return _get_global_rank(group, group_rank)

    def get_world_group(self):
        return torch.distributed.group.WORLD

    def destroy_process_group(self, group=None):
        return torch.distributed.destroy_process_group(group=group)

    def _reduce_op(self, op):
        '''
            Helper function. If the op provided is not a torch.dist.ReduceOp, convert it and return
        '''
        if not isinstance(op, torch.distributed.ReduceOp):
            if op == ReduceOp.SUM:
                op = torch.distributed.ReduceOp.SUM
            elif op == ReduceOp.PRODUCT:
                op = torch.distributed.ReduceOp.PRODUCT
            elif op == ReduceOp.AVG:
                op = torch.distributed.ReduceOp.AVG
            elif op == ReduceOp.MIN:
                op = torch.distributed.ReduceOp.MIN
            elif op == ReduceOp.MAX:
                op = torch.distributed.ReduceOp.MAX
            elif op == ReduceOp.BAND:
                op = torch.distributed.ReduceOp.BAND
            elif op == ReduceOp.BOR:
                op = torch.distributed.ReduceOp.BOR
            elif op == ReduceOp.BXOR:
                op = torch.distributed.ReduceOp.BXOR
        return op


# This will become a light-weight wrapper around torch.distributed functions
# TODO: create some example to show how this wrapper can help profile communication
# TODO: make sure there is no performance regression with this approach
# TODO: explore monkey-patching if this does not work
```
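Two patterns in torch.py are worth calling out: capability detection at construction time (`get_all_gather_function` / `get_reduce_scatter_function` probe `torch.distributed` and fall back with a warning when the flat-tensor collectives are missing), and the module-level `DS_COMM_*_OFF` switches that replace collectives with a `Noop` whose `wait()` returns None. A hedged, single-process smoke test of both (illustrative, not from the commit; the one-rank gloo setup and env values are assumptions):

```python
# Illustrative single-rank exercise of TorchBackend (not part of the commit).
# Assumes a working deepspeed + torch install; uses a one-process gloo group.
import os
from datetime import timedelta

import torch
from deepspeed.comm.torch import TorchBackend, all_gather_comm_off

os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
os.environ.setdefault('MASTER_PORT', '29500')

backend = TorchBackend('gloo', timedelta(seconds=60), init_method='env://',
                       rank=0, world_size=1)

t = torch.ones(4)
backend.all_reduce(t)          # sums across ranks (a no-op with world_size=1)

all_gather_comm_off(True)      # flips DS_COMM_ALL_GATHER_OFF: all_gather now returns Noop
handle = backend.all_gather([torch.empty(4)], t)
assert handle.wait() is None   # Noop.wait() does nothing, by design
```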
venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/__init__.cpython-310.pyc ADDED (binary file, 652 Bytes)

venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/auto_tp.cpython-310.pyc ADDED (binary file, 14.9 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/inject.cpython-310.pyc ADDED (binary file, 2.56 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/layers.cpython-310.pyc ADDED (binary file, 5.66 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/load_checkpoint.cpython-310.pyc ADDED (binary file, 9.4 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/policy.cpython-310.pyc ADDED (binary file, 5.66 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/replace_module.cpython-310.pyc ADDED (binary file, 19.1 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/replace_policy.cpython-310.pyc ADDED (binary file, 888 Bytes)

venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/tp_shard.cpython-310.pyc ADDED (binary file, 1.35 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/utils.cpython-310.pyc ADDED (binary file, 1.64 kB)
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__init__.py
ADDED (+21 lines)

```python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .bert import DS_BERTContainer, HFBertLayerPolicy
from .bloom import DS_BloomContainer, BLOOMLayerPolicy, supported_models
from .distil_bert import DS_DistilBERTContainer, HFDistilBertLayerPolicy
from .gpt2 import DS_GPT2Container, HFGPT2LayerPolicy
from .gptj import DS_GPTJContainer, HFGPTJLayerPolicy
from .gptneo import DS_GPTNEOContainer, HFGPTNEOLayerPolicy
from .gptneox import DS_GPTNEOXContainer, GPTNEOXLayerPolicy
from .llama import DS_LLAMAContainer, LLAMALayerPolicy
from .llama2 import LLAMA2LayerPolicy, DS_LLAMA2Container
from .internlm import DS_InternLMContainer, InternLMLayerPolicy
from .megatron_gpt import DS_MegatronGPTContainer, MegatronLayerPolicy
from .megatron_gpt_moe import DS_MegatronGPTMoEContainer, MegatronMoELayerPolicy
from .opt import DS_OPTContainer, HFOPTLayerPolicy
from .clip import DS_CLIPContainer, HFCLIPLayerPolicy
from .unet import UNetPolicy
from .vae import VAEPolicy
```
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/__init__.cpython-310.pyc ADDED (binary file, 1.42 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/base.cpython-310.pyc ADDED (binary file, 11.2 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/bert.cpython-310.pyc ADDED (binary file, 3.26 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/bloom.cpython-310.pyc ADDED (binary file, 5.02 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/clip.cpython-310.pyc ADDED (binary file, 2.82 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/distil_bert.cpython-310.pyc ADDED (binary file, 3.09 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gpt2.cpython-310.pyc ADDED (binary file, 2.39 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptj.cpython-310.pyc ADDED (binary file, 4.84 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptneo.cpython-310.pyc ADDED (binary file, 5.25 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptneox.cpython-310.pyc ADDED (binary file, 4.91 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/internlm.cpython-310.pyc ADDED (binary file, 6.51 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/llama.cpython-310.pyc ADDED (binary file, 5.86 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/llama2.cpython-310.pyc ADDED (binary file, 5.74 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/megatron_gpt.cpython-310.pyc ADDED (binary file, 4.39 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/megatron_gpt_moe.cpython-310.pyc ADDED (binary file, 3.54 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/unet.cpython-310.pyc ADDED (binary file, 1.83 kB)

venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/vae.cpython-310.pyc ADDED (binary file, 1.49 kB)
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/base.py
ADDED (+322 lines)

```python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# Create a container object to save model-specific tensors using the policy file above.
from abc import ABC

import torch

import deepspeed
from deepspeed.ops.transformer.inference.config import DeepSpeedInferenceConfig
from deepspeed.accelerator import get_accelerator

# If the intermediate size attribute is set DEFAULT_INTERMEDIATE_SIZE
# it is assumed the intermediate size is 4x the embedding dimension
DEFAULT_INTERMEDIATE_SIZE = -1


class BaseConvolutionContainer(ABC):
    # not implemented
    def __init__(self):
        pass


class BaseTransformerContainer(ABC):

    def __init__(self, policy, config, model_config, layer_id, child):
        self.policy = policy
        self.config = config
        self.model_config = model_config
        self.layer_id = layer_id
        self.child = child

        self.megatron_v2 = self.policy.is_megatron_v2
        self.scale_attention = self.policy.scale_attention
        self.ckpt_load_enabled = False

        # configuration for models. todo: can this be moved to a pydantic model config?
        self.hidden_size = None
        self.intermediate_size = None
        self.num_attention_heads = None
        self.mp_size = self.config.tensor_parallel.tp_size
        self.pre_layer_norm = self.model_config.do_layer_norm_before if \
            hasattr(self.model_config, 'do_layer_norm_before') else self.policy.pre_attn_norm
        self.dtype = self.config.dtype
        self.attn_linear_layer = self.policy.linear_layer
        self.mlp_linear_layer = self.policy.linear_layer
        self.return_tuple = self.config.return_tuple
        self.triangular_masking = True
        self.local_attention = ((self.model_config.attention_layers[self.layer_id] == "local") if hasattr(
            self.model_config, 'attention_layers') else False)
        self.window_size = getattr(self.model_config, "window_size", 1)
        self.mlp_act_func_type = self.policy.mlp_act_func_type
        self.norm_type = self.policy.norm_type
        self.training_mp_size = self.config.training_mp_size
        self.bigscience_bloom = False
        self.max_out_tokens = self.config.max_out_tokens
        self.min_out_tokens = self.config.min_out_tokens
        self.scale_attn_by_inverse_layer_idx = getattr(self.config, "scale_attn_by_inverse_layer_idx", False)
        self.use_mup = self.policy.use_mup
        self.return_single_tuple = False
        self.rotary_dim = self.get_rotary_dim()
        self.mlp_after_attn = (self.rotary_dim is None or self.rotary_dim < 0)

        # Attention tensors
        self.qkvw = None
        self.qkvb = None
        self.dense_w = None
        self.dense_b = None
        # MLP tensors
        self._h4h_w = None
        self._h4h_b = None
        self._4hh_w = None
        self._4hh_b = None
        # LayerNorm tensors
        self.attn_nw = None
        self.attn_nb = None
        self.input_nw = None
        self.input_nb = None

        self.mp_group = None
        self.use_triton = False

        # Triton
        self.use_triton = config.use_triton and deepspeed.HAS_TRITON

    def create_ds_model_config(self):
        self.set_hidden_heads(*self.policy.get_hidden_heads())
        assert self.num_attention_heads % self.mp_size == 0,\
            "To run the model parallel across the GPUs, the attention_heads require to be divisible by the world_size!" +\
            "This is because the attention computation is partitioned evenly among the parallel GPUs."

        self.ds_model_config = DeepSpeedInferenceConfig(
            hidden_size=self.hidden_size,
            intermediate_size=self.intermediate_size,
            heads=self.num_attention_heads,
            layer_norm_eps=self.layernorm_epsilon,
            dtype=self.dtype,
            pre_layer_norm=self.pre_layer_norm,
            norm_type=self.norm_type,
            mp_size=self.mp_size,
            return_tuple=self.return_tuple,
            triangular_masking=self.triangular_masking,
            local_attention=self.local_attention,
            window_size=self.window_size,
            rotary_dim=self.rotary_dim,
            mlp_after_attn=self.mlp_after_attn,
            mlp_act_func_type=self.mlp_act_func_type,
            training_mp_size=self.training_mp_size,
            bigscience_bloom=self.bigscience_bloom,
            max_out_tokens=self.max_out_tokens,
            min_out_tokens=self.min_out_tokens,
            scale_attn_by_inverse_layer_idx=self.scale_attn_by_inverse_layer_idx,
            use_mup=self.use_mup,
            return_single_tuple=self.return_single_tuple,
            set_empty_params=self.config.set_empty_params,
            transposed_mode=self.config.transposed_mode,
            use_triton=self.use_triton,
            triton_autotune=self.config.triton_autotune)

        if self.use_triton and deepspeed.HAS_TRITON:
            from .bert import DS_BERTContainer
            if not isinstance(self, DS_BERTContainer):
                raise NotImplementedError("Triton kernels are only for BERT-like models yet")

            if not self.config.triton_autotune:
                from deepspeed.ops.transformer.inference.triton.matmul_ext import fp16_matmul
                fp16_matmul.skip_autotune()

        return self.ds_model_config

    def check_meta_tensor_support(self):
        if hasattr(self.qkvw, 'is_meta'):
            if self.qkvw.is_meta:
                assert self.ckpt_load_enabled, "Meta tensors are not supported for this model currently."
        else:
            raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+")

    def initialize_tensors(self, enable_training=False):
        # Set the tensors from policy (user module) to container (DS module)
        self.set_attention(*self.policy.attention(enable_training=enable_training))
        self.set_mlp(*self.policy.mlp(enable_training=enable_training))
        self.set_layernorm(*self.policy.layernorm())
        #self.check_meta_tensor_support()

    def convert_to_required_dtype(self):
        # Note: converting tensors to fp16 requires that we do it in-place using self.__dict__ and not make a list/dict copy
        if self.dtype in [torch.half, torch.bfloat16]:
            for k, v in self.__dict__.items():
                # The list comprehension is used for MoE tensor lists
                if isinstance(v, list) and all((isinstance(tensor, torch.Tensor) \
                    or isinstance(tensor, torch.nn.Parameter)) for tensor in v):
                    self.__dict__[k] = [moe_tensor.to(self.dtype) for moe_tensor in v]

                if isinstance(v, torch.Tensor) or isinstance(v, torch.nn.Parameter):
                    self.__dict__[k] = v.to(self.dtype)

    def get_rotary_dim(self):
        if hasattr(self.model_config, 'rotary_dim'):
            return self.model_config.rotary_dim
        if hasattr(self.child, 'attention') and hasattr(self.child.attention, 'rotary_ndims'):
            return self.child.attention.rotary_ndims
        return -1

    def set_moe(self, moe=False):
        self.moe = moe

    def set_tensor_parallel_config(self, mp_size, mp_group):
        self.mp_size = mp_size
        self.mp_group = mp_group

    def set_quantization_config(self, quantizer):
        self.quantizer = quantizer

    def set_hidden_heads(self, hidden_size, num_attention_heads, epsilon, intermediate_size):
        """
        Args:
            hidden_size: embedding dimension of the model
            num_attention_heads: number of attention heads in the model
            epsilon: epsilon value for layer norm (same value used for all norms)
            intermediate_size: Size of MLP projection. If `DEFAULT_INTERMEDIATE_SIZE` is passed
                it is assumed to be `4 * hidden_size`
        """
        self.hidden_size = hidden_size
        if intermediate_size == DEFAULT_INTERMEDIATE_SIZE:
            self.intermediate_size = 4 * hidden_size
        else:
            self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.layernorm_epsilon = epsilon

    def set_attention(self, qkvw, qkvb, dense_w, dense_b):
        self.qkvw = qkvw
        self.qkvb = qkvb
        self.dense_w = dense_w
        self.dense_b = dense_b

    def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b):
        self._h4h_w = _h4h_w
        self._h4h_b = _h4h_b
        self._4hh_w = _4hh_w
        self._4hh_b = _4hh_b

    def set_layernorm(self, attn_nw, attn_nb, input_nw, input_nb):
        self.attn_nw = attn_nw
        self.attn_nb = attn_nb
        self.input_nw = input_nw
        self.input_nb = input_nb

    def apply_weight_quantization(self):
        # quantize attention weights
        self.attention_quantization()

        # quantize mlp weights
        self.mlp_quantization()

    def attention_quantization(self):
        self.module.attention.attn_qkvw = self.quantizer.quantize(self.module.attention.attn_qkvw)
        self.module.attention.attn_ow = self.quantizer.quantize(self.module.attention.attn_ow)

    def mlp_quantization(self):
        self.module.mlp.inter_w = self.quantizer.quantize(self.module.mlp.inter_w)
        self.module.mlp.output_w = self.quantizer.quantize(self.module.mlp.output_w)

    def apply_tensor_parallelism(self, mp_replace):
        # setup the new Attention module
        self.attention_qkv_mp(mp_replace)
        self.attention_o_mp(mp_replace)

        # setup the new MLP module
        self.mlp_inter_mp(mp_replace)
        self.mlp_output_mp(mp_replace)

        # Apply weight quantization
        # TODO(cmikeh2): Re-enable this once verified
        #self.apply_weight_quantization()

    def attention_qkv_mp(self, mp_replace, reversed_dim=False):
        self.module.attention.attn_qkvw = mp_replace.strided_copy(self.module.attention.attn_qkvw,
                                                                  self.qkvw,
                                                                  num_splits=3,
                                                                  int8=reversed_dim)
        self.module.attention.attn_qkvb = mp_replace.strided_copy(self.module.attention.attn_qkvb,
                                                                  self.qkvb,
                                                                  num_splits=3,
                                                                  int8=reversed_dim)

    def attention_o_mp(self, mp_replace, reversed_dim=False):
        self.module.attention.attn_ow = mp_replace.copy(self.module.attention.attn_ow, self.dense_w, int8=reversed_dim)
        self.module.attention.attn_ob = mp_replace.copy(self.module.attention.attn_ob,
                                                        self.dense_b,
                                                        int8=reversed_dim,
                                                        allocate_tensor=reversed_dim)

    def mlp_inter_mp(self, mp_replace, reversed_dim=False):
        self.module.mlp.inter_w = mp_replace.copy(self.module.mlp.inter_w, self._h4h_w, int8=reversed_dim)
        self.module.mlp.inter_b = mp_replace.copy(self.module.mlp.inter_b, self._h4h_b, int8=reversed_dim)

    def mlp_output_mp(self, mp_replace, reversed_dim=False):
        self.module.mlp.output_w = mp_replace.copy(self.module.mlp.output_w, self._4hh_w, int8=reversed_dim)
        self.module.mlp.output_b = mp_replace.copy(self.module.mlp.output_b,
                                                   self._4hh_b,
                                                   int8=reversed_dim,
                                                   allocate_tensor=reversed_dim)

    def copy_data_to_new_module(self):
        params = {'attn_nw': self.attn_nw, 'attn_nb': self.attn_nb}
        for key in params:
            if params[key] is None:
                setattr(self.module.mlp, key, None)
            else:
                setattr(self.module.mlp, key,
                        torch.nn.parameter.Parameter(params[key].to(get_accelerator().current_device_name())))

        params = {'norm_w': self.input_nw, 'norm_b': self.input_nb}
        for key in params:
            if params[key] is None:
                setattr(self.module, key, None)
            else:
                setattr(self.module, key,
                        torch.nn.parameter.Parameter(params[key].to(get_accelerator().current_device_name())))

    def transpose(self):
        self.transpose_attention()
        self.transpose_mlp()

    def transpose_attention(self):
        if self.attn_linear_layer:
            self.qkvw = self.transpose_impl(self.qkvw.data)
            self.dense_w = self.transpose_impl(self.dense_w.data)

    def transpose_mlp(self):
        if self.mlp_linear_layer:
            self._h4h_w = self.transpose_impl(self._h4h_w.data)
            self._4hh_w = self.transpose_impl(self._4hh_w.data)

    def transpose_impl(self, data):
        data = data.contiguous()
        data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
        data = data.reshape(data.shape[-1], data.shape[-2])
        data.to(get_accelerator().current_device_name())
        return data

    def get_all_params(self):
        params = [
            self.attn_nw,
            self.attn_nb,
            self.input_nw,
            self.input_nb,
        ]

        params.extend(self.get_attn_params())
        params.extend(self.get_mlp_params())

        return params

    def get_attn_params(self):
        return [self.qkvw, self.qkvb, self.dense_w, self.dense_b]

    def get_mlp_params(self):
        return [self._h4h_w, self._h4h_b, self._4hh_w, self._4hh_b]
```
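`set_hidden_heads` is the one place where the container resolves geometry defaults: passing the `DEFAULT_INTERMEDIATE_SIZE` sentinel (-1) makes it assume the conventional 4x MLP expansion. A hedged standalone restatement of just that rule (the helper name is illustrative, not the container API):

```python
# Standalone restatement of the intermediate-size default in set_hidden_heads
# (illustrative; the real logic lives on BaseTransformerContainer above).
DEFAULT_INTERMEDIATE_SIZE = -1


def resolve_intermediate_size(hidden_size: int, intermediate_size: int) -> int:
    # The sentinel value means "use the conventional 4x expansion".
    if intermediate_size == DEFAULT_INTERMEDIATE_SIZE:
        return 4 * hidden_size
    return intermediate_size


assert resolve_intermediate_size(1024, DEFAULT_INTERMEDIATE_SIZE) == 4096
assert resolve_intermediate_size(1024, 2730) == 2730  # e.g. an explicitly-sized MLP
```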
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/base_moe.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 3 |
+
|
| 4 |
+
# DeepSpeed Team
|
| 5 |
+
|
| 6 |
+
# Create a container object to save model-specific tensors using the policy file above.
|
| 7 |
+
from .base import *
|
| 8 |
+
from deepspeed import comm as dist
import deepspeed.ops.transformer as transformer_inference
from deepspeed.accelerator import get_accelerator


class BaseTransformerMoEContainer(BaseTransformerContainer):

    def __init__(self, **kwargs):
        # Call the parent class's __init__ to initialize the tensors and configs it defines
        super().__init__(**kwargs)

        self.num_experts = self.policy.get_num_experts()
        self.ep_world_size = dist.get_world_size()
        self.local_ep_size = 1 if self.num_experts < self.ep_world_size else self.num_experts // self.ep_world_size

        self.layer_norm_eps = self.config.layer_norm_eps if hasattr(self.config, 'layer_norm_eps') else 1e-12

        # MoE models keep a list of per-expert MLP tensors
        self._h4h_w = []
        self._h4h_b = []
        self._4hh_w = []
        self._4hh_b = []

        # Residual MoE needs extra parameters
        self._res_h4h_w = None
        self._res_h4h_b = None
        self._res_4hh_w = None
        self._res_4hh_b = None
        self._res_coef = None

    def create_ds_model_config(self):
        self.set_hidden_heads(*self.policy.get_hidden_heads())
        assert self.num_attention_heads % self.mp_size == 0, \
            "To run the model in parallel across the GPUs, the number of attention heads must be divisible by the world size! " \
            "This is because the attention computation is partitioned evenly among the parallel GPUs."

        self.ds_model_config = transformer_inference.DeepSpeedMoEInferenceConfig(
            hidden_size=self.hidden_size,
            heads=self.num_attention_heads,
            layer_norm_eps=self.layer_norm_eps,
            fp16=self.fp16,
            pre_layer_norm=self.pre_layer_norm,
            mp_size=self.mp_size,
            q_int8=self.quantize,
            moe_experts=self.local_ep_size,
            global_experts=self.num_experts,
            mlp_type=self.config.moe.type,
            scale_attn_by_inverse_layer_idx=self.scale_attn_by_inverse_layer_idx,
        )

        return self.ds_model_config

    def initialize_tensors(self):
        # Copy the tensors from the policy (user module) into the container (DS module)
        self.set_attention(*self.policy.attention())
        self.set_mlp(self.config.moe.type)
        self.set_layernorm(*self.policy.layernorm())

    def set_mlp(self, config_moe_type):
        if config_moe_type == 'standard':
            self._h4h_w, self._h4h_b, \
            self._4hh_w, self._4hh_b = self.policy.mlp()
        else:
            self._h4h_w, self._h4h_b, self._4hh_w, \
            self._4hh_b, self._res_h4h_w, self._res_h4h_b, \
            self._res_4hh_w, self._res_4hh_b, \
            self._res_coef = self.policy.mlp(config_moe_type)

    def transpose(self):
        self.transpose_attention()
        self.transpose_mlp()

        if self.config.moe.type == 'residual':
            self.transpose_residual()

    def transpose_mlp(self):
        self._h4h_w = [self.transpose_impl(moe_w.data) for moe_w in self._h4h_w]
        self._4hh_w = [self.transpose_impl(moe_w.data) for moe_w in self._4hh_w]

    def transpose_residual(self):
        self._res_h4h_w.data = self.transpose_impl(self._res_h4h_w.data)
        self._res_4hh_w.data = self.transpose_impl(self._res_4hh_w.data)
        self._res_coef.data = self.transpose_impl(self._res_coef.data)

    def apply_tensor_parallelism(self, mp_replace):
        # set up the new Attention module
        self.attention_qkv_mp(mp_replace)
        self.attention_o_mp(mp_replace)

        # quantize attention weights
        self.attention_quantization()

        # set up the new MLP module
        self.mlp_mp()

    def mlp_mp(self):
        gpu_index = dist.get_rank()
        for ep_index in range(self.local_ep_size):
            # mlp inter
            self.module.mlp[ep_index].inter_w.data = self._h4h_w[gpu_index * self.local_ep_size + ep_index].to(
                get_accelerator().current_device_name())
            self.module.mlp[ep_index].inter_b.data = self._h4h_b[gpu_index * self.local_ep_size + ep_index].to(
                get_accelerator().current_device_name())

            # mlp output
            self.module.mlp[ep_index].output_w.data = self._4hh_w[gpu_index * self.local_ep_size + ep_index].to(
                get_accelerator().current_device_name())
            self.module.mlp[ep_index].output_b.data = self._4hh_b[gpu_index * self.local_ep_size + ep_index].to(
                get_accelerator().current_device_name())

    def copy_data_to_new_module(self):
        self.module.attn_nw.data = self.attn_nw.to(get_accelerator().current_device_name())
        self.module.attn_nb.data = self.attn_nb.to(get_accelerator().current_device_name())

        self.module.norm_w.data.copy_(self.input_nw.to(get_accelerator().current_device_name()))
        self.module.norm_b.data.copy_(self.input_nb.to(get_accelerator().current_device_name()))

        if self.config.moe.type == 'residual':
            self.module.res_mlp.inter_w.data = self._res_h4h_w.to(get_accelerator().current_device_name())
            self.module.res_mlp.inter_b.data = self._res_h4h_b.to(get_accelerator().current_device_name())
            self.module.res_mlp.output_w.data = self._res_4hh_w.to(get_accelerator().current_device_name())
            self.module.res_mlp.output_b.data = self._res_4hh_b.to(get_accelerator().current_device_name())
            self.module.res_coef.data = self._res_coef.to(get_accelerator().current_device_name())
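The index arithmetic in mlp_mp above is what implements expert parallelism: rank r owns the contiguous slice of experts [r * local_ep_size, (r + 1) * local_ep_size). A minimal standalone sketch of that layout, with illustrative values for the expert count and world size (not taken from the file):

# Standalone sketch of the expert-to-rank layout used by mlp_mp above.
# num_experts and ep_world_size are illustrative values, not from the file.
num_experts = 8
ep_world_size = 4
local_ep_size = 1 if num_experts < ep_world_size else num_experts // ep_world_size

for gpu_index in range(ep_world_size):
    owned = [gpu_index * local_ep_size + ep_index for ep_index in range(local_ep_size)]
    print(f"rank {gpu_index} holds experts {owned}")
# rank 0 holds experts [0, 1]; rank 1 holds experts [2, 3]; and so on.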
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/bert.py
ADDED
@@ -0,0 +1,93 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .base import *
from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference
import deepspeed  # needed for deepspeed.HAS_TRITON below
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy


class DS_BERTContainer(BaseTransformerContainer):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # All model specific things should be defined here instead of the base class.
        self.return_tuple = True
        self.triangular_masking = False
        self.use_triton = kwargs['config'].use_triton and deepspeed.HAS_TRITON

    def create_module(self, config=None):
        _config = config if config is not None else self.ds_model_config
        self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group)
        self.module.config.scale_attention = self.scale_attention
        return self.module


class HFBertLayerPolicy(TransformerPolicy):

    def __init__(self, client_module, inference=False):
        super().__init__(inference, pre_attn_norm=False)
        self.client_module = client_module
        self.cuda_graph_supported = True

        if HFBertLayerPolicy._orig_layer_class is None:
            try:
                import transformers
                HFBertLayerPolicy._orig_layer_class = [
                    transformers.models.bert.modeling_bert.BertLayer,
                    transformers.models.roberta.modeling_roberta.RobertaLayer
                ]
            except ImportError:
                HFBertLayerPolicy._orig_layer_class = None

    def get_hidden_heads(self):
        if self.pre_attn_norm:
            attention_layernorm = self.client_module.PostAttentionLayerNorm
        else:
            attention_layernorm = self.client_module.attention.output.LayerNorm
        return self.client_module.attention.self.query.weight.shape[1], \
            self.client_module.attention.self.num_attention_heads, \
            attention_layernorm.eps, \
            DEFAULT_INTERMEDIATE_SIZE

    def attention(self, enable_training=False):
        qw = self.client_module.attention.self.query.weight
        qb = self.client_module.attention.self.query.bias
        kw = self.client_module.attention.self.key.weight
        kb = self.client_module.attention.self.key.bias
        vw = self.client_module.attention.self.value.weight
        vb = self.client_module.attention.self.value.bias

        qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
        qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)

        return qkvw, \
            qkvb, \
            self.client_module.attention.output.dense.weight, \
            self.client_module.attention.output.dense.bias

    def mlp(self, enable_training=False):
        if self.pre_attn_norm:
            intermediate_ff = self.client_module.intermediate.dense_act
        else:
            intermediate_ff = self.client_module.intermediate.dense

        return intermediate_ff.weight, intermediate_ff.bias, \
            self.client_module.output.dense.weight, \
            self.client_module.output.dense.bias

    def layernorm(self):
        if self.pre_attn_norm:
            attention_layernorm = self.client_module.PostAttentionLayerNorm
            transformer_layernorm = self.client_module.PreAttentionLayerNorm
        else:
            attention_layernorm = self.client_module.attention.output.LayerNorm
            transformer_layernorm = self.client_module.output.LayerNorm
        return attention_layernorm.weight, \
            attention_layernorm.bias, \
            transformer_layernorm.weight, \
            transformer_layernorm.bias
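HFBertLayerPolicy.attention() above fuses the three separate Q/K/V projections into a single qkvw/qkvb pair by concatenating along the output dimension, so the inference kernel can run one matmul instead of three. A self-contained sketch of that fusion (not part of the file above; the hidden size is an illustrative value):

import torch
from torch.nn.parameter import Parameter

# Standalone sketch of the QKV fusion done in HFBertLayerPolicy.attention().
hidden = 4  # illustrative size, not from the file
q, k, v = (torch.nn.Linear(hidden, hidden) for _ in range(3))

qkvw = Parameter(torch.cat((q.weight, k.weight, v.weight), dim=0), requires_grad=False)
qkvb = Parameter(torch.cat((q.bias, k.bias, v.bias), dim=0), requires_grad=False)

# One fused matmul now computes all three projections at once.
x = torch.randn(2, hidden)
fused = x @ qkvw.t() + qkvb               # shape (2, 3 * hidden)
assert torch.allclose(fused[:, :hidden], q(x), atol=1e-6)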
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/bloom.py
ADDED
@@ -0,0 +1,131 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .base import *
from .features.meta_tensor import MetaTensorContainer
from .features.hybrid_engine import HybridEngineContainer
from deepspeed.model_implementations.transformers.ds_bloom import DeepSpeedBloomInference
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_get_lora

supported_models = {None}


class DS_BloomContainer(MetaTensorContainer, HybridEngineContainer, BaseTransformerContainer):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # All model specific things should be defined here instead of the base class.
        self.bigscience_bloom = True
        self.triangular_masking = False

    def create_module(self, config=None):
        _config = config if config is not None else self.ds_model_config

        self.module = DeepSpeedBloomInference(_config, mp_group=self.mp_group)
        self.module.config.scale_attention = self.scale_attention
        self.module.config.invert_mask = False
        return self.module

    def attention_qkv_mp(self, mp_replace, reversed_dim=False):
        self.module.attention.attn_qkvw = mp_replace.copy(self.module.attention.attn_qkvw, self.qkvw)
        self.module.attention.attn_qkvb = mp_replace.copy(self.module.attention.attn_qkvb, self.qkvb)

    def get_lora_matched_pair(self):
        """
        Necessary to implement for `HybridEngineContainer`
        """
        fc1_lora, fc2_lora, qkv_lora, out_lora = self.get_lora_params()
        ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (qkv_lora, self.qkvw), (out_lora, self.dense_w)]
        return ret

    def set_lora_params(self):
        """
        Necessary to implement for `HybridEngineContainer`
        """
        self.lora_params = [
            maybe_get_lora(p) for p in [
                self.policy.client_module.mlp.dense_h_to_4h,
                self.policy.client_module.mlp.dense_4h_to_h,
                self.policy.client_module.self_attention.query_key_value,
                self.policy.client_module.self_attention.dense
            ]
        ]

    def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
        param_names = (
            'self_attention.query_key_value.weight',
            'self_attention.query_key_value.bias',
            'self_attention.dense.weight',
            'self_attention.dense.bias',
            'mlp.dense_h_to_4h.weight',
            'mlp.dense_h_to_4h.bias',
            'mlp.dense_4h_to_h.weight',
            'mlp.dense_4h_to_h.bias',
            'post_attention_layernorm.weight',
            'post_attention_layernorm.bias',
            'input_layernorm.weight',
            'input_layernorm.bias',
        )
        for i in range(0, 2):
            maybe_copy(module.attention,
                       sd,
                       weight_quantizer,
                       mp_replace,
                       transformer_param_names[i],
                       prefix + param_names[i],
                       qkv=True,
                       megatron_v2=self.policy.is_megatron_v2,
                       split_qkv=self.policy.split_qkv)
        for i in range(2, 4):
            maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i],
                       prefix + param_names[i])
        for i in range(4, 10):
            maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i],
                       prefix + param_names[i])
        for i in range(10, 12):
            maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i], prefix + param_names[i])


class BLOOMLayerPolicy(TransformerPolicy):
    _orig_layer_class = None

    def __init__(self, client_module, inference=True, use_load_prefix=True, split_qkv=False):
        super().__init__(inference, linear_layer=True, use_load_prefix=use_load_prefix, split_qkv=split_qkv)
        self.client_module = client_module
        try:
            import transformers
            BLOOMLayerPolicy._orig_layer_class = transformers.models.bloom.modeling_bloom.BloomBlock
            global supported_models
            supported_models.update({transformers.models.bloom.modeling_bloom.BloomModel})
        except Exception as e:
            print(f"WARNING! Setting BLOOMLayerPolicy._orig_layer_class to None due to Exception: {e}")
            BLOOMLayerPolicy._orig_layer_class = None

    def get_hidden_heads(self):
        return self.client_module.self_attention.hidden_size, \
            self.client_module.self_attention.num_heads, \
            self.client_module.input_layernorm.eps, \
            DEFAULT_INTERMEDIATE_SIZE

    def attention(self, enable_training=False):
        return self.client_module.self_attention.query_key_value.weight, \
            self.client_module.self_attention.query_key_value.bias, \
            self.client_module.self_attention.dense.weight, \
            self.client_module.self_attention.dense.bias

    def mlp(self, enable_training=False):
        return self.client_module.mlp.dense_h_to_4h.weight, \
            self.client_module.mlp.dense_h_to_4h.bias, \
            self.client_module.mlp.dense_4h_to_h.weight, \
            self.client_module.mlp.dense_4h_to_h.bias

    def layernorm(self):
        return self.client_module.post_attention_layernorm.weight, \
            self.client_module.post_attention_layernorm.bias, \
            self.client_module.input_layernorm.weight, \
            self.client_module.input_layernorm.bias
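load_params above dispatches a fixed tuple of checkpoint parameter names by index range: entries 0-1 (the fused QKV weight and bias) and 2-3 (the attention output projection) go to the attention submodule, 4-9 (both MLP matmuls plus the post-attention layernorm) to the MLP submodule, and 10-11 (the input layernorm) to the block itself. A standalone sketch of that range-partitioned dispatch; copy_param and the destination strings are illustrative stand-ins for maybe_copy and the real submodules:

# Sketch of the range-partitioned dispatch in DS_BloomContainer.load_params.
param_names = (
    'self_attention.query_key_value.weight', 'self_attention.query_key_value.bias',
    'self_attention.dense.weight', 'self_attention.dense.bias',
    'mlp.dense_h_to_4h.weight', 'mlp.dense_h_to_4h.bias',
    'mlp.dense_4h_to_h.weight', 'mlp.dense_4h_to_h.bias',
    'post_attention_layernorm.weight', 'post_attention_layernorm.bias',
    'input_layernorm.weight', 'input_layernorm.bias',
)

def copy_param(dest, src_name):
    # Stand-in for maybe_copy: just show which destination each name maps to.
    print(f"{src_name} -> {dest}")

for i, name in enumerate(param_names):
    if i < 4:
        dest = "module.attention"   # QKV (0-1) and output projection (2-3)
    elif i < 10:
        dest = "module.mlp"         # both MLP matmuls plus the post-attn layernorm
    else:
        dest = "module"             # input layernorm lives on the block itself
    copy_param(dest, "prefix." + name)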
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/clip.py
ADDED
@@ -0,0 +1,73 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .base import *
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy


class DS_CLIPContainer(BaseTransformerContainer):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # All model specific things should be defined here instead of the base class.

    def create_module(self, config=None):
        _config = config if config is not None else self.ds_model_config
        self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
        self.module.config.scale_attention = self.scale_attention
        return self.module


class HFCLIPLayerPolicy(TransformerPolicy):

    def __init__(self, client_module, inference=False):
        super().__init__(inference, pre_attn_norm=True, scale_attention=True)
        self.client_module = client_module
        self.cuda_graph_supported = True

        if HFCLIPLayerPolicy._orig_layer_class is None:
            try:
                import transformers
                HFCLIPLayerPolicy._orig_layer_class = transformers.models.clip.modeling_clip.CLIPEncoderLayer
            except ImportError:
                HFCLIPLayerPolicy._orig_layer_class = None

    def get_hidden_heads(self):
        return self.client_module.self_attn.q_proj.weight.shape[1], \
            self.client_module.self_attn.num_heads, \
            self.client_module.layer_norm1.eps, \
            DEFAULT_INTERMEDIATE_SIZE

    def attention(self, enable_training=False):
        qw = self.client_module.self_attn.q_proj.weight
        qb = self.client_module.self_attn.q_proj.bias
        kw = self.client_module.self_attn.k_proj.weight
        kb = self.client_module.self_attn.k_proj.bias
        vw = self.client_module.self_attn.v_proj.weight
        vb = self.client_module.self_attn.v_proj.bias

        qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
        qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)

        return qkvw, \
            qkvb, \
            self.client_module.self_attn.out_proj.weight, \
            self.client_module.self_attn.out_proj.bias

    def mlp(self, enable_training=False):
        return self.client_module.mlp.fc1.weight, \
            self.client_module.mlp.fc1.bias, \
            self.client_module.mlp.fc2.weight, \
            self.client_module.mlp.fc2.bias

    def layernorm(self):
        return self.client_module.layer_norm2.weight, \
            self.client_module.layer_norm2.bias, \
            self.client_module.layer_norm1.weight, \
            self.client_module.layer_norm1.bias
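Unlike HFBertLayerPolicy, HFCLIPLayerPolicy passes pre_attn_norm=True: layer_norm1 runs before attention rather than after the residual add. A toy sketch of the two orderings, using a plain Linear as a stand-in for self-attention (illustrative only, not the DeepSpeed kernel):

import torch

def pre_ln_block(x, ln, attn):
    # CLIP-style: normalize first, then attend, then add the residual.
    return x + attn(ln(x))

def post_ln_block(x, ln, attn):
    # BERT-style: attend first, then normalize the residual sum.
    return ln(x + attn(x))

hidden = 8                                   # illustrative size
ln = torch.nn.LayerNorm(hidden)
attn = torch.nn.Linear(hidden, hidden)       # stand-in for self-attention
x = torch.randn(2, hidden)
print(pre_ln_block(x, ln, attn).shape, post_ln_block(x, ln, attn).shape)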
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/distil_bert.py
ADDED
@@ -0,0 +1,82 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .base import *
from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference
import deepspeed  # needed for deepspeed.HAS_TRITON below
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy


class DS_DistilBERTContainer(BaseTransformerContainer):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # All model specific things should be defined here instead of the base class.
        self.triangular_masking = False
        self.return_single_tuple = True
        self.use_triton = kwargs['config'].use_triton and deepspeed.HAS_TRITON

    def create_module(self, config=None):
        _config = config if config is not None else self.ds_model_config
        self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group)
        self.module.config.scale_attention = self.scale_attention
        return self.module


class HFDistilBertLayerPolicy(TransformerPolicy):
    _orig_layer_class = None

    def __init__(self, client_module, inference=False, preln=False):
        super().__init__(inference)
        self.client_module = client_module
        self.preln = preln
        self.cuda_graph_supported = True
        if HFDistilBertLayerPolicy._orig_layer_class is None:
            try:
                import transformers
                HFDistilBertLayerPolicy._orig_layer_class = [
                    transformers.models.distilbert.modeling_distilbert.TransformerBlock,
                ]
            except ImportError:
                HFDistilBertLayerPolicy._orig_layer_class = None

    def get_hidden_heads(self):
        return self.client_module.attention.q_lin.weight.shape[1], \
            self.client_module.attention.n_heads, \
            self.client_module.sa_layer_norm.eps, \
            DEFAULT_INTERMEDIATE_SIZE

    def attention(self, enable_training=False):
        qw = self.client_module.attention.q_lin.weight
        qb = self.client_module.attention.q_lin.bias
        kw = self.client_module.attention.k_lin.weight
        kb = self.client_module.attention.k_lin.bias
        vw = self.client_module.attention.v_lin.weight
        vb = self.client_module.attention.v_lin.bias

        qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
        qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)

        return qkvw, \
            qkvb, \
            self.client_module.attention.out_lin.weight, \
            self.client_module.attention.out_lin.bias

    def mlp(self, enable_training=False):
        intermediate_ff = self.client_module.ffn.lin1

        return intermediate_ff.weight, intermediate_ff.bias, \
            self.client_module.ffn.lin2.weight, \
            self.client_module.ffn.lin2.bias

    def layernorm(self):
        attention_layernorm = self.client_module.sa_layer_norm
        transformer_layernorm = self.client_module.output_layer_norm
        return attention_layernorm.weight, \
            attention_layernorm.bias, \
            transformer_layernorm.weight, \
            transformer_layernorm.bias
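All of the policies in these files implement the same small contract: read tensors out of a user module and return them as plain tuples (get_hidden_heads, attention, mlp, layernorm) so a container can repack them into a fused inference module. A toy sketch of that contract; ToyBlock and ToyPolicy are hypothetical classes, not DeepSpeed APIs, and the intermediate-size entry is omitted for brevity:

import torch

# Toy illustration of the policy contract the containers above rely on.
class ToyBlock(torch.nn.Module):
    def __init__(self, hidden=8, heads=2):
        super().__init__()
        self.qkv = torch.nn.Linear(hidden, 3 * hidden)
        self.out = torch.nn.Linear(hidden, hidden)
        self.ln = torch.nn.LayerNorm(hidden)
        self.n_heads = heads

class ToyPolicy:
    def __init__(self, client_module):
        self.client_module = client_module

    def get_hidden_heads(self):
        m = self.client_module
        return m.qkv.weight.shape[1], m.n_heads, m.ln.eps

    def attention(self):
        m = self.client_module
        return m.qkv.weight, m.qkv.bias, m.out.weight, m.out.bias

policy = ToyPolicy(ToyBlock())
print(policy.get_hidden_heads())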
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__init__.py
ADDED
@@ -0,0 +1,9 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .gated_mlp import HybridGatedMLPContainer
from .megatron import MegatronContainer
from .meta_tensor import MetaTensorContainer
from .split_qkv import HybridSplitQKVContainer
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (430 Bytes)
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/gated_mlp.cpython-310.pyc
ADDED
Binary file (3.96 kB)
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/hybrid_engine.cpython-310.pyc
ADDED
Binary file (8.63 kB)
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/hybrid_megatron.cpython-310.pyc
ADDED
Binary file (4.05 kB)
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/megatron.cpython-310.pyc
ADDED
Binary file (1.51 kB)
venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/meta_tensor.cpython-310.pyc
ADDED
Binary file (3.48 kB)