diff --git a/.gitattributes b/.gitattributes index 46b42f0407632e45394eeb91839402d13d273738..0167660d5e399c31cd91aa2fc2fdbc3854ef488c 100644 --- a/.gitattributes +++ b/.gitattributes @@ -130,3 +130,6 @@ lm-evaluation-harness/wandb/run-20240606_045127-vb760voe/run-vb760voe.wandb filt venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_infer.so.8 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/comm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2c26ef4913085748dc10d720bb723d784ee9335e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/comm/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .utils import * +from .comm import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..655f50de988ba280f6b6b29b97ec8a217763d34f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f688146f1becee46b2d120952a00ae12002e7b35 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/reduce_op.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/reduce_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..023d97e275b9448dcd3e043a89bea60cddfb7d02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/reduce_op.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..244b90f3a6edb33243768d99550e728b5bd6e0bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/backend.py b/venv/lib/python3.10/site-packages/deepspeed/comm/backend.py new file mode 100644 index 0000000000000000000000000000000000000000..15dc96c40521222128310740110420ac38668819 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/comm/backend.py @@ -0,0 +1,47 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+"""
+DeepSpeed Communication Backend.
+
+In the future, directly use NCCL/MPI/Gloo/etc without requiring torch.distributed. Simply wrap torch.distributed for now.
+
+# Custom DS Backends -- Direct C/Ops
+ - NCCL -- [EXPERIMENTAL]
+ - MPI -- [EXPERIMENTAL]
+ - RCCL -- [EXPERIMENTAL]
+ - GLOO -- [EXPERIMENTAL]
+# DS backend wrapper for torch.distributed [DEFAULT]
+ - T-NCCL -- [DEFAULT]
+ - T-GLOO
+ - T-MPI
+
+# Backend is the base class
+ -- NcclBackend, MpiBackend, and TorchBackend are the main subclasses. TorchBackend is the only officially supported backend for now.
+"""
+
+
+class Backend(object):
+
+    def __init__(self, name='backend', rank=0, size=1):
+        self.name = name
+        # The world size and rank of the world process group
+        self.world_group = None
+        self.world_size = size
+        self.world_rank = rank
+        # Single process group (pg) implementation for now but keep a list for future
+        self.process_groups = []
+        self.initialized = False
+
+    def is_initialized(self):
+        return self.initialized
+
+    def new_group(self):
+        # create a new pg and add it to pg list
+        pass
+
+    def init_process_group(self):
+        # subclasses will initialize them fully
+        # - initialize a default world process group and add it to pg list
+        self.initialized = True
diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/reduce_op.py b/venv/lib/python3.10/site-packages/deepspeed/comm/reduce_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..49393f0c7227d667508359e227c6761dd6b35505
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/comm/reduce_op.py
@@ -0,0 +1,18 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from enum import Enum
+
+
+class ReduceOp(Enum):
+    SUM = 0
+    PRODUCT = 1
+    MIN = 2
+    MAX = 3
+    BAND = 4
+    BOR = 5
+    BXOR = 6
+    AVG = 7
+    UNUSED = 8
diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/torch.py b/venv/lib/python3.10/site-packages/deepspeed/comm/torch.py
new file mode 100644
index 0000000000000000000000000000000000000000..14d4f384731565b118347e378d9f374926bff35c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/comm/torch.py
@@ -0,0 +1,392 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import deepspeed +from deepspeed import utils + +from .utils import * +from .backend import * +from .comm import * +from ..runtime import compiler +from deepspeed.utils.torch import required_torch_version +import os + +DS_COMM_ALL_GATHER_OFF = False +DS_COMM_REDUCE_SCATTER_OFF = False +DS_COMM_BROADCAST_OFF = False +DS_COMM_ALL_REDUCE_OFF = False +DS_COMM_REDUCE_OFF = False + + +def build_shm_op(): + builder = get_accelerator().create_op_builder("ShareMemCommBuilder") + if builder is None or not deepspeed.ops.__compatible_ops__[builder.NAME]: + return None + shm_cpp_module = builder.load() + print(f'DeepSpeed {builder.absolute_name()} built successfully') + return shm_cpp_module + + +def has_coalescing_manager(): + has_c10d = hasattr(torch.distributed, 'distributed_c10d') + return has_c10d and hasattr(torch.distributed.distributed_c10d, '_coalescing_manager') + + +def has_all_reduce_coalesced(): + return hasattr(torch.distributed, "all_reduce_coalesced") and required_torch_version(min_version=1.13) + + +def get_coalescing_manager(group, device, reqs, async_op): + if required_torch_version(min_version=2.0, max_version=2.0): + return torch.distributed.distributed_c10d._coalescing_manager(group, device=device, reqs=reqs) + elif required_torch_version(min_version=2.1): + return torch.distributed.distributed_c10d._coalescing_manager(group, device=device, async_ops=async_op) + else: + return torch.distributed.distributed_c10d._coalescing_manager(group, reqs) + + +##Utilities to turn comm off +##TODO: move to base comm (wrapper) +def all_gather_comm_off(flag=False): + global DS_COMM_ALL_GATHER_OFF + DS_COMM_ALL_GATHER_OFF = flag + + +def reduce_scatter_comm_off(flag=False): + global DS_COMM_REDUCE_SCATTER_OFF + DS_COMM_REDUCE_SCATTER_OFF = flag + + +def broadcast_comm_off(flag=False): + global DS_COMM_BROADCAST_OFF + DS_COMM_BROADCAST_OFF = flag + + +def all_reduce_comm_off(flag=False): + global DS_COMM_ALL_REDUCE_OFF + DS_COMM_ALL_REDUCE_OFF = flag + + +def reduce_comm_off(flag=False): + global DS_COMM_REDUCE_OFF + DS_COMM_REDUCE_OFF = flag + + +#assumption: all_gather and reduce scatter +## are what we care about +def backward_comm_off(flag=False): + all_gather_comm_off(flag) + reduce_scatter_comm_off(flag) + + +class Noop: + + def wait(self): + return None + + +class TorchBackend(Backend): + """ + A light-weight wrapper class for torch.distributed API. + Only a subset of functions are wrapped. Once the init_process_group + is initialized, standard torch.distributed.* can be used directly + so no need to wrap all the functions. We can keep adding wrappers as + needed. 
+ """ + + def __init__(self, backend, timeout, init_method, rank=-1, world_size=-1, name='torch'): + super(TorchBackend, self).__init__() + self.shm_comm_op = build_shm_op() + self.has_all_reduce_coalesced = has_all_reduce_coalesced() + self.has_coalescing_manager = has_coalescing_manager() + self.all_gather_function = self.get_all_gather_function() + self.reduce_scatter_function = self.get_reduce_scatter_function() + self.initialized = True + self.name = name + # Future functionality to support ds.initialize() on a single GPU + # The idea is to fake that dist backend is initialized even when + # it is not so we can run on a single GPU without doing any init_process_group + self.single_gpu_mode = True + self.init_process_group(backend, timeout, init_method, rank, world_size) + if self.shm_comm_op != None: + self.shm_comm_op.initialize(self.get_world_size(), self.get_rank()) + + @classmethod + @compiler.disable + def get_all_gather_function(self): + if hasattr(torch.distributed, "all_gather_into_tensor"): + return torch.distributed.all_gather_into_tensor + elif hasattr(torch.distributed, "_all_gather_base"): + return torch.distributed._all_gather_base + return None + + @classmethod + @compiler.disable + def get_reduce_scatter_function(self): + if hasattr(torch.distributed, "reduce_scatter_tensor"): + return torch.distributed.reduce_scatter_tensor + elif hasattr(torch.distributed, "_reduce_scatter_base"): + return torch.distributed._reduce_scatter_base + return None + + def has_all_gather_into_tensor(self): + return self.all_gather_function is not None + + def has_reduce_scatter_tensor(self): + return self.reduce_scatter_function is not None + + def init_process_group(self, backend, timeout, init_method, rank, world_size): + if not torch.distributed.is_initialized(): + torch.distributed.init_process_group(backend, + timeout=timeout, + init_method=init_method, + rank=rank, + world_size=world_size) + self.using_mpi = torch.distributed.get_backend() == 'mpi' + + @compiler.disable + def all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False): + op = self._reduce_op(op) + return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op) + + @compiler.disable + def inference_all_reduce(self, tensor, op, group=None): + if self.shm_comm_op == None or self.shm_comm_op.inference_all_reduce(tensor, op) == -1: + op = self._reduce_op(op) + return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=False) + + @compiler.disable + def all_reduce_coalesced(self, tensors, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False): + """ proxy func to torch.distributed.all_reduce_coalesced, + which is included in PyTorch 1.13 and above + """ + if not self.has_all_reduce_coalesced: + raise RuntimeError(f"Current torch version does not have all_reduce_coalesced " + f"api (torch.__version__: {torch.__version__})") + op = self._reduce_op(op) + return torch.distributed.all_reduce_coalesced(tensors=tensors, op=op, group=group, async_op=async_op) + + @compiler.disable + def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False): + if DS_COMM_REDUCE_OFF: + if int(os.getenv('RANK', '0')) == 0: + utils.logger.warning("REDUCE is OFF") + return Noop() + return torch.distributed.reduce(tensor=tensor, dst=dst, op=self._reduce_op(op), group=group, async_op=async_op) + + @compiler.disable + def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_op=False): + if DS_COMM_REDUCE_SCATTER_OFF: + if 
int(os.getenv('RANK', '0')) == 0: + utils.logger.warning("REDUCE SCATTER is OFF") + return Noop() + else: + return torch.distributed.reduce_scatter(output=output, + input_list=input_list, + op=self._reduce_op(op), + group=group, + async_op=async_op) + + @compiler.disable + def broadcast(self, tensor, src, group=None, async_op=False): + if DS_COMM_BROADCAST_OFF: + if int(os.getenv('RANK', '0')) == 0: + utils.logger.warning("BROADCAST is OFF") + return Noop() + else: + return torch.distributed.broadcast(tensor=tensor, src=src, group=group, async_op=async_op) + + @compiler.disable + def all_gather(self, tensor_list, tensor, group=None, async_op=False): + if DS_COMM_ALL_GATHER_OFF: + if int(os.getenv('RANK', '0')) == 0: + utils.logger.warning("All Gather is OFF") + return Noop() + else: + return torch.distributed.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op) + + @compiler.disable + def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False): + if self.has_all_gather_into_tensor(): + return self.all_gather_function(output_tensor=output_tensor, + input_tensor=input_tensor, + group=group, + async_op=async_op) + + @compiler.disable + def all_gather_base(self, output_tensor, input_tensor, group=None, async_op=False): + if DS_COMM_ALL_GATHER_OFF: + if int(os.getenv('RANK', '0')) == 0: + utils.logger.warning("All Gather is OFF") + return Noop() + else: + if self.has_allgather_base: + return torch.distributed.distributed_c10d._all_gather_base(output_tensor=output_tensor, + input_tensor=input_tensor, + group=group, + async_op=async_op) + else: + utils.logger.warning("unable to find torch.distributed._all_gather_base. will fall back to " + "torch.distributed.reduce_scatter which will result in suboptimal performance. " + "please consider upgrading your pytorch installation.") + pass + + @compiler.disable + def all_gather_coalesced(self, output_tensors, input_tensors, group=None, async_op=False): + """""" + assert len(output_tensors) == len(input_tensors), "" + if hasattr(torch.distributed.distributed_c10d, '_all_gather_base_coalesced'): + # customized PyTorch + return torch.distributed.distributed_c10d._all_gather_base_coalesced(output_tensors, + input_tensors, + group=group, + async_op=async_op) + elif has_coalescing_manager(): + reqs = [] + with get_coalescing_manager(group, input_tensors[0].device, reqs, async_op): + for output, input in zip(output_tensors, input_tensors): + handle = torch.distributed.distributed_c10d.all_gather_into_tensor(output, + input, + group=group, + async_op=True) + reqs.append(handle) + if async_op: + return reqs[-1] + else: + reqs[-1].wait() + + @compiler.disable + def reduce_scatter_tensor(self, output_tensor, input_tensor, op=ReduceOp.SUM, group=None, async_op=False): + if self.has_reduce_scatter_tensor(): + return self.reduce_scatter_function(output_tensor, + input_tensor, + op=self._reduce_op(op), + group=group, + async_op=async_op) + else: + utils.logger.warning("unable to find torch.distributed.reduce_scatter_tensor. will fall back to " + "torch.distributed.reduce_scatter which will result in suboptimal performance. 
" + "please consider upgrading your pytorch installation.") + pass + + @compiler.disable + def all_to_all_single(self, + output, + input, + output_split_sizes=None, + input_split_sizes=None, + group=None, + async_op=False): + return torch.distributed.all_to_all_single(output=output, + input=input, + output_split_sizes=output_split_sizes, + input_split_sizes=input_split_sizes, + group=group, + async_op=async_op) + + @compiler.disable + def all_to_all(self, output_tensor_list, input_tensor_list, group=None, async_op=False): + return torch.distributed.all_to_all(output_tensor_list, input_tensor_list, group=group, async_op=async_op) + + @compiler.disable + def send(self, tensor, dst, group=None, tag=0): + return torch.distributed.send(tensor=tensor, dst=dst, group=group, tag=tag) + + @compiler.disable + def recv(self, tensor, src=None, group=None, tag=0): + return torch.distributed.recv(tensor=tensor, src=src, group=group, tag=tag) + + @compiler.disable + def isend(self, tensor, dst, group=None, tag=0): + return torch.distributed.isend(tensor=tensor, dst=dst, group=group, tag=tag) + + @compiler.disable + def irecv(self, tensor, src=None, group=None, tag=0): + return torch.distributed.irecv(tensor=tensor, src=src, group=group, tag=tag) + + @compiler.disable + def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False): + return torch.distributed.gather(tensor=tensor, + gather_list=gather_list, + dst=dst, + group=group, + async_op=async_op) + + @compiler.disable + def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False): + return torch.distributed.scatter(tensor=tensor, + scatter_list=scatter_list, + src=src, + group=group, + async_op=async_op) + + @compiler.disable + def barrier(self, group=torch.distributed.GroupMember.WORLD, async_op=False, device_ids=None): + if group is None: + group = torch.distributed.GroupMember.WORLD + return torch.distributed.barrier(group=group, async_op=async_op, device_ids=device_ids) + + @compiler.disable + def monitored_barrier(self, group=torch.distributed.GroupMember.WORLD, timeout=None, wait_all_ranks=False): + if group is None: + group = torch.distributed.GroupMember.WORLD + return torch.distributed.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks) + + def get_rank(self, group=None): + return torch.distributed.get_rank(group=group) + + def get_world_size(self, group=None): + return torch.distributed.get_world_size(group=group) + + def is_initialized(self): + return torch.distributed.is_initialized() + + def get_backend(self, group=None): + return torch.distributed.get_backend(group=group) + + def new_group(self, ranks): + return torch.distributed.new_group(ranks) + + def get_global_rank(self, group, group_rank): + if hasattr(torch.distributed.distributed_c10d, "get_global_rank"): + from torch.distributed.distributed_c10d import get_global_rank as _get_global_rank + else: + from torch.distributed.distributed_c10d import _get_global_rank + return _get_global_rank(group, group_rank) + + def get_world_group(self): + return torch.distributed.group.WORLD + + def destroy_process_group(self, group=None): + return torch.distributed.destroy_process_group(group=group) + + def _reduce_op(self, op): + ''' + Helper function. 
If the op provided is not a torch.dist.ReduceOp, convert it and return + ''' + if not isinstance(op, torch.distributed.ReduceOp): + if op == ReduceOp.SUM: + op = torch.distributed.ReduceOp.SUM + elif op == ReduceOp.PRODUCT: + op = torch.distributed.ReduceOp.PRODUCT + elif op == ReduceOp.AVG: + op = torch.distributed.ReduceOp.AVG + elif op == ReduceOp.MIN: + op = torch.distributed.ReduceOp.MIN + elif op == ReduceOp.MAX: + op = torch.distributed.ReduceOp.MAX + elif op == ReduceOp.BAND: + op = torch.distributed.ReduceOp.BAND + elif op == ReduceOp.BOR: + op = torch.distributed.ReduceOp.BOR + elif op == ReduceOp.BXOR: + op = torch.distributed.ReduceOp.BXOR + return op + + +# This will become a light-weight wrapper around torch.distributed functions +# TODO: create some example to show how this wrapper can help profile communication +# TODO: make sure there is no performance regression with this approach +# TODO: explore monkey-patching if this does not work diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8463321cfc34440f033e001ce70a393bad0c8ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/auto_tp.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/auto_tp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58c94ac7f37dc226df7ea5f14307b7b75347ba11 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/auto_tp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/inject.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/inject.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74b627d549a50366fd70b328d52ab026f7841df1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/inject.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/layers.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/layers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f8f05335a507f1bbd1a5ad10f5945f7dba86adf Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/layers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/load_checkpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/load_checkpoint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..511796c5cee82d0279828c3634c102a2f40ab073 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/load_checkpoint.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5aa8f4ba4b21ca8c342c1809fa53644a4f04c7ff Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/policy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/replace_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/replace_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46356f58583c9782e272b023b11236ff98da01ad Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/replace_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/replace_policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/replace_policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d627fcf6b39708121a3e2e086098e6559aef21d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/replace_policy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/tp_shard.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/tp_shard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99b08389c9055110eb5b3c75a0f7dc817b6b63ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/tp_shard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..619418367f6cf694b35490bc8c3ffcf7aa13e3d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..993d14071659303ea6702b0d5182c6d04c931905 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .bert import DS_BERTContainer, HFBertLayerPolicy +from .bloom import DS_BloomContainer, BLOOMLayerPolicy, supported_models +from .distil_bert import DS_DistilBERTContainer, HFDistilBertLayerPolicy +from .gpt2 import DS_GPT2Container, HFGPT2LayerPolicy +from .gptj import DS_GPTJContainer, HFGPTJLayerPolicy +from .gptneo import DS_GPTNEOContainer, HFGPTNEOLayerPolicy +from .gptneox import DS_GPTNEOXContainer, GPTNEOXLayerPolicy +from .llama import DS_LLAMAContainer, LLAMALayerPolicy +from .llama2 import LLAMA2LayerPolicy, DS_LLAMA2Container +from .internlm import DS_InternLMContainer, InternLMLayerPolicy +from .megatron_gpt import DS_MegatronGPTContainer, MegatronLayerPolicy +from .megatron_gpt_moe import DS_MegatronGPTMoEContainer, MegatronMoELayerPolicy +from .opt import DS_OPTContainer, HFOPTLayerPolicy +from .clip import DS_CLIPContainer, HFCLIPLayerPolicy +from .unet import UNetPolicy +from .vae import VAEPolicy diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf00e3bd48bf81d6423272d39d7abbcaf34d90d8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..831b26c535ee255a4ed859f19ae7caa43939a0a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/bert.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/bert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3da92cb026b512bad645038bd4993fd761193e6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/bert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/bloom.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/bloom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..051c5c569a2e95d909c3a9fcaea4a0472074cbed Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/bloom.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f29abbf92c03c8a67fbd1d04b499a031cf44f90c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/distil_bert.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/distil_bert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7de3debd6098fde1c32082a3d8857c482620416c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/distil_bert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gpt2.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gpt2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ec51e55bf39071275fdbf18e90884ef076aaad5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gpt2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptj.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptj.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20558d84508a201eba2152b0a7009956c19ddf52 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptj.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptneo.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptneo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b14437ac6b38f7e8c213efdacecce9011611d8f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptneo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptneox.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptneox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a23c94f8c9babd86ae0fbaa2b76ba33498be9451 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/gptneox.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/internlm.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/internlm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10cea69cbd20acc41d9d907af7b7447b52c2d21e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/internlm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/llama.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/llama.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef192c11af00b2cb3899f000ed8d02d98352de1b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/llama.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/llama2.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/llama2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9251af14b656240a17973048e57377f5d1fd6209 Binary files /dev/null 
and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/llama2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/megatron_gpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/megatron_gpt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f4b29573fdddfd66f0c35ebf9e40421bf49201d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/megatron_gpt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/megatron_gpt_moe.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/megatron_gpt_moe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..964e1ee50173861f6cccb2ad24fceec2f89cb828 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/megatron_gpt_moe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/unet.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/unet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f252edaa82ba0edc393f2a349598cb8cf64b893 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/unet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/vae.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/vae.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f1ab8a6341674018a0cf93fe92aef87daa2f82e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/__pycache__/vae.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/base.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/base.py new file mode 100644 index 0000000000000000000000000000000000000000..83e109167ffec669b6b1d77c2996010a70678f27 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/base.py @@ -0,0 +1,322 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Create a container object to save model-specific tensors using the policy file above. +from abc import ABC + +import torch + +import deepspeed +from deepspeed.ops.transformer.inference.config import DeepSpeedInferenceConfig +from deepspeed.accelerator import get_accelerator + +# If the intermediate size attribute is set DEFAULT_INTERMEDIATE_SIZE +# it is assumed the intermediate size is 4x the embedding dimension +DEFAULT_INTERMEDIATE_SIZE = -1 + + +class BaseConvolutionContainer(ABC): + # not implemented + def __init__(self): + pass + + +class BaseTransformerContainer(ABC): + + def __init__(self, policy, config, model_config, layer_id, child): + self.policy = policy + self.config = config + self.model_config = model_config + self.layer_id = layer_id + self.child = child + + self.megatron_v2 = self.policy.is_megatron_v2 + self.scale_attention = self.policy.scale_attention + self.ckpt_load_enabled = False + + # configuration for models. todo: can this be moved to a pydantic model config? 
+ self.hidden_size = None + self.intermediate_size = None + self.num_attention_heads = None + self.mp_size = self.config.tensor_parallel.tp_size + self.pre_layer_norm = self.model_config.do_layer_norm_before if \ + hasattr(self.model_config, 'do_layer_norm_before') else self.policy.pre_attn_norm + self.dtype = self.config.dtype + self.attn_linear_layer = self.policy.linear_layer + self.mlp_linear_layer = self.policy.linear_layer + self.return_tuple = self.config.return_tuple + self.triangular_masking = True + self.local_attention = ((self.model_config.attention_layers[self.layer_id] == "local") if hasattr( + self.model_config, 'attention_layers') else False) + self.window_size = getattr(self.model_config, "window_size", 1) + self.mlp_act_func_type = self.policy.mlp_act_func_type + self.norm_type = self.policy.norm_type + self.training_mp_size = self.config.training_mp_size + self.bigscience_bloom = False + self.max_out_tokens = self.config.max_out_tokens + self.min_out_tokens = self.config.min_out_tokens + self.scale_attn_by_inverse_layer_idx = getattr(self.config, "scale_attn_by_inverse_layer_idx", False) + self.use_mup = self.policy.use_mup + self.return_single_tuple = False + self.rotary_dim = self.get_rotary_dim() + self.mlp_after_attn = (self.rotary_dim is None or self.rotary_dim < 0) + + # Attention tensors + self.qkvw = None + self.qkvb = None + self.dense_w = None + self.dense_b = None + # MLP tensors + self._h4h_w = None + self._h4h_b = None + self._4hh_w = None + self._4hh_b = None + # LayerNorm tensors + self.attn_nw = None + self.attn_nb = None + self.input_nw = None + self.input_nb = None + + self.mp_group = None + self.use_triton = False + + # Triton + self.use_triton = config.use_triton and deepspeed.HAS_TRITON + + def create_ds_model_config(self): + self.set_hidden_heads(*self.policy.get_hidden_heads()) + assert self.num_attention_heads % self.mp_size == 0,\ + "To run the model parallel across the GPUs, the attention_heads require to be divisible by the world_size!" +\ + "This is because the attention computation is partitioned evenly among the parallel GPUs." 
+ + self.ds_model_config = DeepSpeedInferenceConfig( + hidden_size=self.hidden_size, + intermediate_size=self.intermediate_size, + heads=self.num_attention_heads, + layer_norm_eps=self.layernorm_epsilon, + dtype=self.dtype, + pre_layer_norm=self.pre_layer_norm, + norm_type=self.norm_type, + mp_size=self.mp_size, + return_tuple=self.return_tuple, + triangular_masking=self.triangular_masking, + local_attention=self.local_attention, + window_size=self.window_size, + rotary_dim=self.rotary_dim, + mlp_after_attn=self.mlp_after_attn, + mlp_act_func_type=self.mlp_act_func_type, + training_mp_size=self.training_mp_size, + bigscience_bloom=self.bigscience_bloom, + max_out_tokens=self.max_out_tokens, + min_out_tokens=self.min_out_tokens, + scale_attn_by_inverse_layer_idx=self.scale_attn_by_inverse_layer_idx, + use_mup=self.use_mup, + return_single_tuple=self.return_single_tuple, + set_empty_params=self.config.set_empty_params, + transposed_mode=self.config.transposed_mode, + use_triton=self.use_triton, + triton_autotune=self.config.triton_autotune) + + if self.use_triton and deepspeed.HAS_TRITON: + from .bert import DS_BERTContainer + if not isinstance(self, DS_BERTContainer): + raise NotImplementedError("Triton kernels are only for BERT-like models yet") + + if not self.config.triton_autotune: + from deepspeed.ops.transformer.inference.triton.matmul_ext import fp16_matmul + fp16_matmul.skip_autotune() + + return self.ds_model_config + + def check_meta_tensor_support(self): + if hasattr(self.qkvw, 'is_meta'): + if self.qkvw.is_meta: + assert self.ckpt_load_enabled, "Meta tensors are not supported for this model currently." + else: + raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+") + + def initialize_tensors(self, enable_training=False): + # Set the tensors from policy (user module) to container (DS module) + self.set_attention(*self.policy.attention(enable_training=enable_training)) + self.set_mlp(*self.policy.mlp(enable_training=enable_training)) + self.set_layernorm(*self.policy.layernorm()) + #self.check_meta_tensor_support() + + def convert_to_required_dtype(self): + # Note: converting tensors to fp16 requires that we do it in-place using self.__dict__ and not make a list/dict copy + if self.dtype in [torch.half, torch.bfloat16]: + for k, v in self.__dict__.items(): + # The list comprehension is used for MoE tensor lists + if isinstance(v, list) and all((isinstance(tensor, torch.Tensor) \ + or isinstance(tensor, torch.nn.Parameter)) for tensor in v): + self.__dict__[k] = [moe_tensor.to(self.dtype) for moe_tensor in v] + + if isinstance(v, torch.Tensor) or isinstance(v, torch.nn.Parameter): + self.__dict__[k] = v.to(self.dtype) + + def get_rotary_dim(self): + if hasattr(self.model_config, 'rotary_dim'): + return self.model_config.rotary_dim + if hasattr(self.child, 'attention') and hasattr(self.child.attention, 'rotary_ndims'): + return self.child.attention.rotary_ndims + return -1 + + def set_moe(self, moe=False): + self.moe = moe + + def set_tensor_parallel_config(self, mp_size, mp_group): + self.mp_size = mp_size + self.mp_group = mp_group + + def set_quantization_config(self, quantizer): + self.quantizer = quantizer + + def set_hidden_heads(self, hidden_size, num_attention_heads, epsilon, intermediate_size): + """ + Args: + hidden_size: embedding dimension of the model + num_attention_heads: number of attention heads in the model + epsilon: epsilon value for layer norm (same value used for all norms) + intermediate_size: Size of MLP projection. 
If `DEFAULT_INTERMEDIATE_SIZE` is passed + it is assumed to be `4 * hidden_size` + """ + self.hidden_size = hidden_size + if intermediate_size == DEFAULT_INTERMEDIATE_SIZE: + self.intermediate_size = 4 * hidden_size + else: + self.intermediate_size = intermediate_size + self.num_attention_heads = num_attention_heads + self.layernorm_epsilon = epsilon + + def set_attention(self, qkvw, qkvb, dense_w, dense_b): + self.qkvw = qkvw + self.qkvb = qkvb + self.dense_w = dense_w + self.dense_b = dense_b + + def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b): + self._h4h_w = _h4h_w + self._h4h_b = _h4h_b + self._4hh_w = _4hh_w + self._4hh_b = _4hh_b + + def set_layernorm(self, attn_nw, attn_nb, input_nw, input_nb): + self.attn_nw = attn_nw + self.attn_nb = attn_nb + self.input_nw = input_nw + self.input_nb = input_nb + + def apply_weight_quantization(self): + # quantize attention weights + self.attention_quantization() + + # quantize mlp weights + self.mlp_quantization() + + def attention_quantization(self): + self.module.attention.attn_qkvw = self.quantizer.quantize(self.module.attention.attn_qkvw) + self.module.attention.attn_ow = self.quantizer.quantize(self.module.attention.attn_ow) + + def mlp_quantization(self): + self.module.mlp.inter_w = self.quantizer.quantize(self.module.mlp.inter_w) + self.module.mlp.output_w = self.quantizer.quantize(self.module.mlp.output_w) + + def apply_tensor_parallelism(self, mp_replace): + # setup the new Attention module + self.attention_qkv_mp(mp_replace) + self.attention_o_mp(mp_replace) + + # setup the new MLP module + self.mlp_inter_mp(mp_replace) + self.mlp_output_mp(mp_replace) + + # Apply weight quantization + # TODO(cmikeh2): Re-enable this once verified + #self.apply_weight_quantization() + + def attention_qkv_mp(self, mp_replace, reversed_dim=False): + self.module.attention.attn_qkvw = mp_replace.strided_copy(self.module.attention.attn_qkvw, + self.qkvw, + num_splits=3, + int8=reversed_dim) + self.module.attention.attn_qkvb = mp_replace.strided_copy(self.module.attention.attn_qkvb, + self.qkvb, + num_splits=3, + int8=reversed_dim) + + def attention_o_mp(self, mp_replace, reversed_dim=False): + self.module.attention.attn_ow = mp_replace.copy(self.module.attention.attn_ow, self.dense_w, int8=reversed_dim) + self.module.attention.attn_ob = mp_replace.copy(self.module.attention.attn_ob, + self.dense_b, + int8=reversed_dim, + allocate_tensor=reversed_dim) + + def mlp_inter_mp(self, mp_replace, reversed_dim=False): + self.module.mlp.inter_w = mp_replace.copy(self.module.mlp.inter_w, self._h4h_w, int8=reversed_dim) + self.module.mlp.inter_b = mp_replace.copy(self.module.mlp.inter_b, self._h4h_b, int8=reversed_dim) + + def mlp_output_mp(self, mp_replace, reversed_dim=False): + self.module.mlp.output_w = mp_replace.copy(self.module.mlp.output_w, self._4hh_w, int8=reversed_dim) + self.module.mlp.output_b = mp_replace.copy(self.module.mlp.output_b, + self._4hh_b, + int8=reversed_dim, + allocate_tensor=reversed_dim) + + def copy_data_to_new_module(self): + params = {'attn_nw': self.attn_nw, 'attn_nb': self.attn_nb} + for key in params: + if params[key] is None: + setattr(self.module.mlp, key, None) + else: + setattr(self.module.mlp, key, + torch.nn.parameter.Parameter(params[key].to(get_accelerator().current_device_name()))) + + params = {'norm_w': self.input_nw, 'norm_b': self.input_nb} + for key in params: + if params[key] is None: + setattr(self.module, key, None) + else: + setattr(self.module, key, + 
torch.nn.parameter.Parameter(params[key].to(get_accelerator().current_device_name()))) + + def transpose(self): + self.transpose_attention() + self.transpose_mlp() + + def transpose_attention(self): + if self.attn_linear_layer: + self.qkvw = self.transpose_impl(self.qkvw.data) + self.dense_w = self.transpose_impl(self.dense_w.data) + + def transpose_mlp(self): + if self.mlp_linear_layer: + self._h4h_w = self.transpose_impl(self._h4h_w.data) + self._4hh_w = self.transpose_impl(self._4hh_w.data) + + def transpose_impl(self, data): + data = data.contiguous() + data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1)) + data = data.reshape(data.shape[-1], data.shape[-2]) + data.to(get_accelerator().current_device_name()) + return data + + def get_all_params(self): + params = [ + self.attn_nw, + self.attn_nb, + self.input_nw, + self.input_nb, + ] + + params.extend(self.get_attn_params()) + params.extend(self.get_mlp_params()) + + return params + + def get_attn_params(self): + return [self.qkvw, self.qkvb, self.dense_w, self.dense_b] + + def get_mlp_params(self): + return [self._h4h_w, self._h4h_b, self._4hh_w, self._4hh_b] diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/base_moe.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/base_moe.py new file mode 100644 index 0000000000000000000000000000000000000000..4be1b849ba70da04b6b08ea011c27bbbcf96b8bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/base_moe.py @@ -0,0 +1,130 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Create a container object to save model-specific tensors using the policy file above. +from .base import * +from deepspeed import comm as dist +import deepspeed.ops.transformer as transformer_inference +from deepspeed.accelerator import get_accelerator + + +class BaseTransformerMoEContainer(BaseTransformerContainer): + + def __init__(self, **kwargs): + # Call the init function of the parent class to initialize the tensors and configs from parent class + super().__init__(**kwargs) + + self.num_experts = self.policy.get_num_experts() + self.ep_world_size = dist.get_world_size() + self.local_ep_size = 1 if self.num_experts < self.ep_world_size else self.num_experts // self.ep_world_size + + self.layer_norm_eps = self.config.layer_norm_eps if hasattr(self.config, 'layer_norm_eps') else 1e-12, + + # MoE models will have a list of mlp related tensors + self._h4h_w = [] + self._h4h_b = [] + self._4hh_w = [] + self._4hh_b = [] + + # Residual MoE needs extra parameters + self._res_h4h_w = None + self._res_h4h_b = None + self._res_4hh_w = None + self._res_4hh_b = None + self._res_coef = None + + def create_ds_model_config(self): + self.set_hidden_heads(*self.policy.get_hidden_heads()) + assert self.num_attention_heads % self.mp_size == 0,\ + "To run the model parallel across the GPUs, the attention_heads require to be divisible by the world_size!" +\ + "This is because the attention computation is partitioned evenly among the parallel GPUs." 
+ + self.ds_model_config = transformer_inference.DeepSpeedMoEInferenceConfig( + hidden_size=self.hidden_size, + heads=self.num_attention_heads, + layer_norm_eps=self.layer_norm_eps, + fp16=self.fp16, + pre_layer_norm=self.pre_layer_norm, + mp_size=self.mp_size, + q_int8=self.quantize, + moe_experts=self.local_ep_size, + global_experts=self.num_experts, + mlp_type=self.config.moe.type, + scale_attn_by_inverse_layer_idx=self.scale_attn_by_inverse_layer_idx, + ) + + return self.ds_model_config + + def initialize_tensors(self): + # Set the tensors from policy (user module) to container (DS module) + self.set_attention(*self.policy.attention()) + self.set_mlp(self.config.moe.type) + self.set_layernorm(*self.policy.layernorm()) + + def set_mlp(self, config_moe_type): + if config_moe_type == 'standard': + self._h4h_w, self._h4h_b, \ + self._4hh_w, self._4hh_b = self.policy.mlp() + else: + self._h4h_w, self._h4h_b, self._4hh_w, \ + self._4hh_b, self._res_h4h_w, self._res_h4h_b, \ + self._res_4hh_w, self._res_4hh_b, \ + self._res_coef = self.policy.mlp(config_moe_type) + + def transpose(self): + self.transpose_attention() + self.transpose_mlp() + + if self.config.moe.type == 'residual': + self.transpose_residual() + + def transpose_mlp(self): + self._h4h_w = [self.transpose_impl(moe_w1.data) for moe_w1 in self._h4h_w] + self._4hh_w = [self.transpose_impl(moe_w1.data) for moe_w1 in self._4hh_w] + + def transpose_residual(self): + self._res_h4h_w.data = self.transpose_impl(self._res_h4h_w.data) + self._res_4hh_w.data = self.transpose_impl(self._res_4hh_w.data) + self._res_coef.data = self.transpose_impl(self._res_coef.data) + + def apply_tensor_parallelism(self, mp_replace): + # setup the new Attention module + self.attention_qkv_mp(mp_replace) + self.attention_o_mp(mp_replace) + + # quantize attention weights + self.attention_quantization() + + # setup the new MLP module + self.mlp_mp() + + def mlp_mp(self): + gpu_index = dist.get_rank() + for ep_index in range(self.local_ep_size): + # mlp inter + self.module.mlp[ep_index].inter_w.data = self._h4h_w[gpu_index * self.local_ep_size + ep_index].to( + get_accelerator().current_device_name()) + self.module.mlp[ep_index].inter_b.data = self._h4h_b[gpu_index * self.local_ep_size + ep_index].to( + get_accelerator().current_device_name()) + + # mlp output + self.module.mlp[ep_index].output_w.data = self._4hh_w[gpu_index * self.local_ep_size + ep_index].to( + get_accelerator().current_device_name()) + self.module.mlp[ep_index].output_b.data = self._4hh_b[gpu_index * self.local_ep_size + ep_index].to( + get_accelerator().current_device_name()) + + def copy_data_to_new_module(self): + self.module.attn_nw.data = self.attn_nw.to(get_accelerator().current_device_name()) + self.module.attn_nb.data = self.attn_nb.to(get_accelerator().current_device_name()) + + self.module.norm_w.data.copy_(self.input_nw.to(get_accelerator().current_device_name())) + self.module.norm_b.data.copy_(self.input_nb.to(get_accelerator().current_device_name())) + + if self.config.moe.type == 'residual': + self.module.res_mlp.inter_w.data = self._res_h4h_w.to(get_accelerator().current_device_name()) + self.module.res_mlp.inter_b.data = self._res_h4h_b.to(get_accelerator().current_device_name()) + self.module.res_mlp.output_w.data = self._res_4hh_w.to(get_accelerator().current_device_name()) + self.module.res_mlp.output_b.data = self._res_4hh_b.to(get_accelerator().current_device_name()) + self.module.res_coef.data = self._res_coef.to(get_accelerator().current_device_name()) diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/bert.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/bert.py new file mode 100644 index 0000000000000000000000000000000000000000..20ae575f45144733a82b609eed21284746cb95d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/bert.py @@ -0,0 +1,93 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference +import torch +from torch.nn.parameter import Parameter +from ..policy import TransformerPolicy + + +class DS_BERTContainer(BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. + self.return_tuple = True + self.triangular_masking = False + self.use_triton = kwargs['config'].use_triton and deepspeed.HAS_TRITON + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + return self.module + + +class HFBertLayerPolicy(TransformerPolicy): + + def __init__(self, client_module, inference=False): + super().__init__(inference, pre_attn_norm=False) + self.client_module = client_module + self.cuda_graph_supported = True + + if HFBertLayerPolicy._orig_layer_class is None: + try: + import transformers + HFBertLayerPolicy._orig_layer_class = [ + transformers.models.bert.modeling_bert.BertLayer, + transformers.models.roberta.modeling_roberta.RobertaLayer + ] + except: + HFBertLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + if self.pre_attn_norm: + attention_layernorm = self.client_module.PostAttentionLayerNorm + else: + attention_layernorm = self.client_module.attention.output.LayerNorm + return self.client_module.attention.self.query.weight.shape[1], \ + self.client_module.attention.self.num_attention_heads, \ + attention_layernorm.eps, \ + DEFAULT_INTERMEDIATE_SIZE + + def attention(self, enable_training=False): + qw = self.client_module.attention.self.query.weight + qb = self.client_module.attention.self.query.bias + kw = self.client_module.attention.self.key.weight + kb = self.client_module.attention.self.key.bias + vw = self.client_module.attention.self.value.weight + vb = self.client_module.attention.self.value.bias + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training) + qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training) + + return qkvw, \ + qkvb, \ + self.client_module.attention.output.dense.weight, \ + self.client_module.attention.output.dense.bias, \ + + def mlp(self, enable_training=False): + if self.pre_attn_norm: + intermediate_ff = self.client_module.intermediate.dense_act + else: + intermediate_ff = self.client_module.intermediate.dense + + return intermediate_ff.weight, intermediate_ff.bias, \ + self.client_module.output.dense.weight, \ + self.client_module.output.dense.bias + + def layernorm(self): + if self.pre_attn_norm: + attention_layernorm = self.client_module.PostAttentionLayerNorm + transformer_layernorm = self.client_module.PreAttentionLayerNorm + else: + attention_layernorm = self.client_module.attention.output.LayerNorm + transformer_layernorm = self.client_module.output.LayerNorm + return attention_layernorm.weight, \ 
+ attention_layernorm.bias, \ + transformer_layernorm.weight, \ + transformer_layernorm.bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/bloom.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/bloom.py new file mode 100644 index 0000000000000000000000000000000000000000..a78ac81203460d352788783c200dc3f14e4f8e60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/bloom.py @@ -0,0 +1,131 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from .features.meta_tensor import MetaTensorContainer +from .features.hybrid_engine import HybridEngineContainer +from deepspeed.model_implementations.transformers.ds_bloom import DeepSpeedBloomInference +from ..policy import TransformerPolicy +from ..policy import transformer_param_names +from ..policy import maybe_copy + +from ..policy import maybe_get_lora + +supported_models = {None} + + +class DS_BloomContainer(MetaTensorContainer, HybridEngineContainer, BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. + self.bigscience_bloom = True + self.triangular_masking = False + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + + self.module = DeepSpeedBloomInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + self.module.config.invert_mask = False + return self.module + + def attention_qkv_mp(self, mp_replace, reversed_dim=False): + self.module.attention.attn_qkvw = mp_replace.copy(self.module.attention.attn_qkvw, self.qkvw) + self.module.attention.attn_qkvb = mp_replace.copy(self.module.attention.attn_qkvb, self.qkvb) + + def get_lora_matched_pair(self): + """ + Necessary to implement for `HybridEngineContainer` + """ + fc1_lora, fc2_lora, qkv_lora, out_lora = self.get_lora_params() + ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (qkv_lora, self.qkvw), (out_lora, self.dense_w)] + return ret + + def set_lora_params(self): + """ + Necessary to implement for `HybridEngineContainer` + """ + self.lora_params = [ + maybe_get_lora(p) for p in [ + self.policy.client_module.mlp.dense_h_to_4h, self.policy.client_module.mlp.dense_4h_to_h, self.policy. 
+ client_module.self_attention.query_key_value, self.policy.client_module.self_attention.dense + ] + ] + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'self_attention.query_key_value.weight', \ + 'self_attention.query_key_value.bias', \ + 'self_attention.dense.weight', \ + 'self_attention.dense.bias', \ + 'mlp.dense_h_to_4h.weight', \ + 'mlp.dense_h_to_4h.bias', \ + 'mlp.dense_4h_to_h.weight', \ + 'mlp.dense_4h_to_h.bias', \ + 'post_attention_layernorm.weight', \ + 'post_attention_layernorm.bias', \ + 'input_layernorm.weight', \ + 'input_layernorm.bias' + ) + for i in range(0, 2): + maybe_copy(module.attention, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i], + prefix + param_names[i], + qkv=True, + megatron_v2=self.policy.is_megatron_v2, + split_qkv=self.policy.split_qkv) + for i in range(2, 4): + maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i], + prefix + param_names[i]) + for i in range(4, 10): + maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i], + prefix + param_names[i]) + for i in range(10, 12): + maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i], prefix + param_names[i]) + + +class BLOOMLayerPolicy(TransformerPolicy): + _orig_layer_class = None + + def __init__(self, client_module, inference=True, use_load_prefix=True, split_qkv=False): + super().__init__(inference, linear_layer=True, use_load_prefix=use_load_prefix, split_qkv=split_qkv) + self.client_module = client_module + try: + import transformers + BLOOMLayerPolicy._orig_layer_class = transformers.models.bloom.modeling_bloom.BloomBlock + global supported_models + supported_models.update({transformers.models.bloom.modeling_bloom.BloomModel}) + except Exception as e: + print(f"WARNING! Setting BLOOMLayerPolicy._orig_layer_class to None due to Exception: {e}") + BLOOMLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + return self.client_module.self_attention.hidden_size, \ + self.client_module.self_attention.num_heads, \ + self.client_module.input_layernorm.eps, \ + DEFAULT_INTERMEDIATE_SIZE + + def attention(self, enable_training=False): + return self.client_module.self_attention.query_key_value.weight, \ + self.client_module.self_attention.query_key_value.bias, \ + self.client_module.self_attention.dense.weight, \ + self.client_module.self_attention.dense.bias, + + def mlp(self, enable_training=False): + return self.client_module.mlp.dense_h_to_4h.weight, \ + self.client_module.mlp.dense_h_to_4h.bias, \ + self.client_module.mlp.dense_4h_to_h.weight, \ + self.client_module.mlp.dense_4h_to_h.bias + + def layernorm(self): + return self.client_module.post_attention_layernorm.weight, \ + self.client_module.post_attention_layernorm.bias, \ + self.client_module.input_layernorm.weight, \ + self.client_module.input_layernorm.bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/clip.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/clip.py new file mode 100644 index 0000000000000000000000000000000000000000..afe4a76086d80ceec7dce15c9cfb2a9b8718b0c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/clip.py @@ -0,0 +1,73 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference +import torch +from torch.nn.parameter import Parameter +from ..policy import TransformerPolicy + + +class DS_CLIPContainer(BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + return self.module + + +class HFCLIPLayerPolicy(TransformerPolicy): + + def __init__(self, client_module, inference=False): + super().__init__(inference, pre_attn_norm=True, scale_attention=True) + self.client_module = client_module + self.cuda_graph_supported = True + + if HFCLIPLayerPolicy._orig_layer_class is None: + try: + import transformers + HFCLIPLayerPolicy._orig_layer_class = transformers.models.clip.modeling_clip.CLIPEncoderLayer + except: + HFCLIPLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + return self.client_module.self_attn.q_proj.weight.shape[1], \ + self.client_module.self_attn.num_heads, \ + self.client_module.layer_norm1.eps, \ + DEFAULT_INTERMEDIATE_SIZE + + def attention(self, enable_training=False): + qw = self.client_module.self_attn.q_proj.weight + qb = self.client_module.self_attn.q_proj.bias + kw = self.client_module.self_attn.k_proj.weight + kb = self.client_module.self_attn.k_proj.bias + vw = self.client_module.self_attn.v_proj.weight + vb = self.client_module.self_attn.v_proj.bias + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training) + qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training) + + return qkvw, \ + qkvb, \ + self.client_module.self_attn.out_proj.weight, \ + self.client_module.self_attn.out_proj.bias + + def mlp(self, enable_training=False): + return self.client_module.mlp.fc1.weight, \ + self.client_module.mlp.fc1.bias, \ + self.client_module.mlp.fc2.weight, \ + self.client_module.mlp.fc2.bias + + def layernorm(self): + return self.client_module.layer_norm2.weight, \ + self.client_module.layer_norm2.bias, \ + self.client_module.layer_norm1.weight, \ + self.client_module.layer_norm1.bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/distil_bert.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/distil_bert.py new file mode 100644 index 0000000000000000000000000000000000000000..ecd0562438b5ac634cf8b4536fd3413d0f9ed9d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/distil_bert.py @@ -0,0 +1,82 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference +import torch +from torch.nn.parameter import Parameter +from ..policy import TransformerPolicy + + +class DS_DistilBERTContainer(BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. 
+ self.triangular_masking = False + self.return_single_tuple = True + self.use_triton = kwargs['config'].use_triton and deepspeed.HAS_TRITON + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + return self.module + + +class HFDistilBertLayerPolicy(TransformerPolicy): + _orig_layer_class = None + + def __init__(self, client_module, inference=False, preln=False): + super().__init__(inference) + self.client_module = client_module + self.preln = preln + self.cuda_graph_supported = True + if HFDistilBertLayerPolicy._orig_layer_class is None: + try: + import transformers + HFDistilBertLayerPolicy._orig_layer_class = [ + transformers.models.distilbert.modeling_distilbert.TransformerBlock, + ] + except: + HFDistilBertLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + return self.client_module.attention.q_lin.weight.shape[1], \ + self.client_module.attention.n_heads, \ + self.client_module.sa_layer_norm.eps, \ + DEFAULT_INTERMEDIATE_SIZE + + def attention(self, enable_training=False): + qw = self.client_module.attention.q_lin.weight + qb = self.client_module.attention.q_lin.bias + kw = self.client_module.attention.k_lin.weight + kb = self.client_module.attention.k_lin.bias + vw = self.client_module.attention.v_lin.weight + vb = self.client_module.attention.v_lin.bias + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training) + qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training) + + return qkvw, \ + qkvb, \ + self.client_module.attention.out_lin.weight, \ + self.client_module.attention.out_lin.bias + + def mlp(self, enable_training=False): + intermediate_ff = self.client_module.ffn.lin1 + + return intermediate_ff.weight, intermediate_ff.bias, \ + self.client_module.ffn.lin2.weight, \ + self.client_module.ffn.lin2.bias + + def layernorm(self): + attention_layernorm = self.client_module.sa_layer_norm + transformer_layernorm = self.client_module.output_layer_norm + return attention_layernorm.weight, \ + attention_layernorm.bias, \ + transformer_layernorm.weight, \ + transformer_layernorm.bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fc2eb2a65531e61bcea078c0035bd2ee6ee861a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .gated_mlp import HybridGatedMLPContainer +from .megatron import MegatronContainer +from .meta_tensor import MetaTensorContainer +from .split_qkv import HybridSplitQKVContainer diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08e145b3375397ecf485473c96148337f0fa54fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/gated_mlp.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/gated_mlp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..839d8df317a088e5e5ff03087edaa68d84d6cf3b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/gated_mlp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/hybrid_engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/hybrid_engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e9065a66941e79d555719cabcd8a432a2b54c45 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/hybrid_engine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/hybrid_megatron.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/hybrid_megatron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..257027a26774222c7c28b1e10a47cca5d2b5fab1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/hybrid_megatron.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/megatron.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/megatron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4c143cfd40b09c866a62c266efb3f15248a5713 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/megatron.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/meta_tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/meta_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a65173857adf92b299aba41f4c9b62543ab6cbc8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/meta_tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/split_qkv.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/split_qkv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..223112dabe447135f736f6bed182dc7518851583 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/__pycache__/split_qkv.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/gated_mlp.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/gated_mlp.py new file mode 100644 index 0000000000000000000000000000000000000000..24f0826db14ed08f373a2824f845ac6f9d7d9508 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/gated_mlp.py @@ -0,0 +1,118 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import abstractmethod + +from .hybrid_engine import HybridEngineContainer + + +class HybridGatedMLPContainer(HybridEngineContainer): + """ + The HybridGatedMLPContainer supports models for which the first MLP layer + is represented with two separate weights, one for the activation function + and one for the gating function. + """ + + def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b): + super().set_mlp(_h4h_w, _h4h_b, _4hh_w, _4hh_b) + self.set_mlp_gate() + + @abstractmethod + def set_mlp_gate(self): + """ + In `set_mlp_gate`, it is necessary to populate the following variables (where appropriate) + for the given model: + self.inter_up_w: inter up weight + self.inter_up_b: inter up bias + self.inter_gate_w: inter gate weight + self.inter_gate_b: inter gate bias + If the parameter does not exist in the original model, set the attribute to None. 
+ """ + raise NotImplementedError("A set_mlp_gate() function must be defined in the model container \ + in order to set the unfused inter up and gate tensors.") + + def mlp_inter_mp(self, mp_replace, reversed_dim=False): + # Only need to alter behavior if we can't do the normal destructive copy + if self.module.mlp.inter_w is None: + params = [ + (self.module.mlp.inter_up_w, self.inter_up_w), + (self.module.mlp.inter_up_b, self.inter_up_b), + (self.module.mlp.inter_gate_w, self.inter_gate_w), + (self.module.mlp.inter_gate_b, self.inter_gate_b), + ] + for dst, src in params: + dst = mp_replace.copy(dst[:self.inter_up_w.shape[0] // mp_replace.mp_size], + src, + int8=reversed_dim, + allocate_tensor=reversed_dim) if src is not None else None + else: + self.module.mlp.inter_w = mp_replace.strided_copy(self.module.mlp.inter_w, + self._h4h_w, + num_splits=2, + int8=reversed_dim) + self.module.mlp.inter_b = mp_replace.strided_copy(self.module.mlp.inter_b, + self._h4h_b, + num_splits=2, + int8=reversed_dim) + + def release_mlp(self): + super().release_mlp() + gated_mlp_params = [ + (self.module.mlp.inter_up_w, self.inter_up_w), + (self.module.mlp.inter_up_b, self.inter_up_b), + (self.module.mlp.inter_gate_w, self.inter_gate_w), + (self.module.mlp.inter_gate_b, self.inter_gate_b), + ] + + self._release_params(gated_mlp_params) + + def reset_mlp(self): + self._h4h_w.data[:self.inter_up_w.shape[0]] = self.inter_up_w.data + self._h4h_w.data[self.inter_up_w.shape[0]:] = self.inter_gate_w.data + + if self.inter_up_b is not None: + self._h4h_b.data[:self.inter_up_b.shape[0]] = self.inter_up_b.data + self._h4h_b.data[self.inter_up_b.shape[0]:] = self.inter_gate_b.data + + inter_data = [self.inter_up_w.data, self.inter_gate_w.data] + if self.inter_up_b is not None: + inter_data.extend([self.inter_up_b.data, self.inter_gate_b.data]) + + self.inter_up_w.data = self._h4h_w.data[:self.inter_up_w.shape[0]] + self.inter_gate_w.data = self._h4h_w.data[self.inter_up_w.shape[0]:] + + if self.inter_up_b is not None: + self.inter_up_b.data = self._h4h_b.data[:self.inter_up_b.shape[0]] + self.inter_gate_b.data = self._h4h_b.data[self.inter_up_b.shape[0]:] + + for data in inter_data: + del data + + def set_mlp_params_wo_copy(self, Z3_enabled=False): + self.module.mlp.output_w = self._4hh_w + self.module.mlp.output_b = self._4hh_b + + if not Z3_enabled: + # In initialize_tensors, we create a fused inter projection with the appropriate shape + # and copy the up projection and gate projection into it + self.module.mlp.inter_w = self._h4h_w + self.module.mlp.inter_b = self._h4h_b + + self.inter_up_w.data = self._h4h_w[:self.inter_up_w.shape[0], :] + self.inter_gate_w.data = self._h4h_w[self.inter_up_w.shape[0]:, :] + + if self.inter_up_b is not None: + self.inter_up_b.data = self._h4h_b[:self.inter_up_w.shape[0]] if self._h4h_b is not None else None + self.inter_gate_b.data = self._h4h_b[self.inter_up_w.shape[0]:] if self._h4h_b is not None else None + else: + self.module.mlp.inter_up_w = self.inter_up_w + self.module.mlp.inter_up_b = self.inter_up_b + self.module.mlp.inter_gate_w = self.inter_gate_w + self.module.mlp.inter_gate_b = self.inter_gate_b + + def get_mlp_params(self): + params = super().get_mlp_params() + params.extend([self.inter_up_w, self.inter_up_b, self.inter_gate_w, self.inter_gate_b]) + return params diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/hybrid_engine.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/hybrid_engine.py new 
file mode 100644 index 0000000000000000000000000000000000000000..3f702abcf319a6db997b6e2607dcba26f3440841 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/hybrid_engine.py @@ -0,0 +1,212 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import ABC, abstractmethod +from typing import List, Tuple + +import torch + + +class HybridEngineContainer(ABC): + """ + This container identifies which methods need to be overridden in addition to + the base container to enable use in the RLHF pipeline. These methods are not + necessary for inference alone. + + NOTE: If you are using this feature with a container that + also inherits from `MetaTensorContainer`, ensure that `MetaTensorContainer` + is inherited before `HybridEngineContainer` in the class definition. + """ + + def initialize_tensors(self, enable_training=False): + """ + Same purposes as the base container, but also grabs the hooks for any LoRA + parameters. If it's necessary to override specific sub-components of the model, + it's best to augment the specific `set_[component]` itself rather than modifying + the `initialize_tensors` method. See the `HybridSplitQKVContainer` for an example. + """ + super().initialize_tensors(enable_training=enable_training) + self.set_lora_params() + + def transform_for_training(self): + """ + If the views on certain parameters are largely incompatible, it may be necessary to do + more substantial transformations to the parameters. This method should be overridden to + transform the inference format to what is necessary for training. + """ + pass + + def transform_for_inference(self): + """ + If the views on certain parameters are largely incompatible, it may be necessary to do + more substantial transformations to the parameters. This method should be overridden to + transform the training format to what is necessary for inference. + """ + pass + + @abstractmethod + def set_lora_params(self): + """ + If available, set the LoRA parameters for the module. An implementation + for this would iterate over all parameters of the model and use the `maybe_get_lora` helper + method to check if the parameter does in fact have any LoRA params. + """ + raise NotImplementedError("A set_lora_params() function must be defined for the relevant parameters.") + + @abstractmethod + def get_lora_matched_pair(self): + """Get the pair of lora params and its matched model parameters.""" + raise NotImplementedError("get_lora_matched_pair() must be defined for the relevant parameters.") + + def fuse_lora(self): + """Fuse the LoRA parameters for the inference mode.""" + for maybe_lora_param, param in self.get_lora_matched_pair(): + if len(maybe_lora_param) == 3: + lora_right_weight, \ + lora_left_weight, \ + lora_scaling = maybe_lora_param + param.data += lora_scaling * torch.matmul(lora_left_weight.t(), lora_right_weight.t()) + + def unfuse_lora(self): + """Unfuse the LoRA parameters for the training mode.""" + for maybe_lora_param, param in self.get_lora_matched_pair(): + if len(maybe_lora_param) == 3: + lora_right_weight, \ + lora_left_weight, \ + lora_scaling = maybe_lora_param + param.data -= lora_scaling * torch.matmul(lora_left_weight.t(), lora_right_weight.t()) + + def apply_tensor_parallelism(self, mp_replace, reversed_dim=False): + """ + Add support for reversed dim in tensor parallelism. If necessary, override + the called methods to handle partitioned weights (i.e. 
if qkv is split, override + the `attention_qkv_mp` method). If the model component is not split, it should + be safe to use the default implementation. + """ + # Setup the new Attention module + self.attention_qkv_mp(mp_replace, reversed_dim=reversed_dim) + self.attention_o_mp(mp_replace, reversed_dim=reversed_dim) + + # Setup the new MLP module + self.mlp_inter_mp(mp_replace, reversed_dim=reversed_dim) + self.mlp_output_mp(mp_replace, reversed_dim=reversed_dim) + + # Apply weight quantization + # TODO(cmikeh2): Re-enable this once verified + #self.apply_weight_quantization() + + def _release_params(self, param_pairs: List[Tuple[torch.Tensor, torch.Tensor]]): + """ + Helper for `release_[component]` methods. Accepts a list of tuples where the first + element is the module param that needs to be deleted, and the second is the reassignment + from the container. + """ + for module_param, container_param in param_pairs: + if module_param is not None: + del module_param + module_param = container_param + + def release_memory(self): + """ + Delete module parameters if they exist and point them back to the container. The primary + purpose of this is for TP-inference with ZeRO-3. In this scenario, we need to delete the + parameters we've created for inference to free their memory. + """ + general_params = [ + (self.module.attention.attn_ow, self.dense_w), + (self.module.attention.attn_ob, self.dense_b), + (self.module.mlp.attn_nw, self.attn_nw), + (self.module.mlp.attn_nb, self.attn_nb), + (self.module.norm_w, self.input_nw), + (self.module.norm_b, self.input_nb), + ] + + self._release_params(general_params) + + self.release_qkv() + self.release_mlp() + + def release_qkv(self): + """ + Release for QKV parameters (as well as any aliases). + """ + qkv_params = [ + (self.module.attention.attn_qkvw, self.qkvw), + (self.module.attention.attn_qkvb, self.qkvb), + ] + + self._release_params(qkv_params) + + def release_mlp(self): + """ + Release for MLP parameters (as well as any aliases). + """ + mlp_params = [ + (self.module.mlp.inter_w, self._h4h_w), + (self.module.mlp.inter_b, self._h4h_b), + (self.module.mlp.output_w, self._4hh_w), + (self.module.mlp.output_b, self._4hh_b), + ] + + self._release_params(mlp_params) + + def reset_params(self): + """ + The purpose of reset params is to get the weights from the FP16 training + copy of the model and copy to them to contiguous inference view. This only needs + to be performed when the container parameters cannot be used directly for inference. + """ + self.reset_qkv() + self.reset_mlp() + + def reset_qkv(self): + """ + Perform any necessary resets of the model parameters for the QKV components. + """ + pass + + def reset_mlp(self): + """ + Perform any necessary resets of the model parameters for the MLP components. + """ + pass + + def get_lora_params(self): + """ + Return a list of all parameters that would have LoRA for the module. + """ + if not hasattr(self, "lora_params"): + self.set_lora_params() + return self.lora_params + + def set_params_wo_copy(self, Z3_enabled=False): + """ + Rather than copying into, set the parameters directly. This is necessary to provide + an inexpensive (low-memory-overhead) view onto the FP16 forward weights. 
+ """ + self.module.mlp.attn_nw = self.attn_nw + self.module.mlp.attn_nb = self.attn_nb + self.module.norm_w = self.input_nw + self.module.norm_b = self.input_nb + self.set_attn_params_wo_copy(Z3_enabled=Z3_enabled) + self.set_mlp_params_wo_copy(Z3_enabled=Z3_enabled) + + def set_attn_params_wo_copy(self, **kwargs): + """ + Narrower sub-method for finer grained overriding. + """ + self.module.attention.attn_ow = self.dense_w + self.module.attention.attn_ob = self.dense_b + self.module.attention.attn_qkvw = self.qkvw + self.module.attention.attn_qkvb = self.qkvb + + def set_mlp_params_wo_copy(self, **kwargs): + """ + Narrower sub-method for finer grained overriding. + """ + self.module.mlp.inter_w = self._h4h_w + self.module.mlp.inter_b = self._h4h_b + self.module.mlp.output_w = self._4hh_w + self.module.mlp.output_b = self._4hh_b diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/hybrid_megatron.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/hybrid_megatron.py new file mode 100644 index 0000000000000000000000000000000000000000..d40f2a6b57e86087dc13ff2b74a389c67826fa76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/hybrid_megatron.py @@ -0,0 +1,87 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from .hybrid_engine import HybridEngineContainer +from .megatron import MegatronContainer + + +class HybridMegatronContainer(MegatronContainer, HybridEngineContainer): + + def _align_qkv(self, x: torch.Tensor): + """ + Internal helper for accepting the head-contiguous weight matrix and chunking + the query, key, and value components. + """ + attention_head_size = x.shape[0] // self.num_attention_heads + new_x_shape = (self.num_attention_heads, attention_head_size) + x.size()[1:] + x_1 = x.view(*new_x_shape) + div_dim = len(x_1.size()) - 2 if len(x.shape) == 2 else -1 + (q, k, v) = torch.split(x_1, (x_1.shape[div_dim] // 3), dim=div_dim) + if len(q.shape) > 2: + x.data.copy_( + torch.cat((q.reshape(-1, q.shape[-1]), k.reshape(-1, q.shape[-1]), v.reshape(-1, q.shape[-1])), + dim=0).reshape(x.shape)) + else: + x.data.copy_(torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape)) + + def transform_for_inference(self) -> None: + """ + Overrides the HybridEngineContainer implementation. + + The alternative layout of the QKV matrix for Megatron is such that each head's Q, K, and V + are sequential in memory. This is different from the default layout in which all of the Qs + are sequential, followed by all of the Ks, and then all of the Vs. Here, we take the default + layout and transform it to the inference layout. + """ + if hasattr(self.qkvw, 'ds_id'): + from deepspeed.runtime.zero import GatheredParameters + from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus + param_list = [self.qkvw, self.qkvb] + non_active_params = [param for param in param_list if (hasattr(param, 'ds_id') and \ + param.ds_status == ZeroParamStatus.NOT_AVAILABLE)] + with GatheredParameters(non_active_params): + self._align_qkv(self.qkvw) + self._align_qkv(self.qkvb) + else: + self._align_qkv(self.qkvw) + self._align_qkv(self.qkvb) + + def _partition_qkv(self, x: torch.Tensor): + """ + Internal helper for taking contiguous QKV and partitioning it for contiguous + heads. 
+ """ + q_k_v = torch.split(x, (x.shape[0] // 3), dim=0) + attention_head_size = q_k_v[0].shape[0] // self.num_attention_heads + new_x_shape = (self.num_attention_heads, attention_head_size) + x.size()[1:] + q, k, v = [data.view(*new_x_shape) for data in q_k_v] + if len(q.shape) > 2: + x.data.copy_(torch.cat((q, k, v), dim=-2).reshape(-1, q.shape[-1])) + else: + x.data.copy_(torch.cat((q, k, v), dim=-1).reshape(-1)) + + def transform_for_training(self): + """ + Overrides the HybridEngineContainer implementation. + + The alternative layout of the QKV matrix for Megatron is such that each head's Q, K, and V + are sequential in memory. This is different from the default layout in which all of the Qs + are sequential, followed by all of the Ks, and then all of the Vs. This function takes the inference format and reverts it back to the default format. + """ + # If parameter is distributed, handle gathering it + if hasattr(self.qkvw, 'ds_id'): + from deepspeed.runtime.zero import GatheredParameters + from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus + param_list = [self.qkvw, self.qkvb] + non_active_params = [param for param in param_list if (hasattr(param, 'ds_id') and \ + param.ds_status == ZeroParamStatus.NOT_AVAILABLE)] + with GatheredParameters(non_active_params): + self._partition_qkv(self.qkvw) + self._partition_qkv(self.qkvb) + else: + self._partition_qkv(self.qkvw) + self._partition_qkv(self.qkvb) diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/megatron.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/megatron.py new file mode 100644 index 0000000000000000000000000000000000000000..4daccf7d7c8d4db06bd65243bd83a5f1fd50df2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/megatron.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from abc import ABC + + +class MegatronContainer(ABC): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.megatron_v2 = self.policy.is_megatron_v2 + + def _align_qkv_transposed(self, x): + attention_head_size = x.shape[-1] // self.num_attention_heads + new_x_shape = x.size()[:-1] + (self.num_attention_heads, attention_head_size) + x_1 = x.view(*new_x_shape) + (q, k, v) = torch.split(x_1, (x_1.shape[-1] // 3), dim=(x_1.dim() - 1)) + if len(q.shape) > 2: + return torch.cat((q.reshape(q.shape[0], -1), k.reshape(q.shape[0], -1), v.reshape(q.shape[0], -1)), + dim=-1).reshape(x.shape) + else: + return torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape) + + def transpose(self): + super().transpose() + if self.megatron_v2: + self.qkvw = torch.nn.parameter.Parameter(self._align_qkv_transposed(self.qkvw).contiguous()) + self.qkvb = torch.nn.parameter.Parameter(self._align_qkv_transposed(self.qkvb).contiguous()) diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/meta_tensor.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/meta_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..5fb55bc74339b329930a4ee44ae35292227e7581 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/meta_tensor.py @@ -0,0 +1,70 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import ABC, abstractmethod +from packaging import version as pkg_version +import torch + + +class MetaTensorContainer(ABC): + """ + NOTE: If you are using this feature with a container that + also inherits from `HybridEngineContainer`, ensure that `MetaTensorContainer` + is inherited before `HybridEngineContainer` in the class definition. + """ + + def __init__(self, **kwargs): + if pkg_version.parse('1.10') > pkg_version.parse(torch.__version__): + raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+") + super().__init__(**kwargs) + self.is_meta = False + self.ckpt_load_enabled = True + + def initialize_tensors(self, enable_training=False): + super().initialize_tensors(enable_training=enable_training) + self.is_meta = self.qkvw.is_meta + + def apply_tensor_parallelism(self, mp_replace, **kwargs): + if self.is_meta: + if self.qkvb is None: + self.module.attention.attn_qkvb = None + if self.dense_b is None: + self.module.attention.attn_ob = None + else: + super().apply_tensor_parallelism(mp_replace, **kwargs) + + def copy_data_to_new_module(self): + if self.is_meta: + if self.attn_nw is None: + self.module.mlp.attn_nw = self.attn_nw + self.module.mlp.attn_nb = self.attn_nb + else: + super().copy_data_to_new_module() + + def transpose(self): + if not self.is_meta: + super().transpose() + + @abstractmethod + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + """ + Load all the transformer parameter from the checkpoint file (sd). + In addition to the parameter names, we require two + more parameters to help read the data correctly + from the checkpoint and split the qkv heads in the + right order: + 1. `use_load_prefix` (Default: False): this specifies + whether we need to use the name of first abstraction + layer of the model for searching the parameter's name + in a checkpoint file. For more information of how this + is used please see + https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/load_checkpoint.py + 2. `split_qkv` (Default: True): we use this flag when splitting + the qkv parameter into heads. If it is False, it means the heads + of q, k, and v are stored together and needs to split in the + DeepSpeed-Inference API. + """ + raise NotImplementedError("A load_params() function must be defined in the model container \ + when inheriting the MetaTensorContainer feature") diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/split_qkv.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/split_qkv.py new file mode 100644 index 0000000000000000000000000000000000000000..f4c14d4e425a7e9096bae3bf5788ab7b9ca0dcc2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/features/split_qkv.py @@ -0,0 +1,159 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import abstractmethod +import torch + +from .hybrid_engine import HybridEngineContainer + + +class HybridSplitQKVContainer(HybridEngineContainer): + + def set_attention(self, qkvw, qkvb, dense_w, dense_b): + super().set_attention(qkvw, qkvb, dense_w, dense_b) + self.set_q_k_v() + + @abstractmethod + def set_q_k_v(self): + """ + In `set_q_k_v`, it is necessary to populate the following variables (where appropriate) + for the given model: + self.qw: q weight + self.qb: q bias + self.kw: k weight + self.kb: k bias + self.vw: v weight + self.vb: v bias + """ + raise NotImplementedError("A set_q_k_v() function must be defined in the model container \ + in order to set the unfused q, k, and v tensors.") + + def attention_qkv_mp(self, mp_replace, reversed_dim=False): + # Only need to alter + if self.module.attention.attn_qkvw is None: + params = [ + (self.module.attention.attn_qw, self.qw), + (self.module.attention.attn_qb, self.qb), + (self.module.attention.attn_kw, self.kw), + (self.module.attention.attn_kb, self.kb), + (self.module.attention.attn_vw, self.vw), + (self.module.attention.attn_vb, self.vb), + ] + for dst, src in params: + dst = mp_replace.copy( + dst[:self.qw.shape[0] // mp_replace.mp_size], src, int8=reversed_dim, + allocate_tensor=reversed_dim) if src is not None else None + else: + super().attention_qkv_mp(mp_replace) + + def release_qkv(self): + super().release_qkv() + split_qkv_params = [ + (self.module.attention.attn_qw, self.qw), + (self.module.attention.attn_qb, self.qb), + (self.module.attention.attn_kw, self.kw), + (self.module.attention.attn_kb, self.kb), + (self.module.attention.attn_vw, self.vw), + (self.module.attention.attn_vb, self.vb), + ] + + self._release_params(split_qkv_params) + + def reset_qkv(self): + self.qkvw.data[:self.qw.shape[0]] = self.qw.data + self.qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kw.data + self.qkvw.data[2 * self.qw.shape[0]:] = self.vw.data + + qkv_data = [self.qw.data, self.kw.data, self.vw.data] + + self.qw.data = self.qkvw.data[:self.qw.shape[0]] + self.kw.data = self.qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]] + self.vw.data = self.qkvw.data[2 * self.qw.shape[0]:] + + if self.qkvb is not None: + self.qkvb.data[:self.qw.shape[0]] = self.qb.data + self.qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kb.data + self.qkvb.data[2 * self.qw.shape[0]:] = self.vb.data + + qkv_data.extend([self.qb.data, self.kb.data, self.vb.data]) + + self.qb.data = self.qkvb.data[:self.qw.shape[0]] + self.kb.data = self.qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]] + self.vb.data = self.qkvb.data[2 * self.qw.shape[0]:] + + for data in qkv_data: + del data + + def reset_qkv_experimental(self): + """ + WIP - experimental and likely to be changed/improved. + Unused by keeping for now. 
+ """ + if self.module.attention.attn_qkvw is None: + self.module.attention.attn_qkvw = torch.empty(self.qw.shape[0] * 3, + self.qw.shape[0], + dtype=self.qw.dtype, + device=self.qw.device) + self.module.attention.attn_qkvb = torch.empty(self.qw.shape[0] * 3, + dtype=self.qw.dtype, + device=self.qw.device) + self.module.attention.attn_qkvw.data[:self.qw.shape[0]] = self.qw.data + self.module.attention.attn_qkvb.data[:self.qw.shape[0]] = self.qb.data + self.module.attention.attn_qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kw.data + self.module.attention.attn_qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kb.data + self.module.attention.attn_qkvw.data[2 * self.qw.shape[0]:] = self.vw.data + self.module.attention.attn_qkvb.data[2 * self.qw.shape[0]:] = self.vb.data + + qkv_data = [self.qw.data, \ + self.qb.data, \ + self.kw.data, \ + self.kb.data, \ + self.vw.data, \ + self.vb.data] + + self.qw.data = self.module.attention.attn_qkvw.data[:self.qw.shape[0]] + self.qb.data = self.module.attention.attn_qkvb.data[:self.qw.shape[0]] + self.kw.data = self.module.attention.attn_qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]] + self.kb.data = self.module.attention.attn_qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]] + self.vw.data = self.module.attention.attn_qkvw.data[2 * self.qw.shape[0]:] + self.vb.data = self.module.attention.attn_qkvb.data[2 * self.qw.shape[0]:] + + for data in qkv_data: + del data + + def set_attn_params_wo_copy(self, Z3_enabled=False): + self.module.attention.attn_ow = self.dense_w + self.module.attention.attn_ob = self.dense_b + if not Z3_enabled: + # In initialize_tensors, we create a fused qkvw with the appropriate shape + # and copy the qw, qb, kw, kb, vw, vb into it + self.module.attention.attn_qkvw = self.qkvw + self.module.attention.attn_qkvb = self.qkvb + + # We reset the data for qw (which is the original model parameter) to point + # to the fused weight matrix we have created here + self.qw.data = self.qkvw[:self.qw.shape[0], :] + self.kw.data = self.qkvw[self.qw.shape[0]:2 * self.qw.shape[0], :] + self.vw.data = self.qkvw[self.qw.shape[0] * 2:, :] + + # Assume if one of the biases is not None, then all of them are not None + if self.qb is not None: + self.qb.data = self.qkvb[:self.qw.shape[0]] + self.kb.data = self.qkvb[self.qw.shape[0]:2 * self.qw.shape[0]] + self.vb.data = self.qkvb[self.qw.shape[0] * 2:] + else: + # In ZeRO-3 this will be managed by ZeRO and handled separately in the + # forward of ds_attention + self.module.attention.attn_qw = self.qw + self.module.attention.attn_qb = self.qb + self.module.attention.attn_kw = self.kw + self.module.attention.attn_kb = self.kb + self.module.attention.attn_vw = self.vw + self.module.attention.attn_vb = self.vb + + def get_attn_params(self): + params = super().get_attn_params() + params.extend([self.qw, self.qb, self.kw, self.kb, self.vw, self.vb]) + return params diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gpt2.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gpt2.py new file mode 100644 index 0000000000000000000000000000000000000000..7a19aac34b447a7d5116ccceeb7687faa61ea174 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gpt2.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference +from ..policy import TransformerPolicy + + +class DS_GPT2Container(BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + return self.module + + +class HFGPT2LayerPolicy(TransformerPolicy): + _orig_layer_class = None + + def __init__(self, client_module, inference=True): + # HuggingFace GPT2 uses convolutional layer instead of linear layer + super().__init__(inference, linear_layer=False) + self.client_module = client_module + try: + import transformers + HFGPT2LayerPolicy._orig_layer_class = transformers.models.gpt2.modeling_gpt2.GPT2Block + except: + HFGPT2LayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + return self.client_module.attn.embed_dim, \ + self.client_module.attn.num_heads, \ + self.client_module.ln_1.eps, \ + DEFAULT_INTERMEDIATE_SIZE + + def attention(self, enable_training=False): + return self.client_module.attn.c_attn.weight, \ + self.client_module.attn.c_attn.bias, \ + self.client_module.attn.c_proj.weight, \ + self.client_module.attn.c_proj.bias + + def mlp(self, enable_training=False): + return self.client_module.mlp.c_fc.weight, \ + self.client_module.mlp.c_fc.bias, \ + self.client_module.mlp.c_proj.weight, \ + self.client_module.mlp.c_proj.bias + + def layernorm(self): + return self.client_module.ln_2.weight, \ + self.client_module.ln_2.bias, \ + self.client_module.ln_1.weight, \ + self.client_module.ln_1.bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gptj.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gptj.py new file mode 100644 index 0000000000000000000000000000000000000000..17c0a5027a4c94dcc206b69ad6f3fa6d1d746e17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gptj.py @@ -0,0 +1,132 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from .features.meta_tensor import MetaTensorContainer +from .features.split_qkv import HybridSplitQKVContainer +from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference +import torch +from torch.nn.parameter import Parameter +from ..policy import TransformerPolicy +from ..policy import transformer_param_names +from ..policy import maybe_copy +from ..policy import maybe_copy_qkv + +from ..policy import maybe_get_lora + + +class DS_GPTJContainer(MetaTensorContainer, HybridSplitQKVContainer, BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. 
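A note on the base-class ordering used by DS_GPTJContainer above: these containers rely on Python's cooperative multiple inheritance, where each feature mixin overrides a hook such as initialize_tensors() or apply_tensor_parallelism() and then chains to super(), so the order of the bases decides which feature is entered first. The sketch below is not DeepSpeed code; the names BaseContainer, MetaTensorFeature, HybridEngineFeature and ExampleContainer are made-up stand-ins that only illustrate the mechanism behind the docstring requirement that MetaTensorContainer be listed before HybridEngineContainer.

# Illustrative sketch of the cooperative-mixin pattern; all class names here are hypothetical.
class BaseContainer:
    def initialize_tensors(self):
        print("base: read weights from the client module")


class MetaTensorFeature:
    def initialize_tensors(self):
        super().initialize_tensors()
        print("meta-tensor feature: note whether the weights are meta tensors")


class HybridEngineFeature:
    def initialize_tensors(self):
        super().initialize_tensors()
        print("hybrid-engine feature: collect LoRA parameters")


# Mirrors the base order of DS_GPTJContainer: the meta-tensor mixin comes first
# in the MRO, so its override is entered first and delegates to the
# hybrid-engine feature (and then the base) via super().
class ExampleContainer(MetaTensorFeature, HybridEngineFeature, BaseContainer):
    pass


ExampleContainer().initialize_tensors()
# Resolution order: ExampleContainer -> MetaTensorFeature -> HybridEngineFeature -> BaseContainer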
+ + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + return self.module + + def set_lora_params(self): + """ + Necessary to implement for `HybridEngineContainer` + """ + self.lora_params = [ + maybe_get_lora(p) for p in [ + self.policy.client_module.mlp.fc_in, self.policy.client_module.mlp.fc_out, + self.policy.client_module.attn.q_proj, self.policy.client_module.attn.k_proj, + self.policy.client_module.attn.v_proj, self.policy.client_module.attn.out_proj + ] + ] + + def get_lora_matched_pair(self): + fc1_lora, fc2_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params() + ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (out_lora, self.dense_w), (q_lora, self.qw), + (k_lora, self.kw), (v_lora, self.vw)] + return ret + + def set_q_k_v(self): + """ + Necessary to implement for `HybridSplitQKVContainer` + """ + self.qw = self.policy.client_module.attn.q_proj.weight + self.qb = None + self.kw = self.policy.client_module.attn.k_proj.weight + self.kb = None + self.vw = self.policy.client_module.attn.v_proj.weight + self.vb = None + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'attn.q_proj.weight', \ + 'attn.k_proj.weight', \ + 'attn.v_proj.weight', \ + 'attn.out_proj.weight', \ + 'mlp.fc_in.weight', \ + 'mlp.fc_in.bias', \ + 'mlp.fc_out.weight', \ + 'mlp.fc_out.bias', \ + 'ln_1.weight', \ + 'ln_1.bias' + ) + maybe_copy_qkv(module.attention, + sd, + weight_quantizer, + mp_replace, + 'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]], + split_qkv=self.policy.split_qkv) + for i in range(3, 4): + maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1], + prefix + param_names[i]) + for i in range(4, 8): + maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i], + prefix + param_names[i]) + for i in range(8, 10): + maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i + 2], + prefix + param_names[i]) + + +class HFGPTJLayerPolicy(TransformerPolicy): + _orig_layer_class = None + + def __init__(self, client_module, inference=True): + super().__init__(inference, scale_attention=True) + self.client_module = client_module + try: + import transformers + HFGPTJLayerPolicy._orig_layer_class = transformers.models.gptj.modeling_gptj.GPTJBlock + except: + HFGPTJLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + return self.client_module.attn.embed_dim, \ + self.client_module.attn.num_attention_heads, \ + self.client_module.ln_1.eps, \ + DEFAULT_INTERMEDIATE_SIZE + + def attention(self, enable_training=False): + qw = self.client_module.attn.q_proj.weight + kw = self.client_module.attn.k_proj.weight + vw = self.client_module.attn.v_proj.weight + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training) + + return qkvw, \ + None, \ + self.client_module.attn.out_proj.weight, \ + None, + + def mlp(self, enable_training=False): + return self.client_module.mlp.fc_in.weight, \ + self.client_module.mlp.fc_in.bias, \ + self.client_module.mlp.fc_out.weight, \ + self.client_module.mlp.fc_out.bias + + def layernorm(self): + return None, \ + None, \ + self.client_module.ln_1.weight, \ + self.client_module.ln_1.bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gptneo.py 
b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gptneo.py new file mode 100644 index 0000000000000000000000000000000000000000..fca673b375e18a9b74741f86a9692bfd386f6a51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gptneo.py @@ -0,0 +1,145 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from .features.meta_tensor import MetaTensorContainer +from .features.split_qkv import HybridSplitQKVContainer +from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference +import torch +from torch.nn.parameter import Parameter +from ..policy import TransformerPolicy +from ..policy import transformer_param_names +from ..policy import maybe_copy +from ..policy import maybe_copy_qkv + +from ..policy import maybe_get_lora + + +class DS_GPTNEOContainer(MetaTensorContainer, HybridSplitQKVContainer, BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + return self.module + + def set_lora_params(self): + """ + Necessary to implement for `HybridEngineContainer` + """ + self.lora_params = [ + maybe_get_lora(p) for p in [ + self.policy.client_module.mlp.c_fc, self.policy.client_module.mlp.c_proj, + self.policy.client_module.attn.attention.q_proj, self.policy.client_module.attn.attention.k_proj, + self.policy.client_module.attn.attention.v_proj, self.policy.client_module.attn.attention.out_proj + ] + ] + + def set_q_k_v(self): + """ + Necessary to implement for `HybridSplitQKVContainer` + """ + self.qw = self.policy.client_module.attn.attention.q_proj.weight + self.qb = None + self.kw = self.policy.client_module.attn.attention.k_proj.weight + self.kb = None + self.vw = self.policy.client_module.attn.attention.v_proj.weight + self.vb = None + + def get_lora_matched_pair(self): + """ + Necessary to implement for `HybridEngineContainer` + """ + fc1_lora, fc2_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params() + ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (out_lora, self.dense_w), (q_lora, self.qw), + (k_lora, self.kw), (v_lora, self.vw)] + return ret + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'attn.attention.q_proj.weight', \ + 'attn.attention.k_proj.weight', \ + 'attn.attention.v_proj.weight', \ + 'attn.attention.out_proj.weight', \ + 'attn.attention.out_proj.bias', \ + 'mlp.c_fc.weight', \ + 'mlp.c_fc.bias', \ + 'mlp.c_proj.weight', \ + 'mlp.c_proj.bias', \ + 'ln_2.weight', \ + 'ln_2.bias', \ + 'ln_1.weight', \ + 'ln_1.bias' + ) + maybe_copy_qkv(module.attention, + sd, + weight_quantizer, + mp_replace, + 'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]], + split_qkv=self.policy.split_qkv) + for i in range(3, 5): + maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1], + prefix + param_names[i]) + for i in range(5, 11): + maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1], + prefix + param_names[i]) + for i in range(11, 13): + maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1], 
+ prefix + param_names[i]) + + +class HFGPTNEOLayerPolicy(TransformerPolicy): + + def __init__(self, client_module, inference=True): + super().__init__(inference, scale_attention=False) + self.client_module = client_module + try: + import transformers + HFGPTNEOLayerPolicy._orig_layer_class = transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoBlock + except: + HFGPTNEOLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + return self.client_module.attn.attention.embed_dim, \ + self.client_module.attn.attention.num_heads, \ + self.client_module.ln_1.eps, \ + DEFAULT_INTERMEDIATE_SIZE + + def get_q_k_v(self): + return self.client_module.attn.attention.q_proj.weight, \ + None, \ + self.client_module.attn.attention.k_proj.weight, \ + None, \ + self.client_module.attn.attention.v_proj.weight, \ + None + + def attention(self, enable_training=False): + qw = self.client_module.attn.attention.q_proj.weight + kw = self.client_module.attn.attention.k_proj.weight + vw = self.client_module.attn.attention.v_proj.weight + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training) + + return qkvw, \ + None, \ + self.client_module.attn.attention.out_proj.weight, \ + self.client_module.attn.attention.out_proj.bias + + def mlp(self, enable_training=False): + return self.client_module.mlp.c_fc.weight, \ + self.client_module.mlp.c_fc.bias, \ + self.client_module.mlp.c_proj.weight, \ + self.client_module.mlp.c_proj.bias + + def layernorm(self): + return self.client_module.ln_2.weight, \ + self.client_module.ln_2.bias, \ + self.client_module.ln_1.weight, \ + self.client_module.ln_1.bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gptneox.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gptneox.py new file mode 100644 index 0000000000000000000000000000000000000000..3978b166b329b341232da61811ecaf5683c5578d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/gptneox.py @@ -0,0 +1,146 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from .features.meta_tensor import MetaTensorContainer +from .features.hybrid_megatron import HybridMegatronContainer +from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference +import torch +from ..policy import TransformerPolicy +from ..policy import transformer_param_names +from ..policy import maybe_copy +from packaging import version as pkg_version + +from ..policy import maybe_get_lora + + +class DS_GPTNEOXContainer(MetaTensorContainer, HybridMegatronContainer, BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. 
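For context on the HybridMegatronContainer feature that DS_GPTNEOXContainer mixes in above: per the docstrings earlier in this diff, Megatron-style weights keep each attention head's q, k and v slices contiguous, while the fused inference layout groups all q rows first, then all k, then all v; _align_qkv and _partition_qkv convert between the two. The snippet below is a self-contained toy illustration of that reordering on a 1-D tensor (mirroring the bias branch of _align_qkv), not DeepSpeed code; the head count and head size are made up.

# Toy illustration of the Megatron -> fused-inference QKV reordering (not DeepSpeed code).
import torch

num_heads, head_dim = 2, 3
# Fake "Megatron layout" bias: head 0 -> q0,k0,v0 followed by head 1 -> q1,k1,v1.
megatron = torch.arange(num_heads * 3 * head_dim, dtype=torch.float32)

# View as (heads, 3 * head_dim), split each head's chunk into q, k, v,
# then concatenate all q chunks, all k chunks, all v chunks.
per_head = megatron.view(num_heads, 3 * head_dim)
q, k, v = torch.split(per_head, head_dim, dim=-1)
aligned = torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1)

print(megatron.tolist())  # 0..17 in per-head (Megatron) order
print(aligned.tolist())   # both heads' q values, then both k, then both v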
+ + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + + if self.megatron_v2: + self.module.config.rotate_half = True + self.module.config.rotate_every_two = False + + return self.module + + def get_lora_matched_pair(self): + """ + Necessary to implement for `HybridEngineContainer` + """ + fc1_lora, fc2_lora, qkv_lora, out_lora = self.get_lora_params() + ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (qkv_lora, self.qkvw), (out_lora, self.dense_w)] + return ret + + def set_lora_params(self): + """ + Necessary to implement for `HybridEngineContainer` + """ + if GPTNEOXLayerPolicy.version == 0: + attention = self.policy.client_module.attention + else: + attention = self.policy.client_module.self_attention + + self.lora_params = [ + maybe_get_lora(p) for p in [ + self.policy.client_module.mlp.dense_h_to_4h, self.policy.client_module.mlp.dense_4h_to_h, + attention.query_key_value, attention.dense + ] + ] + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'attention.query_key_value.weight', \ + 'attention.query_key_value.bias', \ + 'attention.dense.weight', \ + 'attention.dense.bias', \ + 'mlp.dense_h_to_4h.weight', \ + 'mlp.dense_h_to_4h.bias', \ + 'mlp.dense_4h_to_h.weight', \ + 'mlp.dense_4h_to_h.bias', \ + 'post_attention_layernorm.weight', \ + 'post_attention_layernorm.bias', \ + 'input_layernorm.weight', \ + 'input_layernorm.bias' + ) + for i in range(0, 2): + maybe_copy(module.attention, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i], + prefix + param_names[i], + qkv=True, + megatron_v2=self.policy.is_megatron_v2, + split_qkv=self.policy.split_qkv, + heads=self.policy.client_module.attention.num_attention_heads) + for i in range(2, 4): + maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i], + prefix + param_names[i]) + for i in range(4, 10): + maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i], + prefix + param_names[i]) + for i in range(10, 12): + maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i], prefix + param_names[i]) + + +class GPTNEOXLayerPolicy(TransformerPolicy): + _orig_layer_class = None + version = 0 + + def __init__(self, client_module, inference=True, megatron_v2=True, split_qkv=False): + super().__init__(inference, megatron_v2=megatron_v2, split_qkv=split_qkv) + self.client_module = client_module + if GPTNEOXLayerPolicy._orig_layer_class is None: + if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"): + GPTNEOXLayerPolicy._orig_layer_class = None + else: + try: + from transformers import GPTNeoXLayer + GPTNEOXLayerPolicy._orig_layer_class = GPTNeoXLayer + except ImportError: + GPTNEOXLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + if GPTNEOXLayerPolicy.version == 0: + attention = self.client_module.attention + else: + attention = self.client_module.self_attention + + return self.client_module.attention.hidden_size, \ + self.client_module.attention.num_attention_heads, \ + self.client_module.input_layernorm.eps, \ + DEFAULT_INTERMEDIATE_SIZE + + def attention(self, enable_training=False): + if GPTNEOXLayerPolicy.version == 0: + attention = self.client_module.attention + else: + attention = self.client_module.self_attention + + return attention.query_key_value.weight, \ + 
attention.query_key_value.bias, \ + attention.dense.weight, \ + attention.dense.bias + + def mlp(self, enable_training=False): + return self.client_module.mlp.dense_h_to_4h.weight, \ + self.client_module.mlp.dense_h_to_4h.bias, \ + self.client_module.mlp.dense_4h_to_h.weight, \ + self.client_module.mlp.dense_4h_to_h.bias + + def layernorm(self): + return self.client_module.post_attention_layernorm.weight, \ + self.client_module.post_attention_layernorm.bias, \ + self.client_module.input_layernorm.weight, \ + self.client_module.input_layernorm.bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/internlm.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/internlm.py new file mode 100644 index 0000000000000000000000000000000000000000..31255d4b3ca5a0199ee572219b6d75fd70d478c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/internlm.py @@ -0,0 +1,181 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import importlib + +import torch +from torch.nn.parameter import Parameter + +from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference +from deepspeed.utils.types import ActivationFuncType, NormType + +from ..policy import (TransformerPolicy, maybe_copy, maybe_copy_geglu, maybe_copy_qkv, maybe_get_lora, + transformer_param_names) +from .base import * +from .features import HybridGatedMLPContainer, HybridSplitQKVContainer + + +class DS_InternLMContainer(HybridGatedMLPContainer, HybridSplitQKVContainer, BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + + _config.rotate_half = True + _config.rotate_every_two = False + _config.rotary_dim = self.hidden_size // self.num_attention_heads + self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group) + + return self.module + + def set_lora_params(self): + """ + Necessary to implement for `HybridEngineContainer` + """ + self.lora_params = [ + maybe_get_lora(p) for p in [ + self.policy.client_module.mlp.up_proj.weight, self.policy.client_module.mlp.gate_proj.weight, + self.policy.client_module.mlp.down_proj.weight, self.policy.client_module.self_attn.q_proj.weight, + self.policy.client_module.self_attn.k_proj.weight, self.policy.client_module.self_attn.v_proj.weight, + self.policy.client_module.self_attn.o_proj.weight + ] + ] + + def get_lora_matched_pair(self): + up_proj_lora, gate_proj_lora, down_proj_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params() + ret = [(up_proj_lora, self.inter_up_w), (gate_proj_lora, self.inter_gate_w), (down_proj_lora, self._4hh_w), + (out_lora, self.dense_w), (q_lora, self.qw), (k_lora, self.kw), (v_lora, self.vw)] + return ret + + def set_q_k_v(self): + """ + Necessary to implement for `HybridSplitQKVContainer` + """ + self.qw = self.policy.client_module.self_attn.q_proj.weight + self.qb = self.policy.client_module.self_attn.q_proj.bias + self.kw = self.policy.client_module.self_attn.k_proj.weight + self.kb = self.policy.client_module.self_attn.k_proj.bias + self.vw = self.policy.client_module.self_attn.v_proj.weight + self.vb = self.policy.client_module.self_attn.v_proj.bias + + def set_mlp_gate(self): + """ + Necessary to implement for `HybridGatedMLPContainer` + """ + self.inter_up_w = 
self.policy.client_module.mlp.up_proj.weight + self.inter_up_b = None + self.inter_gate_w = self.policy.client_module.mlp.gate_proj.weight + self.inter_gate_b = None + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'self_attn.q_proj.weight', \ + 'self_attn.k_proj.weight', \ + 'self_attn.v_proj.weight', \ + 'self_attn.o_proj.weight', \ + 'mlp.up_proj.weight', \ + 'mlp.gate_proj.weight', \ + 'mlp.down_proj.weight', \ + 'input_layernorm.weight', \ + 'post_attention_layernorm.weight' + 'self_attn.q_proj.bias', \ + 'self_attn.k_proj.bias', \ + 'self_attn.v_proj.bias', \ + 'self_attn.o_proj.bias', \ + ) + + maybe_copy_qkv(module.attention, + sd, + weight_quantizer, + mp_replace, + 'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]], + split_qkv=self.policy.split_qkv) + maybe_copy_qkv(module.attention, + sd, + weight_quantizer, + mp_replace, + 'attn_qkvb', [prefix + param_names[9], prefix + param_names[10], prefix + param_names[11]], + split_qkv=self.policy.split_qkv) + maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[2], + prefix + param_names[3]) + maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[3], + prefix + param_names[12]) + maybe_copy_geglu(module.mlp, sd, weight_quantizer, mp_replace, 'inter_w', + [prefix + param_names[4], prefix + param_names[5]]) + maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, 'output_w', prefix + param_names[6]) + + maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[8], prefix + param_names[7]) + maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[10], prefix + param_names[8]) + + +class InternLMLayerPolicy(TransformerPolicy): + _orig_layer_class = [] + _orig_layer_class_inited = False + + def __init__(self, client_module, inference=True): + super().__init__( + inference, + mlp_act_func_type=ActivationFuncType.GATED_SILU, + norm_type=NormType.RMSNorm, + ) + self.client_module = client_module + + self._init_orig_layer_class_once() + + def _init_orig_layer_class_once(self): + if InternLMLayerPolicy._orig_layer_class_inited: + return + + for sub_pkg in ['', '.internlm-7b', '.internlm-chat-7b']: + try: + from transformers.utils import TRANSFORMERS_DYNAMIC_MODULE_NAME + module = importlib.import_module(f"{TRANSFORMERS_DYNAMIC_MODULE_NAME}{sub_pkg}.modeling_internlm") + if module.InternLMDecoderLayer not in InternLMLayerPolicy._orig_layer_class: + InternLMLayerPolicy._orig_layer_class.append(module.InternLMDecoderLayer) + except ImportError: + continue + + InternLMLayerPolicy._orig_layer_class_inited = True + + def get_hidden_heads(self): + return self.client_module.self_attn.q_proj.weight.shape[1], \ + self.client_module.self_attn.num_heads, \ + self.client_module.input_layernorm.variance_epsilon, \ + self.client_module.mlp.gate_proj.weight.shape[0] + + def attention(self, enable_training=False): + qw = self.client_module.self_attn.q_proj.weight + kw = self.client_module.self_attn.k_proj.weight + vw = self.client_module.self_attn.v_proj.weight + qb = self.client_module.self_attn.q_proj.bias + kb = self.client_module.self_attn.k_proj.bias + vb = self.client_module.self_attn.v_proj.bias + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training) + qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training) + + return qkvw, \ + qkvb, \ + self.client_module.self_attn.o_proj.weight, \ + 
self.client_module.self_attn.o_proj.bias + + def mlp(self, enable_training=False): + mlp1_up = self.client_module.mlp.up_proj.weight + mlp1_gate = self.client_module.mlp.gate_proj.weight + mlp2 = self.client_module.mlp.down_proj.weight + + mlp1 = Parameter(torch.cat((mlp1_up, mlp1_gate), dim=0), requires_grad=enable_training) + + return mlp1, None, mlp2, None + + def layernorm(self): + return self.client_module.post_attention_layernorm.weight, \ + None, \ + self.client_module.input_layernorm.weight, \ + None diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/llama.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/llama.py new file mode 100644 index 0000000000000000000000000000000000000000..f6157e5cdfed8eeb4e15ba560c5e1a7a28b7c6f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/llama.py @@ -0,0 +1,166 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from .features import HybridSplitQKVContainer, HybridGatedMLPContainer, MetaTensorContainer +from deepspeed.utils.types import ActivationFuncType, NormType +from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference +import torch +from torch.nn.parameter import Parameter + +from ..policy import ( + TransformerPolicy, + transformer_param_names, + maybe_copy, + maybe_copy_qkv, + maybe_copy_geglu, + maybe_get_lora, +) + + +class DS_LLAMAContainer(MetaTensorContainer, HybridGatedMLPContainer, HybridSplitQKVContainer, + BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + + _config.rotate_half = True + _config.rotate_every_two = False + _config.rotary_dim = self.hidden_size // self.num_attention_heads + _config.rope_theta = self.policy.client_module.self_attn.rope_theta + self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group) + + return self.module + + def set_lora_params(self): + """ + Necessary to implement for `HybridEngineContainer` + """ + self.lora_params = [ + maybe_get_lora(p) for p in [ + self.policy.client_module.mlp.up_proj.weight, self.policy.client_module.mlp.gate_proj.weight, + self.policy.client_module.mlp.down_proj.weight, self.policy.client_module.self_attn.q_proj.weight, + self.policy.client_module.self_attn.k_proj.weight, self.policy.client_module.self_attn.v_proj.weight, + self.policy.client_module.self_attn.o_proj.weight + ] + ] + + def get_lora_matched_pair(self): + up_proj_lora, gate_proj_lora, down_proj_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params() + ret = [(up_proj_lora, self.inter_up_w), (gate_proj_lora, self.inter_gate_w), (down_proj_lora, self._4hh_w), + (out_lora, self.dense_w), (q_lora, self.qw), (k_lora, self.kw), (v_lora, self.vw)] + return ret + + def set_q_k_v(self): + """ + Necessary to implement for `HybridSplitQKVContainer` + """ + self.qw = self.policy.client_module.self_attn.q_proj.weight + self.qb = None + self.kw = self.policy.client_module.self_attn.k_proj.weight + self.kb = None + self.vw = self.policy.client_module.self_attn.v_proj.weight + self.vb = None + + def set_mlp_gate(self): + """ + Necessary to implement for `HybridGatedMLPContainer` + """ + self.inter_up_w = self.policy.client_module.mlp.up_proj.weight + self.inter_up_b = None + 
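# Note: up_proj and gate_proj are kept as separate tensors here for the hybrid engine; the inference policy's mlp() concatenates them into a single gated inter weight. +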
self.inter_gate_w = self.policy.client_module.mlp.gate_proj.weight + self.inter_gate_b = None + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'self_attn.q_proj.weight', \ + 'self_attn.k_proj.weight', \ + 'self_attn.v_proj.weight', \ + 'self_attn.o_proj.weight', \ + 'mlp.up_proj.weight', \ + 'mlp.gate_proj.weight', \ + 'mlp.down_proj.weight', \ + 'post_attention_layernorm.weight', \ + 'input_layernorm.weight', + ) + + maybe_copy_qkv(module.attention, + sd, + weight_quantizer, + mp_replace, + 'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]], + split_qkv=self.policy.split_qkv) + for i in range(3, 4): + maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1], + prefix + param_names[i]) + maybe_copy_geglu(module.mlp, sd, weight_quantizer, mp_replace, 'inter_w', + [prefix + param_names[4], prefix + param_names[5]]) + maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, 'output_w', prefix + param_names[6]) + + maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[8], prefix + param_names[7]) + maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[10], prefix + param_names[8]) + + # This line is necessary for proper output when kernels + meta tensors are used in Llama models + # TODO: Investigate root-cause and fix meta tensor loading + module.mlp.output_b = None + + +class LLAMALayerPolicy(TransformerPolicy): + + def __init__(self, client_module, inference=True): + super().__init__( + inference, + mlp_act_func_type=ActivationFuncType.GATED_SILU, + norm_type=NormType.RMSNorm, + ) + self.client_module = client_module + try: + import transformers + LLAMALayerPolicy._orig_layer_class = transformers.models.llama.modeling_llama.LlamaDecoderLayer # type: ignore + except: + LLAMALayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + hidden_heads = ( + getattr(self.client_module.self_attn.q_proj.weight, "ds_shape", + self.client_module.self_attn.q_proj.weight.shape)[1], + self.client_module.self_attn.num_heads, + self.client_module.input_layernorm.variance_epsilon, + getattr(self.client_module.mlp.gate_proj.weight, "ds_shape", + self.client_module.mlp.gate_proj.weight.shape)[0], + ) + return hidden_heads + + def attention(self, enable_training=False): + qw = self.client_module.self_attn.q_proj.weight + kw = self.client_module.self_attn.k_proj.weight + vw = self.client_module.self_attn.v_proj.weight + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training) + + return qkvw, \ + None, \ + self.client_module.self_attn.o_proj.weight, \ + None + + def mlp(self, enable_training=False): + mlp1_up = self.client_module.mlp.up_proj.weight + mlp1_gate = self.client_module.mlp.gate_proj.weight + mlp2 = self.client_module.mlp.down_proj.weight + + mlp1 = Parameter(torch.cat((mlp1_up, mlp1_gate), dim=0), requires_grad=enable_training) + + return mlp1, None, mlp2, None + + def layernorm(self): + return self.client_module.post_attention_layernorm.weight, \ + None, \ + self.client_module.input_layernorm.weight, \ + None diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/llama2.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/llama2.py new file mode 100644 index 0000000000000000000000000000000000000000..b531890ab859dfbff51a401819846299d8e77c08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/llama2.py @@ -0,0 
+1,158 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from .features import HybridSplitQKVContainer, HybridGatedMLPContainer, MetaTensorContainer +from deepspeed.utils.types import ActivationFuncType, NormType +from deepspeed.model_implementations.transformers.ds_llama2 import DeepSpeedLlama2Inference +import torch +from torch.nn.parameter import Parameter + +from ..policy import ( + TransformerPolicy, + transformer_param_names, + maybe_copy, + maybe_copy_qkv, + maybe_copy_geglu, + maybe_get_lora, +) + + +class DS_LLAMA2Container(MetaTensorContainer, HybridGatedMLPContainer, HybridSplitQKVContainer, + BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + + _config.rotate_half = False + _config.rotate_every_two = True + _config.rotary_dim = self.hidden_size // self.num_attention_heads + _config.num_kv = self.policy.client_module.attention.n_kv_heads + self.module = DeepSpeedLlama2Inference(_config, mp_group=self.mp_group) + + return self.module + + def set_lora_params(self): + """ + Necessary to implement for `HybridEngineContainer` + """ + self.lora_params = [ + maybe_get_lora(p) for p in [ + self.policy.client_module.feed_forward.w3.weight, self.policy.client_module.feed_forward.w1.weight, + self.policy.client_module.feed_forward.w2.weight, self.policy.client_module.attention.wq.weight, + self.policy.client_module.attention.wk.weight, self.policy.client_module.attention.wv.weight, + self.policy.client_module.attention.wo.weight + ] + ] + + def get_lora_matched_pair(self): + up_proj_lora, gate_proj_lora, down_proj_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params() + ret = [(up_proj_lora, self.inter_up_w), (gate_proj_lora, self.inter_gate_w), (down_proj_lora, self._4hh_w), + (out_lora, self.dense_w), (q_lora, self.qw), (k_lora, self.kw), (v_lora, self.vw)] + return ret + + def set_q_k_v(self): + """ + Necessary to implement for `HybridSplitQKVContainer` + """ + self.qw = self.policy.client_module.attention.wq.weight + self.qb = None + self.kw = self.policy.client_module.attention.wk.weight + self.kb = None + self.vw = self.policy.client_module.attention.wv.weight + self.vb = None + + def set_mlp_gate(self): + """ + Necessary to implement for `HybridGatedMLPContainer` + """ + self.inter_up_w = self.policy.client_module.feed_forward.w2.weight + self.inter_up_b = None + self.inter_gate_w = self.policy.client_module.feed_forward.w1.weight + self.inter_gate_b = None + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'attention.wq.weight', \ + 'attention.wk.weight', \ + 'attention.wv.weight', \ + 'attention.wo.weight', \ + 'feed_forward.w3.weight', \ + 'feed_forward.w1.weight', \ + 'feed_forward.w2.weight', \ + 'ffn_norm.weight', \ + 'attention_norm.weight' + ) + + maybe_copy_qkv(module.attention, + sd, + weight_quantizer, + mp_replace, + 'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]], + split_qkv=self.policy.split_qkv) + for i in range(3, 4): + maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1], + prefix + param_names[i]) + maybe_copy_geglu(module.mlp, sd, weight_quantizer, mp_replace, 'inter_w', + [prefix + param_names[4], prefix + param_names[5]]) + 
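# feed_forward.w3 (up) and feed_forward.w1 (gate) are fused into the single 'inter_w' tensor consumed by the inference kernel. +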
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, 'output_w', prefix + param_names[6]) + + maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[8], prefix + param_names[7]) + maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[10], prefix + param_names[8]) + + +class LLAMA2LayerPolicy(TransformerPolicy): + + def __init__(self, client_module, inference=True): + super().__init__( + inference, + mlp_act_func_type=ActivationFuncType.GATED_SILU, + norm_type=NormType.RMSNorm, + ) + self.client_module = client_module + try: + import llama + LLAMA2LayerPolicy._orig_layer_class = llama.model.TransformerBlock # type: ignore + except: + LLAMA2LayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + return self.client_module.attention.wq.weight.shape[1], \ + self.client_module.n_heads, \ + self.client_module.ffn_norm.eps, \ + (self.client_module.feed_forward.w1.weight.shape[0] * \ + deepspeed.comm.get_world_size() if deepspeed.comm.is_initialized() else 1) # this is a hack to inject when model is already partitioned! + + def attention(self, enable_training=False): + qw = self.client_module.attention.wq.weight + kw = self.client_module.attention.wk.weight + vw = self.client_module.attention.wv.weight + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training) + + return qkvw, \ + None, \ + self.client_module.attention.wo.weight, \ + None + + def mlp(self, enable_training=False): + mlp1_up = self.client_module.feed_forward.w3.weight + mlp1_gate = self.client_module.feed_forward.w1.weight + mlp2 = self.client_module.feed_forward.w2.weight + + mlp1 = Parameter(torch.cat((mlp1_up, mlp1_gate), dim=0), requires_grad=enable_training) + + return mlp1, None, mlp2, None + + def layernorm(self): + return self.client_module.ffn_norm.weight, \ + None, \ + self.client_module.attention_norm.weight, \ + None diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/megatron_gpt.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/megatron_gpt.py new file mode 100644 index 0000000000000000000000000000000000000000..2851dd246d99c94235cc6c0abce05f0c73375138 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/megatron_gpt.py @@ -0,0 +1,117 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from .features.megatron import MegatronContainer +from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference +import torch +from ..policy import TransformerPolicy +from packaging import version as pkg_version + + +class DS_MegatronGPTContainer(MegatronContainer, BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. 
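+ # For Megatron-v2 checkpoints, create_module() below switches the rotary embedding to the rotate-half convention.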
+ + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedMegatronGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + + if self.megatron_v2: + self.module.config.rotate_half = True + self.module.config.rotate_every_two = False + + return self.module + + +# TODO: Megatron GPT MoE inherits from Megatron policy and replaces mlp +# TODO: Generalize MoE overall goal, expand beyond Megatron +class MegatronLayerPolicy(TransformerPolicy): + _orig_layer_class = None + version = 0 + moe_type = 'standard' + megatron_v2 = True + use_mup = False + + def __init__(self, client_module, inference=True): + super().__init__(inference, megatron_v2=MegatronLayerPolicy.megatron_v2, use_mup=MegatronLayerPolicy.use_mup) + self.client_module = client_module + # we use megatron version to differentiate between the old and new + # megatron-lm source code + if MegatronLayerPolicy._orig_layer_class is None: + if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"): + MegatronLayerPolicy._orig_layer_class = None + else: + try: + from megatron.model.transformer import ParallelTransformerLayer + MegatronLayerPolicy._orig_layer_class = ParallelTransformerLayer + MegatronLayerPolicy.version = 1 + except ImportError: + MegatronLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + if MegatronLayerPolicy.version == 0: + return self.client_module.attention.query_key_value.weight.shape[1], \ + self.client_module.attention.num_attention_heads, \ + self.client_module.input_layernorm.eps, \ + DEFAULT_INTERMEDIATE_SIZE + else: + return self.client_module.self_attention.query_key_value.weight.shape[1], \ + self.client_module.self_attention.num_attention_heads, \ + self.client_module.input_layernorm.eps, \ + DEFAULT_INTERMEDIATE_SIZE + + def attention(self, enable_training=False): + if self.inference: + if MegatronLayerPolicy.version == 0: + attention = self.client_module.attention + else: + attention = self.client_module.self_attention + + return attention.query_key_value.weight, \ + attention.query_key_value.bias, \ + attention.dense.weight, \ + attention.dense.bias + + def mlp(self, moe_type='standard', enable_training=False): + from deepspeed.moe.utils import has_moe_layers + moe, _ = has_moe_layers(self.client_module) + + if moe: + moe_experts = self.client_module.mlp.deepspeed_moe.experts.deepspeed_experts if moe_type == 'standard' else \ + self.client_module.mlp.moe.deepspeed_moe.experts.deepspeed_experts + num_experts = len(moe_experts) + if moe_type == 'standard': + return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)] + else: + + return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)], \ + self.client_module.mlp.mlp.dense_h_to_4h.weight, \ + self.client_module.mlp.mlp.dense_h_to_4h.bias, \ + self.client_module.mlp.mlp.dense_4h_to_h.weight, \ + self.client_module.mlp.mlp.dense_4h_to_h.bias, \ + self.client_module.mlp.coefficient.weight + + else: + return self.client_module.mlp.dense_h_to_4h.weight, \ + 
self.client_module.mlp.dense_h_to_4h.bias, \ + self.client_module.mlp.dense_4h_to_h.weight, \ + self.client_module.mlp.dense_4h_to_h.bias + + def layernorm(self): + return self.client_module.post_attention_layernorm.weight, \ + self.client_module.post_attention_layernorm.bias, \ + self.client_module.input_layernorm.weight, \ + self.client_module.input_layernorm.bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/megatron_gpt_moe.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/megatron_gpt_moe.py new file mode 100644 index 0000000000000000000000000000000000000000..c4063be05b6c5f232470685a292be51667c027a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/megatron_gpt_moe.py @@ -0,0 +1,86 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from .base_moe import * +from .features.megatron import MegatronContainer +from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference +import torch +from .megatron_gpt import MegatronLayerPolicy +from packaging import version as pkg_version + + +class DS_MegatronGPTMoEContainer(MegatronContainer, BaseTransformerMoEContainer): + + def __init__(self, policy, config, model_config, layer_id): + super().__init__(policy, config, model_config, layer_id) + + # All model specific things should be defined here instead of the base class. + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedMegatronGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + + if self.megatron_v2: + self.module.config.rotate_half = True + self.module.config.rotate_every_two = False + + return self.module + + +# TODO: Megatron GPT MoE inherits from Megatron policy and replaces mlp +# TODO: Generalize MoE overall goal, expand beyond Megatron +class MegatronMoELayerPolicy(MegatronLayerPolicy): + _orig_layer_class = None + version = 0 + moe_type = 'standard' + num_experts = 1 + + def __init__(self, client_module, inference=True): + super().__init__(inference) + self.client_module = client_module + # we use megatron version to differentiate between the old and new + # megatron-lm source code + if MegatronMoELayerPolicy._orig_layer_class is None: + if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"): + MegatronMoELayerPolicy._orig_layer_class = None + else: + try: + from megatron.model.transformer import ParallelTransformerLayer + MegatronMoELayerPolicy._orig_layer_class = ParallelTransformerLayer + except ImportError: + MegatronMoELayerPolicy._orig_layer_class = None + + def get_num_experts(self): + return self.num_experts + + def mlp(self, moe_type='standard', enable_training=False): + # for now, all of this is tightly coupled to megatron-deepspeed moe implementation + # todo: think and refactor this to be more general + + #from deepspeed.moe.utils import has_moe_layers + #moe, _ = has_moe_layers(self.client_module) + + moe_experts = self.client_module.mlp.deepspeed_moe.experts.deepspeed_experts if moe_type == 'standard' else \ + self.client_module.mlp.moe.deepspeed_moe.experts.deepspeed_experts + num_experts = len(moe_experts) + self.num_experts = num_experts + + if moe_type == 'standard': + return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \ 
+ [moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)] + else: + return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)], \ + self.client_module.mlp.mlp.dense_h_to_4h.weight, \ + self.client_module.mlp.mlp.dense_h_to_4h.bias, \ + self.client_module.mlp.mlp.dense_4h_to_h.weight, \ + self.client_module.mlp.mlp.dense_4h_to_h.bias, \ + self.client_module.mlp.coefficient.weight diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/opt.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/opt.py new file mode 100644 index 0000000000000000000000000000000000000000..eba619c79d6ebf5325c12f42c921b77dcb6e5ad7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/opt.py @@ -0,0 +1,160 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base import * +from .features import MetaTensorContainer, HybridSplitQKVContainer +from deepspeed.model_implementations.transformers.ds_opt import DeepSpeedOPTInference +import torch +from torch.nn.parameter import Parameter +from ..policy import TransformerPolicy +from ..policy import transformer_param_names +from ..policy import maybe_copy +from ..policy import maybe_copy_qkv +from ..policy import maybe_get_lora +from deepspeed.utils.types import ActivationFuncType + + +class DS_OPTContainer(MetaTensorContainer, HybridSplitQKVContainer, BaseTransformerContainer): + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. 
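+ # OPT keeps separate q/k/v projections; set_q_k_v() below stores references to them for the split-QKV hybrid engine path.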
+ + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedOPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + return self.module + + def set_lora_params(self): + """ + Necessary to implement for `HybridEngineContainer` + """ + self.lora_params = [ + maybe_get_lora(p) for p in [ + self.policy.client_module.fc1, + self.policy.client_module.fc2, + self.policy.client_module.self_attn.q_proj, + self.policy.client_module.self_attn.k_proj, + self.policy.client_module.self_attn.v_proj, + self.policy.client_module.self_attn.out_proj, + ] + ] + + def set_q_k_v(self): + """ + Necessary to implement for `HybridSplitQKVContainer` + """ + self.qw = self.policy.client_module.self_attn.q_proj.weight + self.qb = self.policy.client_module.self_attn.q_proj.bias + self.kw = self.policy.client_module.self_attn.k_proj.weight + self.kb = self.policy.client_module.self_attn.k_proj.bias + self.vw = self.policy.client_module.self_attn.v_proj.weight + self.vb = self.policy.client_module.self_attn.v_proj.bias + + def get_lora_matched_pair(self): + fc1_lora, fc2_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params() + ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (out_lora, self.dense_w), (q_lora, self.qw), + (k_lora, self.kw), (v_lora, self.vw)] + return ret + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'self_attn.q_proj.weight', \ + 'self_attn.k_proj.weight', \ + 'self_attn.v_proj.weight', \ + 'self_attn.q_proj.bias', \ + 'self_attn.k_proj.bias', \ + 'self_attn.v_proj.bias', \ + 'self_attn.out_proj.weight', \ + 'self_attn.out_proj.bias', \ + 'fc1.weight', \ + 'fc1.bias', \ + 'fc2.weight', \ + 'fc2.bias', \ + 'final_layer_norm.weight', \ + 'final_layer_norm.bias', \ + 'self_attn_layer_norm.weight', \ + 'self_attn_layer_norm.bias' + ) + + for i in range(0, 6, 3): + maybe_copy_qkv(module.attention, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i // 3], + [prefix + param_names[i], prefix + param_names[i + 1], prefix + param_names[i + 2]], + split_qkv=self.policy.split_qkv) + for i in range(6, 8): + maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4], + prefix + param_names[i]) + for i in range(8, 14): + maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4], + prefix + param_names[i]) + for i in range(14, 16): + maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4], + prefix + param_names[i]) + + +class HFOPTLayerPolicy(TransformerPolicy): + _orig_layer_class = None + + def __init__(self, client_module, inference=True, use_load_prefix=True): + super().__init__(inference, linear_layer=True, pre_attn_norm=True, use_load_prefix=use_load_prefix) + self.client_module = client_module + try: + import transformers + HFOPTLayerPolicy._orig_layer_class = transformers.models.opt.modeling_opt.OPTDecoderLayer + except: + HFOPTLayerPolicy._orig_layer_class = None + + if hasattr(TransformerPolicy, "hf_model_config") and hasattr(TransformerPolicy.hf_model_config, + "activation_function"): + if TransformerPolicy.hf_model_config.activation_function == "relu": + self.mlp_act_func_type = ActivationFuncType.ReLU + elif TransformerPolicy.hf_model_config.activation_function in ["gelu", "gelu_new"]: + self.mlp_act_func_type = ActivationFuncType.GELU + else: + raise ValueError("Unsupported activation function: {}".format( 
+ TransformerPolicy.hf_model_config.activation_function)) + else: + self.mlp_act_func_type = ActivationFuncType.ReLU # default + + def get_hidden_heads(self): + return self.client_module.self_attn.embed_dim, \ + self.client_module.self_attn.num_heads, \ + self.client_module.self_attn_layer_norm.eps, \ + DEFAULT_INTERMEDIATE_SIZE + + def attention(self, enable_training=False): + qw = self.client_module.self_attn.q_proj.weight + qb = self.client_module.self_attn.q_proj.bias + + kw = self.client_module.self_attn.k_proj.weight + kb = self.client_module.self_attn.k_proj.bias + + vw = self.client_module.self_attn.v_proj.weight + vb = self.client_module.self_attn.v_proj.bias + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training) + qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training) + return qkvw, \ + qkvb, \ + self.client_module.self_attn.out_proj.weight, \ + self.client_module.self_attn.out_proj.bias + + def mlp(self, enable_training=False): + return self.client_module.fc1.weight, \ + self.client_module.fc1.bias, \ + self.client_module.fc2.weight, \ + self.client_module.fc2.bias + + def layernorm(self): + return self.client_module.final_layer_norm.weight, \ + self.client_module.final_layer_norm.bias, \ + self.client_module.self_attn_layer_norm.weight, \ + self.client_module.self_attn_layer_norm.bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/unet.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/unet.py new file mode 100644 index 0000000000000000000000000000000000000000..48179265553150824855140750da3fc6e41503ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/unet.py @@ -0,0 +1,56 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from torch.nn.parameter import Parameter + +from ..policy import DSPolicy +from ...model_implementations.diffusers.unet import DSUNet + + +class UNetPolicy(DSPolicy): + + def __init__(self): + super().__init__() + try: + import diffusers + self._orig_layer_class = diffusers.models.unet_2d_condition.UNet2DConditionModel + except AttributeError: + self._orig_layer_class = diffusers.models.unets.unet_2d_condition.UNet2DConditionModel + except ImportError: + self._orig_layer_class = None + + def match(self, module): + return isinstance(module, self._orig_layer_class) + + def match_replaced(self, module): + return isinstance(module, DSUNet) + + def apply(self, module, enable_cuda_graph=True): + # TODO(cmikeh2): Enable cuda graph should be an inference configuration + return DSUNet(module, enable_cuda_graph=enable_cuda_graph) + + def attention(self, client_module): + qw = client_module.to_q.weight + kw = client_module.to_k.weight + vw = client_module.to_v.weight + + if qw.shape[1] == kw.shape[1]: + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False) + + return qkvw, \ + client_module.to_out[0].weight, \ + client_module.to_out[0].bias, \ + qw.shape[-1], \ + client_module.heads + else: + #return None + #kvw = Parameter(torch.cat((kw, vw), dim=0), requires_grad=False) + return qw, \ + kw, vw, \ + client_module.to_out[0].weight, \ + client_module.to_out[0].bias, \ + qw.shape[-1], \ + client_module.heads diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/vae.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/vae.py new file mode 100644 index 0000000000000000000000000000000000000000..d26d0ef77ca9d10e27bad47859dba589190af7b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/containers/vae.py @@ -0,0 +1,42 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..policy import DSPolicy +from ...model_implementations.diffusers.vae import DSVAE + + +class VAEPolicy(DSPolicy): + + def __init__(self): + super().__init__() + try: + import diffusers + if hasattr(diffusers.models, "autoencoders"): + # Diffusers >= 0.25.0 + # Changes location to 'autoencoders' directory + self._orig_layer_class = diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL + elif hasattr(diffusers.models.vae, "AutoencoderKL"): + # Diffusers < 0.12.0 + self._orig_layer_class = diffusers.models.vae.AutoencoderKL + else: + # Diffusers >= 0.12.0 & < 0.25.0 + # Changes location of AutoencoderKL + self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL + except ImportError: + self._orig_layer_class = None + + def match(self, module): + return isinstance(module, self._orig_layer_class) + + def match_replaced(self, module): + return isinstance(module, DSVAE) + + def apply(self, module, enable_cuda_graph=True): + # TODO(cmikeh2): Enable cuda graph should be an inference configuration + return DSVAE(module, enable_cuda_graph=enable_cuda_graph) + + # NOTE (lekurile): Should we have a diffusers policy class? 
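+ # The VAE is wrapped as a whole module by DSVAE in apply(), so attention() has nothing to extract and is a no-op.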
+ def attention(self, client_module): + pass diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/monitor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/monitor/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/config.py b/venv/lib/python3.10/site-packages/deepspeed/monitor/config.py new file mode 100644 index 0000000000000000000000000000000000000000..5a8ca6ecf5cd208b855c6ed81d09712705bbe763 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/monitor/config.py @@ -0,0 +1,79 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.pydantic_v1 import root_validator +from deepspeed.runtime.config_utils import DeepSpeedConfigModel + + +def get_monitor_config(param_dict): + monitor_dict = {key: param_dict.get(key, {}) for key in ("tensorboard", "wandb", "csv_monitor")} + return DeepSpeedMonitorConfig(**monitor_dict) + + +class TensorBoardConfig(DeepSpeedConfigModel): + """Sets parameters for TensorBoard monitor.""" + + enabled: bool = False + """ Whether logging to Tensorboard is enabled. Requires `tensorboard` package is installed. """ + + output_path: str = "" + """ + Path to where the Tensorboard logs will be written. If not provided, the + output path is set under the training script’s launching path. + """ + + job_name: str = "DeepSpeedJobName" + """ Name for the current job. This will become a new directory inside `output_path`. """ + + +class WandbConfig(DeepSpeedConfigModel): + """Sets parameters for WandB monitor.""" + + enabled: bool = False + """ Whether logging to WandB is enabled. Requires `wandb` package is installed. """ + + group: str = None + """ Name for the WandB group. This can be used to group together runs. """ + + team: str = None + """ Name for the WandB team. """ + + project: str = "deepspeed" + """ Name for the WandB project. """ + + +class CSVConfig(DeepSpeedConfigModel): + """Sets parameters for CSV monitor.""" + + enabled: bool = False + """ Whether logging to local CSV files is enabled. """ + + output_path: str = "" + """ + Path to where the csv files will be written. If not provided, the output + path is set under the training script’s launching path. + """ + + job_name: str = "DeepSpeedJobName" + """ Name for the current job. This will become a new directory inside `output_path`. """ + + +class DeepSpeedMonitorConfig(DeepSpeedConfigModel): + """Sets parameters for various monitoring methods.""" + + tensorboard: TensorBoardConfig = {} + """ TensorBoard monitor, requires `tensorboard` package is installed. """ + + wandb: WandbConfig = {} + """ WandB monitor, requires `wandb` package is installed. """ + + csv_monitor: CSVConfig = {} + """ Local CSV output of monitoring data. 
""" + + @root_validator + def check_enabled(cls, values): + values["enabled"] = values.get("tensorboard").enabled or values.get("wandb").enabled or values.get( + "csv_monitor").enabled + return values diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/csv_monitor.py b/venv/lib/python3.10/site-packages/deepspeed/monitor/csv_monitor.py new file mode 100644 index 0000000000000000000000000000000000000000..c7a19b14ad8227fc8187bbe161667285b7d0c717 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/monitor/csv_monitor.py @@ -0,0 +1,67 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .monitor import Monitor +import os + +import deepspeed.comm as dist + + +class csvMonitor(Monitor): + + def __init__(self, csv_config): + super().__init__(csv_config) + self.filenames = [] + self.enabled = csv_config.enabled + self.output_path = csv_config.output_path + self.job_name = csv_config.job_name + self.log_dir = self.setup_log_dir() + + def setup_log_dir(self, base=os.path.join(os.path.expanduser("~"), "csv_monitor")): + if self.enabled and dist.get_rank() == 0: + if self.output_path is not None: + log_dir = os.path.join(self.output_path, self.job_name) + # NOTE: This code path currently is never used since the default tensorboard_output_path is an empty string and not None. Saving it in case we want this functionality in the future. + else: + if "DLWS_JOB_ID" in os.environ: + infra_job_id = os.environ["DLWS_JOB_ID"] + elif "DLTS_JOB_ID" in os.environ: + infra_job_id = os.environ["DLTS_JOB_ID"] + else: + infra_job_id = "unknown-job-id" + + csv_monitor_dir_name = os.path.join(infra_job_id, "logs") + log_dir = os.path.join(base, csv_monitor_dir_name, self.job_name) + os.makedirs(log_dir, exist_ok=True) + return log_dir + + def write_events(self, event_list): + if self.enabled and dist.get_rank() == 0: + import csv + # We assume each event_list element is a tensorboard-style tuple in the format: (log_name: String, value, step: Int) + for event in event_list: + log_name = event[0] + value = event[1] + step = event[2] + + # Set the header to the log_name + # Need this check because the deepspeed engine currently formats log strings to separate with '/' + if '/' in log_name: + record_splits = log_name.split('/') + header = record_splits[len(record_splits) - 1] + else: + header = log_name + + # sanitize common naming conventions into filename + filename = log_name.replace('/', '_').replace(' ', '_') + fname = self.log_dir + '/' + filename + '.csv' + + # Open file and record event. Insert header if this is the first time writing + with open(fname, 'a+') as csv_monitor_file: + csv_monitor_writer = csv.writer(csv_monitor_file) + if filename not in self.filenames: + self.filenames.append(filename) + csv_monitor_writer.writerow(['step', header]) + csv_monitor_writer.writerow([step, value]) diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/utils.py b/venv/lib/python3.10/site-packages/deepspeed/monitor/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..265fc98115534b58da53f3589132d3a7e7373ecb --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/monitor/utils.py @@ -0,0 +1,24 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + + +def check_tb_availability(): + try: + # torch.utils.tensorboard will fail if `tensorboard` is not available, + # see their docs for more details: https://pytorch.org/docs/1.8.0/tensorboard.html + import tensorboard # noqa: F401 # type: ignore + except ImportError: + print('If you want to use tensorboard logging, please `pip install tensorboard`') + raise + + +def check_wandb_availability(): + try: + import wandb # noqa: F401 # type: ignore + except ImportError: + print( + 'If you want to use wandb logging, please `pip install wandb` and follow the instructions at https://docs.wandb.ai/quickstart' + ) + raise diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/wandb.py b/venv/lib/python3.10/site-packages/deepspeed/monitor/wandb.py new file mode 100644 index 0000000000000000000000000000000000000000..30209191171afc4c0ad3ff7088639f7d678cd505 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/monitor/wandb.py @@ -0,0 +1,38 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .utils import check_wandb_availability +from .monitor import Monitor + +import deepspeed.comm as dist + + +class WandbMonitor(Monitor): + + def __init__(self, wandb_config): + super().__init__(wandb_config) + check_wandb_availability() + import wandb + + self.enabled = wandb_config.enabled + self.group = wandb_config.group + self.team = wandb_config.team + self.project = wandb_config.project + + if self.enabled and dist.get_rank() == 0: + wandb.init(project=self.project, group=self.group, entity=self.team) + + def log(self, data, step=None, commit=None, sync=None): + if self.enabled and dist.get_rank() == 0: + import wandb + return wandb.log(data, step=step, commit=commit, sync=sync) + + def write_events(self, event_list): + if self.enabled and dist.get_rank() == 0: + for event in event_list: + label = event[0] + value = event[1] + step = event[2] + self.log({label: value}, step=step) diff --git a/venv/lib/python3.10/site-packages/deepspeed/pipe/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/pipe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f25471713b4568b43216fe16786a46904575ae5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/pipe/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..runtime.pipe import PipelineModule, LayerSpec, TiedLayerSpec diff --git a/venv/lib/python3.10/site-packages/deepspeed/pipe/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/pipe/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c552d4a2b76c3f98a177f040c45be43e660a7ab Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/pipe/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz new file mode 100644 index 0000000000000000000000000000000000000000..8bddf805c36b29dc449556c27a2b489691f841af --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d6803c0b398f2704c236f1d1b9e8e5ede06bd165a0abb0f228281abbd455ae9 +size 2648 diff --git a/venv/lib/python3.10/site-packages/scipy/interpolate/tests/data/gcvspl.npz b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/data/gcvspl.npz new file mode 100644 index 0000000000000000000000000000000000000000..50e9348dcca79eae861e67092add93cdb8ff1ca3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/interpolate/tests/data/gcvspl.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03ce8155a6cba0c1bf0a2441a10c228191f916dec36cb820723429811296bba8 +size 3138 diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..6bee618e7d52bb31a9700b33642a1dfbfb02ee91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eefa5f8b2a452e465518c29630626ee17c5a5eff022e165520382cff2b1966a8 +size 4466608 diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/linalg/tests/propack_test_data.npz b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/tests/propack_test_data.npz new file mode 100644 index 0000000000000000000000000000000000000000..0bf01015610346655c749ead87a47d5575e2b67b --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/linalg/tests/propack_test_data.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfe34d9a92353e08f400f3837136e553a8e91d441186913d39b59bf8a627bba3 +size 600350 diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/tests/data/csc_py2.npz b/venv/lib/python3.10/site-packages/scipy/sparse/tests/data/csc_py2.npz new file mode 100644 index 0000000000000000000000000000000000000000..d4459ff2786fabe4bcf4653d880cbf0afd4bfdcf --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/tests/data/csc_py2.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bac27f1a3eb1fdd102dae39b7dd61ce83e82f096388e344e14285071984d01fa +size 846 diff --git a/venv/lib/python3.10/site-packages/scipy/sparse/tests/data/csc_py3.npz b/venv/lib/python3.10/site-packages/scipy/sparse/tests/data/csc_py3.npz new file mode 100644 index 0000000000000000000000000000000000000000..e40a38584bc4647621601075d946ce46a8e065dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/sparse/tests/data/csc_py3.npz 
@@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b1b84315c7077417e720512d086a5a6217c2875b818d27704ae9b7237c69dfe +size 851 diff --git a/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d1edcdc79787e4c8b263cdf1a935eaa66cc6599e --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dcbab05f1218b2a892e811851853948182de20022046c1df908d5b10cbfb3f8 +size 1572553 diff --git a/venv/lib/python3.10/site-packages/scipy/special/tests/data/gsl.npz b/venv/lib/python3.10/site-packages/scipy/special/tests/data/gsl.npz new file mode 100644 index 0000000000000000000000000000000000000000..b090dae17b5b0403f7c4919c46a464a09509aeab --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/tests/data/gsl.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acab700208cbb301ee51eb1f512cb1c27e7b4e7533fc5a5f5cd5c5d6aa197dd8 +size 51433 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c8f550dfe4db93fb65903ad7864f8468b6b923db --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb2b0ab521b8ccf28b093089948ada4e83208f13e121ce0d2314c6a92e300e2c +size 1589832 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy new file mode 100644 index 0000000000000000000000000000000000000000..adda664a7b5442fc0977ddbaa572c864ddd31f08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf18c1f2d65a232bf2c7121282df31bf2a8be827afafc4ed810ed37457ee898a +size 183728 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy new file mode 100644 index 0000000000000000000000000000000000000000..6c41166721b891a801cdc6828804c6da7233d625 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fee99512bab4ccc6569b47b924e4b034e1cdbab5624fafc7e120648bd5f7a128 +size 183688