diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/ds_to_universal.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/ds_to_universal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c494dcd4ae95ec5518470e9de1c9b13c4ca2d500 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/ds_to_universal.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/zero_checkpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/zero_checkpoint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dc8bc0e31de468f2c64154881b017429829d701 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/zero_checkpoint.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..049fe412db2860bd31f79782cdbb7eb2369e8815 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/basic_layer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/basic_layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..626ac480aec52da5ab9e5adc8115626fd8d9d9b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/basic_layer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/compress.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/compress.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f9252feaefc37564a31d306a77c3293fb9f592f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/compress.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb089f11c60f355d28dcd40447a461ae0c471b5e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09bb8aaabceb165955f4f349189047ff7c521ba7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/helper.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6141a31241c03854f63381002659568f0a9f22d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/helper.cpython-310.pyc 
differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/scheduler.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7477598e08ae1665b0464d76baca87e4cb535e14 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/scheduler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aafa54bc3b35c0dd66c10e7db7047ab27aeaf8a9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/compression/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cdd00fec935b826c821c8ca9d62e2711a91ea811 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +from .v2 import RaggedInferenceEngineConfig, DeepSpeedTPConfig +from .v2.engine_v2 import InferenceEngineV2 +from .v2 import build_hf_engine, build_engine_from_ds_checkpoint diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/config.py b/venv/lib/python3.10/site-packages/deepspeed/inference/config.py new file mode 100644 index 0000000000000000000000000000000000000000..1d5018aaa75b80d8d008da6bd68fa173f56b1a58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/config.py @@ -0,0 +1,304 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import deepspeed +from deepspeed.pydantic_v1 import Field, validator +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from deepspeed.runtime.zero.config import DeepSpeedZeroConfig +from typing import Dict, Union +from enum import Enum + + +class DtypeEnum(Enum): + # The torch dtype must always be the first value (so we return torch.dtype) + fp16 = torch.float16, "torch.float16", "fp16", "float16", "half" + fp32 = torch.float32, "torch.float32", "fp32", "float32", "float" + bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16", "bfloat" + int8 = torch.int8, "torch.int8", "int8" + + # Copied from https://stackoverflow.com/a/43210118 + # Allows us to use multiple values for each Enum index and returns first + # listed value when Enum is called + def __new__(cls, *values): + obj = object.__new__(cls) + # first value is canonical value + obj._value_ = values[0] + for other_value in values[1:]: + cls._value2member_map_[other_value] = obj + obj._all_values = values + return obj + + def __repr__(self): + return "<%s.%s: %s>" % ( + self.__class__.__name__, + self._name_, + ", ".join([repr(v) for v in self._all_values]), + ) + + +class MoETypeEnum(str, Enum): + residual = "residual" + standard = "standard" + + +class DeepSpeedTPConfig(DeepSpeedConfigModel): + """ Configure tensor parallelism settings """ + + enabled: bool = True + """ Turn tensor parallelism on/off. """ + + tp_size: int = 1 + """ Number of devices to split the model across using tensor parallelism. 
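The multi-value Enum trick used by DtypeEnum above (canonical torch dtype first, string aliases after) can be hard to parse at a glance. Below is a small self-contained re-creation of the same pattern with an illustrative class name (DtypeAliasDemo is not part of DeepSpeed), showing how alias lookups and the canonical value behave; it mirrors the __new__ shown above rather than calling DeepSpeed's actual class.

import torch
from enum import Enum

class DtypeAliasDemo(Enum):
    # First tuple entry is the canonical value; the rest are lookup aliases.
    fp16 = torch.float16, "fp16", "float16", "half"
    fp32 = torch.float32, "fp32", "float32", "float"

    def __new__(cls, *values):
        obj = object.__new__(cls)
        obj._value_ = values[0]                      # .value returns the torch dtype
        for alias in values[1:]:                     # every alias resolves to the same member
            cls._value2member_map_[alias] = obj
        return obj

assert DtypeAliasDemo("half") is DtypeAliasDemo.fp16          # alias lookup
assert DtypeAliasDemo(torch.float16) is DtypeAliasDemo.fp16   # canonical lookup
assert DtypeAliasDemo.fp16.value is torch.float16             # first listed value wins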
""" + + mpu: object = None + """ + A model parallelism unit object that implements + ``get_{model,data}_parallel_{rank,group,world_size}()``. + """ + + tp_group: object = None + + +class DeepSpeedMoEConfig(DeepSpeedConfigModel): + """ Sets parameters for MoE """ + + enabled: bool = True + ep_size: int = 1 + """ + The expert-parallelism size which is used for partitioning the experts + across the GPUs in the expert-parallel group. + """ + + moe_experts: list = Field([1], alias="num_experts") + """ The global number of experts used in an MoE layer. """ + + type: MoETypeEnum = MoETypeEnum.standard + """ + Specify the type of MoE layer. We have two types of MoE layer: 'Standard' + and 'Residual'. + """ + + ep_mp_group: object = None + ep_group: object = Field(None, alias="expert_group") + + +class QuantTypeEnum(str, Enum): + asym = "asymmetric" + sym = "symmetric" + + +class BaseQuantConfig(DeepSpeedConfigModel): + enabled = True + num_bits = 8 + q_type: QuantTypeEnum = QuantTypeEnum.sym + q_groups: int = 1 + + +class WeightQuantConfig(BaseQuantConfig): + enabled = True + quantized_initialization: Dict = {} + post_init_quant: Dict = {} + + +class ActivationQuantConfig(BaseQuantConfig): + enabled = True + + +class QKVQuantConfig(DeepSpeedConfigModel): + enabled = True + + +class QuantizationConfig(DeepSpeedConfigModel): + enabled: bool = True + activation: ActivationQuantConfig = ActivationQuantConfig() + weight: WeightQuantConfig = WeightQuantConfig() + qkv: QKVQuantConfig = QKVQuantConfig() + + +# todo: brainstorm on how to do ckpt loading for DS inference +class InferenceCheckpointConfig(DeepSpeedConfigModel): + checkpoint_dir: str = None + save_mp_checkpoint_path: str = None + base_dir: str = None + + +class DeepSpeedInferenceConfig(DeepSpeedConfigModel): + """ Sets parameters for DeepSpeed Inference Engine. """ + + replace_with_kernel_inject: bool = Field(False, alias="kernel_inject") + """ + Set to true to inject inference kernels for models such as, Bert, GPT2, + GPT-Neo and GPT-J. Otherwise, the injection_dict provides the names of two + linear layers as a tuple: + `(attention_output projection, transformer output projection)` + """ + + dtype: DtypeEnum = torch.float16 + """ + Desired model data type, will convert model to this type. + Supported target types: `torch.half`, `torch.int8`, `torch.float` + """ + + tensor_parallel: DeepSpeedTPConfig = Field({}, alias="tp") + """ + Configuration for tensor parallelism used to split the model across several + GPUs. Expects a dictionary containing values for :any:`DeepSpeedTPConfig`. + """ + + enable_cuda_graph: bool = False + """ + Use this flag for capturing the CUDA-Graph of the inference ops, so that it + can run faster using the graph replay method. + """ + + use_triton: bool = False + """ + Use this flag to use triton kernels for inference ops. + """ + + triton_autotune: bool = False + """ + Use this flag to enable triton autotuning. + Turning it on is better for performance but increase the 1st runtime for + autotuning. + """ + + zero: DeepSpeedZeroConfig = {} + """ + ZeRO configuration to use with the Inference Engine. Expects a dictionary + containing values for :any:`DeepSpeedZeroConfig`. + """ + + triangular_masking: bool = Field(True, alias="tm") + """ + Controls the type of masking for attention scores in transformer layer. + Note that the masking is application specific. + """ + + moe: Union[bool, DeepSpeedMoEConfig] = {} + """ + Specify if the type of Transformer is MoE. 
Expects a dictionary containing + values for :any:`DeepSpeedMoEConfig`. + """ + + quant: QuantizationConfig = {} + """ + NOTE: only works for int8 dtype. + Quantization settings used for quantizing your model using the MoQ. The + setting can be one element or a tuple. If one value is passed in, we + consider it as the number of groups used in quantization. A tuple is passed + in if we want to mention that there is extra-grouping for the MLP part of a + Transformer layer (e.g. (True, 8) shows we quantize the model using 8 + groups for all the network except the MLP part that we use 8 extra + grouping). Expects a dictionary containing values for + :any:`QuantizationConfig`. + """ + + #todo: refactor the following 3 into the new checkpoint_config + checkpoint: Union[str, Dict] = None + """ + Path to deepspeed compatible checkpoint or path to JSON with load policy. + """ + + base_dir: str = "" + """ + This shows the root directory under which all the checkpoint files exists. + This can be passed through the json config too. + """ + + set_empty_params: bool = False + """ + specifying whether the inference-module is created with empty or real Tensor + """ + + save_mp_checkpoint_path: str = None + """ + The path for which we want to save the loaded model with a checkpoint. This + feature is used for adjusting the parallelism degree to help alleviate the + model loading overhead. It does not save any new checkpoint if no path is + passed. + """ + + checkpoint_config: InferenceCheckpointConfig = Field({}, alias="ckpt_config") + """ + TODO: Add docs. Expects a dictionary containing values for + :any:`InferenceCheckpointConfig`. + """ + + return_tuple: bool = True + """ + Specify whether or not the transformer layers need to return a tuple or a + Tensor. + """ + + training_mp_size: int = 1 + """ + If loading a checkpoint this is the mp size that it was trained with, it + may be different than what the mp size that you want to use during + inference. + """ + + replace_method: str = Field( + "auto", + deprecated=True, + deprecated_msg="This parameter is no longer needed, please remove from your call to DeepSpeed-inference") + + injection_policy: Dict = Field(None, alias="injection_dict") + """ + Dictionary mapping a client nn.Module to its corresponding injection + policy. e.g., `{BertLayer : deepspeed.inference.HFBertLayerPolicy}` + """ + + injection_policy_tuple: tuple = None + """ TODO: Add docs """ + + config: Dict = Field(None, alias="args") # todo: really no need for this field if we can refactor + + max_out_tokens: int = Field(1024, alias="max_tokens") + """ + This argument shows the maximum number of tokens inference-engine can work + with, including the input and output tokens. Please consider increasing it + to the required token-length required for your use-case. + """ + + min_out_tokens: int = Field(1, alias="min_tokens") + """ + This argument communicates to the runtime the minimum number of tokens you + expect you will need to generate. This will cause the runtime to error + if it unable to provide this and provide context on the memory pressure + rather than seg-faulting or providing corrupted output. + """ + + transposed_mode: bool = Field(False, alias="transposed_mode") + + mp_size: int = Field(1, deprecated=True, new_param="tensor_parallel.tp_size") + """ + Desired model parallel size, default is 1 meaning no model parallelism. + Deprecated, please use the ``tensor_parallel` config to control model + parallelism. 
+ """ + mpu: object = Field(None, deprecated=True, new_param="tensor_parallel.mpu") + ep_size: int = Field(1, deprecated=True, new_param="moe.ep_size") + ep_group: object = Field(None, alias="expert_group", deprecated=True, new_param="moe.ep_group") + ep_mp_group: object = Field(None, alias="expert_mp_group", deprecated=True, new_param="moe.ep_mp_group") + moe_experts: list = Field([1], deprecated=True, new_param="moe.moe_experts") + moe_type: MoETypeEnum = Field(MoETypeEnum.standard, deprecated=True, new_param="moe.type") + + @validator("moe") + def moe_backward_compat(cls, field_value, values): + if isinstance(field_value, bool): + return DeepSpeedMoEConfig(moe=field_value) + return field_value + + @validator("use_triton") + def has_triton(cls, field_value, values): + if field_value and not deepspeed.HAS_TRITON: + raise ValueError('Triton needs to be installed to use deepspeed with triton kernels') + return field_value + + class Config: + # Get the str representation of the datatype for serialization + json_encoders = {torch.dtype: lambda x: str(x)} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/engine.py b/venv/lib/python3.10/site-packages/deepspeed/inference/engine.py new file mode 100644 index 0000000000000000000000000000000000000000..5cdd99ff0b901ac695a389fd2193f9ae699b29fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/engine.py @@ -0,0 +1,636 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import time +import os +from deepspeed import comm as dist +from deepspeed.utils.logging import log_dist + +from torch.nn.modules import Module +from packaging import version as pkg_version +from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine +from deepspeed.utils.timer import SynchronizedWallClockTimer + +from ..runtime.state_dict_factory import SDLoaderFactory +from ..runtime.weight_quantizer import WeightQuantization +from ..module_inject import replace_transformer_layer, generic_injection +from ..comm.comm import init_distributed +from ..pipe import PipelineModule +from ..moe.utils import has_moe_layers +from ..module_inject import LinearAllreduce, LinearLayer, Normalize, ReplaceWithTensorSlicing +from deepspeed.accelerator import get_accelerator +from ..module_inject.policy import TransformerPolicy +from ..module_inject.auto_tp import AutoTP + +from ..module_inject.replace_policy import generic_policies +from ..module_inject.auto_tp_model_utils import build_bloom_alibi_tensor, build_mpt_atten_bias_tensor, build_mpt_alibi_tensor, get_alibi_mask +from ..ops.transformer.inference.ds_attention import DeepSpeedSelfAttention +from ..model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + +DS_INFERENCE_ENABLED = False +from torch import nn + +INFERENCE_MODEL_TIMER = "model-forward-inference" + + +class InferenceEngine(Module): + inference_mp_group = None + inference_ep_group = None + expert_mp_group = None + + def __init__(self, model, config): + """ + Args: + model: torch.nn.Module + config: DeepSpeedInferenceConfig + """ + global DS_INFERENCE_ENABLED + DS_INFERENCE_ENABLED = True + + super().__init__() + + # Have to import here because inference_module is a global, but python + # globals only work at the module level and will not be updated unless + # we import it each time we init a new inference engine. 
+ from ..model_implementations.transformers.ds_transformer import inference_module + if inference_module is not None: + self.destroy() + + self.module = model + self._config = config + + self._get_model_config_generate(config) # keep for weird backward compatibility + + # patch model generate with ours if model uses it + if hasattr(self.module, "generate"): + self.generate = self._generate + + if hasattr(self.module, "config"): + TransformerPolicy.hf_model_config = self.module.config + + if config.dtype == torch.half and not get_accelerator().is_fp16_supported(): + raise ValueError("Type fp16 is not supported.") + + # todo: keep this self.injection_dict because we don't use to change config.injection_policy API + # todo: this will get changed when Molly's PR on auto injection dict is merged + self.injection_dict = config.injection_policy + + # todo: refactor the mp_group and mp_size related in the next refactor + self.mp_group = config.tensor_parallel.tp_group + self.mpu = config.tensor_parallel.mpu + + #self._validate_args(self.mpu, config.replace_with_kernel_inject) + self.quantize_merge_count = 1 + self.quantization_scales = None + + # these are not needed in the config as we are creating them ourselves in the inference engine + self.ep_group = None # config.moe.ep_group + self.expert_mp_group = None # config.moe.ep_mp_group + + self.cuda_graph_created = False + self.checkpoint_engine = TorchCheckpointEngine() + quantization_setting = None + self._init_quantization_setting( + quantization_setting) # todo: update with the new quant config for weight quant + self.model_profile_enabled = False + self._model_times = [] + + if not self.injection_dict and config.replace_with_kernel_inject: + # This is a hack to remove the prepare_mask function on HF side for BLOOM architecture + self.remove_mask_prepare_for_bloom() + + if self.injection_dict or not config.replace_with_kernel_inject: + # This is a hack to redefine the alibi func due to TP + if config.tensor_parallel.tp_size > 1: + self.build_alibi_tensor() + self.build_attn_bias() + + if get_accelerator().device_name() == 'cuda' and config.enable_cuda_graph: + assert pkg_version.parse(torch.__version__) >= pkg_version.parse("1.10"), \ + "If you want to use cuda graph, please upgrade torch to at least v1.10" + + # convert model to intended dtype + if config.dtype: + self._convert_to_dtype(config) + + if self.mpu: + config.tensor_parallel.tp_size = dist.get_world_size(group=self.mpu.get_model_parallel_group()) + self.mp_group = self.mpu.get_model_parallel_group() + elif config.tensor_parallel.tp_size > 1: + self._create_model_parallel_group(config) + config.tensor_parallel.tp_group = self.mp_group + + if isinstance(self.module, torch.nn.Module): + moe, _ = has_moe_layers(self.module) + else: + moe = False + + if moe and dist.get_world_size() > 1: + self._create_ep_parallel_group(config.moe.moe_experts) + + # We only support three modes: 1) user specified policy for tensor-parallelism, 2) kernel injection (replace_with_kernel_inject), and 3) automatic tensor parallelism if tp_size > 1. + if self.injection_dict: + # 1. User specified Tensor Parallelism + assert not config.replace_with_kernel_inject, "Cannot use both user specified injection policy and kernel injection" + for client_module, injection_policy in self.injection_dict.items(): + + assert issubclass(client_module, + torch.nn.Module), f"{client_module} is not a subclass of torch.nn.Module" + + # construct the tuple and pass that instead of a string or dict. 
+ if isinstance(injection_policy, str): + config.injection_policy_tuple = (injection_policy, ) + else: + config.injection_policy_tuple = injection_policy + + layer_names = [name for name, _ in self.module.named_modules()] + for policy in config.injection_policy_tuple: + if not any(name.endswith(policy) for name in layer_names): + raise ValueError(f"Injection policy layer'{policy}' not valid.") + + self._apply_injection_policy(config, client_module) + else: + if config.replace_with_kernel_inject: + # 2. DeepSpeed Kernel Injection + self._apply_injection_policy(config) + elif config.tensor_parallel.tp_size > 1: + # 3. Automatic Tensor Parallelism + parser_dict = AutoTP.tp_parser(model) + print("AutoTP: ", parser_dict) + for client_module, injection_policy in parser_dict: + if isinstance(injection_policy, str): + config.injection_policy_tuple = (injection_policy, ) + else: + config.injection_policy_tuple = injection_policy + self._apply_injection_policy(config, client_module) + + device = get_accelerator().current_device_name() + # NOTE: This check assumes a Hugging Face hierarchy for the device type i.e. module.device.type + is_meta_device = hasattr(self.module, "device") and self.module.device.type == 'meta' + if is_meta_device: + self.module.to_empty(device=device) + else: + self.module.to(device) + + if config.tensor_parallel.tp_size > 1: + _rng_state = get_accelerator().get_rng_state().to(get_accelerator().current_device_name()) + dist.broadcast(_rng_state, 0) + get_accelerator().set_rng_state(_rng_state.cpu()) + + if config.tensor_parallel.tp_size > 1: + assert not config.enable_cuda_graph, "Cuda graph is not supported for model parallelism" + + # Check if local CUDA graphs can be created in replacement modules + self.local_cuda_graph = self._local_cuda_graph_used(self.module) + + def destroy(self): + # Have to import here because inference_module is a global, but python + # globals only work at the module level and will not be updated unless + # we import it each time we init a new inference engine. 
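Path 1 above (a user-supplied injection policy) is driven entirely by the injection_policy dict validated in that loop. The sketch below shows the shape of such a call, borrowing the T5 layer-name tuple that DeepSpeed's tensor-parallelism tutorials commonly use; the checkpoint name, the tp_size, and the two-process launch (for example via the deepspeed launcher) are assumptions of the sketch, not requirements stated in this file.

import torch
import deepspeed
from transformers import T5ForConditionalGeneration
from transformers.models.t5.modeling_t5 import T5Block

model = T5ForConditionalGeneration.from_pretrained("t5-small")

# Map the client module class to the names of its output projections; the loop
# above turns the tuple into config.injection_policy_tuple and checks that some
# submodule name ends with each entry before applying the policy.
engine = deepspeed.init_inference(
    model,
    dtype=torch.float16,
    tensor_parallel={"tp_size": 2},
    injection_policy={T5Block: ("SelfAttention.o", "EncDecAttention.o", "DenseReluDense.wo")},
)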
+ from ..model_implementations.transformers.ds_transformer import inference_module + DeepSpeedTransformerInference.layer_id = 0 + DeepSpeedSelfAttention.num_layers = 0 + if inference_module is not None: + inference_module.release_workspace() + inference_module = None + + def profile_model_time(self, use_cuda_events=True): + if not self.model_profile_enabled and not self._config.enable_cuda_graph: + self.module.register_forward_pre_hook(self._pre_forward_hook) + self.module.register_forward_hook(self._post_forward_hook) + self.model_profile_enabled = True + self.use_cuda_events = use_cuda_events + if self.use_cuda_events: + self.timers = SynchronizedWallClockTimer() + + # todo: remove this once all the config dicts are centralized from top level pydantic config + def _get_model_config_generate(self, config): + # this is being passed to replace_transformer_layer(config=self.user_model_config_dict) + self.config = getattr(self.module, 'config', None) if config.config is None else config.config + + def remove_mask_prepare_for_bloom(self): + if hasattr(self.module, 'transformer'): + if hasattr(self.module.transformer, '_prepare_attn_mask'): + self.module.transformer._prepare_attn_mask = lambda attention_mask, *args, **kwargs: attention_mask + + def build_alibi_tensor(self): + if hasattr(self.module, 'transformer'): + if hasattr(self.module.transformer, 'build_alibi_tensor'): + self.module.transformer.build_alibi_tensor = build_bloom_alibi_tensor + if hasattr(self.module.transformer, 'build_mpt_alibi_tensor'): + self.module.transformer.build_mpt_alibi_tensor_orig = self.module.transformer.build_mpt_alibi_tensor + self.module.transformer.__class__.build_mpt_alibi_tensor = build_mpt_alibi_tensor + if hasattr(self.module, 'model'): + if hasattr(self.module.model, 'get_alibi_mask'): + self.module.model.get_alibi_mask_orig = self.module.model.get_alibi_mask + self.module.model.__class__.get_alibi_mask = get_alibi_mask + + def build_attn_bias(self): + if hasattr(self.module, 'transformer'): + if hasattr(self.module.transformer, '_attn_bias'): + self.module.transformer._attn_bias_orig = self.module.transformer._attn_bias + self.module.transformer.__class__._attn_bias = build_mpt_atten_bias_tensor + + def _pre_forward_hook(self, module, *inputs, **kwargs): + if self.use_cuda_events: + self.timers(INFERENCE_MODEL_TIMER).start() + else: + get_accelerator().synchronize() + self._start = time.time() + + def _post_forward_hook(self, module, input, output): + if self.use_cuda_events: + self.timers(INFERENCE_MODEL_TIMER).stop() + elapsed_time = self.timers(INFERENCE_MODEL_TIMER).elapsed(reset=True) + else: + get_accelerator().synchronize() + self._end = time.time() + elapsed_time = (self._end - self._start) * 1e3 # convert seconds to ms + self._model_times.append(elapsed_time) + + def _create_model_parallel_group(self, config): + # Call the init process + if InferenceEngine.inference_mp_group is None: + init_distributed() + local_rank = int(os.getenv('LOCAL_RANK', '0')) + get_accelerator().set_device(local_rank) + + ranks = [i for i in range(config.tensor_parallel.tp_size)] + self.mp_group = dist.new_group(ranks) + InferenceEngine.inference_mp_group = self.mp_group + else: + self.mp_group = InferenceEngine.inference_mp_group + + def _create_ep_parallel_group(self, moe_experts): + # Call the init process + self.ep_group = {} + self.expert_mp_group = {} + moe_experts = moe_experts if type(moe_experts) is list else [moe_experts] + for e in moe_experts: + self.ep_group.update({e: None}) + 
self.expert_mp_group.update({e: None}) + for moe_ep_size in self.ep_group.keys(): + num_ep_groups = dist.get_world_size() // moe_ep_size + for i in range(num_ep_groups): + ep_cnt = i * moe_ep_size + size = dist.get_world_size() if moe_ep_size > dist.get_world_size() else moe_ep_size + ranks = list(range(ep_cnt, ep_cnt + size)) + _ep_group = dist.new_group(ranks) + if dist.get_rank() in ranks: + self.ep_group.update({moe_ep_size: _ep_group}) + + if dist.get_world_size() > moe_ep_size: + num_expert_mp_groups = dist.get_world_size() // num_ep_groups + expert_mp_size = dist.get_world_size() // moe_ep_size + for i in range(num_expert_mp_groups): + expert_mp_comm_ranks = [i + nr * moe_ep_size for nr in range(expert_mp_size)] + _expert_mp_group = dist.new_group(expert_mp_comm_ranks) + if dist.get_rank() in expert_mp_comm_ranks: + self.expert_mp_group.update({moe_ep_size: _expert_mp_group}) + + def _init_quantization_setting(self, quantization_setting): + self.quantize_bits = 8 + self.mlp_extra_grouping = False + self.quantize_groups = 1 + if type(quantization_setting) is tuple: + self.mlp_extra_grouping, \ + self.quantize_groups = quantization_setting + elif quantization_setting is not None: + self.quantize_groups = quantization_setting + log_dist( + f"quantize_bits = {self.quantize_bits} " + f"mlp_extra_grouping = {self.mlp_extra_grouping}, " + f"quantize_groups = {self.quantize_groups}", [0]) + + # TODO: remove this function and add this functionality to pydantic config checking + def _validate_args(self, mpu, replace_with_kernel_inject): + # TODO: to support SD pipeline we need to avoid this check for now + if replace_with_kernel_inject and not isinstance(self.module, Module): + raise ValueError(f"model must be a torch.nn.Module, got {type(self.module)}") + if not isinstance(self._config.tensor_parallel.tp_size, int) or self._config.tensor_parallel.tp_size < 1: + raise ValueError(f"mp_size must be an int >= 1, got {self._config.tensor_parallel.tp_size}") + + if mpu: + methods = ["get_model_parallel_group", "get_data_parallel_group"] + for method in methods: + if not hasattr(mpu, method): + raise ValueError(f"mpu is missing {method}") + if self._config.checkpoint is not None and not isinstance(self._config.checkpoint, (str, dict)): + raise ValueError(f"checkpoint must be None, str or dict, got {type(self._config.checkpoint)}") + + supported_dtypes = [None, torch.half, torch.int8, torch.float] + if self._config.dtype not in supported_dtypes: + raise ValueError(f"{self._config.dtype} not supported, valid dtype: {supported_dtypes}") + + if self.injection_dict is not None and not isinstance(self.injection_dict, dict): + raise ValueError(f"injection_dict must be None or a dict, got: {self.injection_dict}") + + def load_model_with_checkpoint(self, r_module): + self.mp_replace = ReplaceWithTensorSlicing( + mp_group=self.mp_group, mp_size=self._config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1) + error_msgs = [] + + def load(module, state_dict, prefix): + args = (state_dict, prefix, {}, True, [], [], error_msgs) + if hasattr(module, 'weight'): + if module.weight.data.is_meta: + # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here + module.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.weight.data, + device="cpu"), + requires_grad=module.weight.data.requires_grad) + if 'query_key_value' in prefix: + module.weight = self.mp_replace.strided_copy(module.weight.data, + state_dict[prefix + 'weight'], + num_splits=3) + else: + module.weight = 
self.mp_replace.copy(module.weight.data, state_dict[prefix + 'weight']) + else: + if module.norm.weight.data.is_meta: + # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here + module.norm.weight = torch.nn.parameter.Parameter( + data=torch.empty_like(module.norm.weight.data, device="cpu"), + requires_grad=module.norm.weight.data.requires_grad) + module.norm.weight = self.mp_replace.copy(module.norm.weight.data, state_dict[prefix + 'weight']) + if prefix + 'bias' in self.key_list: + if hasattr(module, 'norm'): + if module.norm.bias.data.is_meta: + # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here + module.norm.bias = torch.nn.parameter.Parameter( + data=torch.empty_like(module.norm.bias.data, device="cpu"), + requires_grad=module.norm.bias.data.requires_grad) + module.norm.bias = self.mp_replace.copy(module.norm.bias, state_dict[prefix + 'bias']) + else: + if module.bias.data.is_meta: + # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here + module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data, + device="cpu"), + requires_grad=module.bias.data.requires_grad) + data = state_dict[prefix + 'bias'] + data = data.to(get_accelerator().current_device_name()) + module.bias = self.mp_replace.copy(module.bias, data) + + layer_policies = { + nn.Linear: load, + nn.Embedding: load, + nn.LayerNorm: load, + LinearLayer: load, + LinearAllreduce: load + } + + def load_module_recursive(module, prefix='', level=0): + for name, child in module.named_children(): + if child.__class__ in layer_policies: + checking_key = prefix + name + '.' + if not any(checking_key in item for item in self.key_list): + continue + if len(list(child.parameters())) > 0 and list(child.parameters())[0].numel() == 0: + if len(child.weight.ds_shape) == 1: + child = Normalize(dim=child.weight.ds_shape[-1], dtype=child.weight.dtype, eps=child.eps) + setattr(module, name, child) + load(child, self.sd, prefix + name + '.') + else: + load_module_recursive(child, prefix if level == 0 else prefix + name + '.', level + 1) + + load_module_recursive(r_module) + + embedding_weight = None + + for n, p in r_module.named_parameters(): + if "word_embeddings." in n or "embed_tokens." in n or "wte." in n: + embedding_weight = p + if embedding_weight is not None and hasattr(r_module, "lm_head") and hasattr( + r_module.lm_head, "weight") and r_module.lm_head.weight.is_meta: + r_module.lm_head.weight = embedding_weight + + def _apply_injection_policy(self, config, client_module=None): + # client_module is only passed when using the injection_dict method. 
+ checkpoint_dir = config.checkpoint + checkpoint = SDLoaderFactory.get_sd_loader_json(checkpoint_dir, + self.checkpoint_engine) if checkpoint_dir is not None else None + + generic_injection(self.module, dtype=config.dtype, enable_cuda_graph=config.enable_cuda_graph) + + if isinstance(self.module, torch.nn.Module): + # config is our DeepSpeedInferenceConfig and self.config is the HF model config + replace_transformer_layer(client_module, self.module, checkpoint, config, self.config) + + def _get_all_ckpt_names(self, checkpoints_path, tag): + ckpt_file_pattern = self._get_ckpt_name(checkpoints_path, tag, mp_placeholder="*") + import glob + + ckpt_files = glob.glob(ckpt_file_pattern) + ckpt_files.sort() + return ckpt_files + + def _get_ckpt_name(self, checkpoints_path, tag, mp_placeholder=None): + if mp_placeholder is not None: + mp_rank_str = mp_placeholder + else: + mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() + mp_rank_str = "{:02d}".format(mp_rank) + + ckpt_name = os.path.join( + checkpoints_path, + "mp_rank_" + mp_rank_str + "_model_states.pt", + ) + return ckpt_name + + def _load_checkpoint(self, load_dir, load_module_strict=True, tag=None): + is_pipe_parallel = isinstance(self.module, PipelineModule) + if is_pipe_parallel: + raise RuntimeError('pipeline parallelism is currently not supported in inference.') + if not isinstance(load_dir, dict) and os.path.isdir(load_dir): + if tag is None: + latest_path = os.path.join(load_dir, "latest") + if os.path.isfile(latest_path): + with open(latest_path, "r") as fd: + tag = fd.read().strip() + + ckpt_list = self._get_all_ckpt_names(load_dir, tag) + sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list, self.checkpoint_engine) + else: + sd_loader = SDLoaderFactory.get_sd_loader_json(load_dir, self.checkpoint_engine) + + checkpoint = sd_loader['checkpoints'] + + if type(checkpoint) is list: + self.sd = torch.load(checkpoint[0], map_location='cpu') + self.key_list = list(self.sd.keys()) + + self.load_model_with_checkpoint(self.module) + + for i in range(1, len(checkpoint)): + if not dist.is_initialized() or dist.get_rank() == 0: + print(f"loading checkpoint ({i})") + self.sd = torch.load(checkpoint[i], map_location=get_accelerator().device_name()) + self.key_list = list(self.sd.keys()) + self.load_model_with_checkpoint(self.module) + else: + mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() + + load_path, checkpoint, quantize_config = sd_loader.load(self._config.tensor_parallel.tp_size, + mp_rank, + is_pipe_parallel=is_pipe_parallel, + quantize=(self._config.dtype is torch.int8), + quantize_groups=self.quantize_groups, + mlp_extra_grouping=self.mlp_extra_grouping) + + self.quantization_scales, self.quantize_merge_count = quantize_config + + moe, _ = has_moe_layers(self.module) + if moe: + from deepspeed.runtime.engine import DeepSpeedEngine + old_moe_load = False + if not isinstance(checkpoint['num_experts'], list): + old_moe_load = True + DeepSpeedEngine.load_moe_state_dict(load_dir, + tag, + state_dict=checkpoint[self._choose_module_key(checkpoint)], + old_moe_load=old_moe_load, + model=self.module, + mpu=self.mpu, + checkpoint_engine=self.checkpoint_engine) + + self.module.load_state_dict(state_dict=checkpoint[self._choose_module_key(checkpoint)], + strict=load_module_strict) + + def _choose_module_key(self, sd): + assert not ('module' in sd + and 'model' in sd), "checkpoint has both 'model' and 'module' keys, not sure how to proceed" + assert 'module' in sd or 'model' in sd, "checkpoint contains 
neither 'model' or 'module' keys, not sure how to proceed" + if 'module' in sd: + return 'module' + elif 'model' in sd: + return 'model' + + def _convert_to_dtype(self, config): + if not isinstance(self.module, torch.nn.Module): + return + + if False: #config.dtype is torch.int8 and self.quantization_scales is None: + quantizer = WeightQuantization(mlp_extra_grouping=self.mlp_extra_grouping) + model, self.quantization_scales = quantizer.model_quantize(self.module, self.injection_dict, + self.quantize_bits, self.quantize_groups) + elif config.dtype == torch.half: + self.module.half() + elif config.dtype == torch.bfloat16: + self.module.bfloat16() + elif config.dtype == torch.float: + self.module.float() + + def _create_cuda_graph(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = get_accelerator().Stream() + cuda_stream.wait_stream(get_accelerator().current_stream()) + with get_accelerator().stream(cuda_stream): + for i in range(3): + ret = self.module(*inputs, **kwargs) + get_accelerator().current_stream().wait_stream(cuda_stream) + + # create cuda_graph and assign static_inputs and static_outputs + self._cuda_graphs = get_accelerator().create_graph() + self.static_inputs = inputs + self.static_kwargs = kwargs + + with get_accelerator().capture_to_graph(self._cuda_graphs): + self.static_output = self.module(*self.static_inputs, **self.static_kwargs) + + self.cuda_graph_created = True + + def _graph_replay(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + self.static_inputs[i].copy_(inputs[i]) + for k in kwargs: + if torch.is_tensor(kwargs[k]): + self.static_kwargs[k].copy_(kwargs[k]) + get_accelerator().replay_graph(self._cuda_graphs) + return self.static_output + + def model_times(self): + assert self.model_profile_enabled, "model profiling is not enabled" + model_times = self._model_times + if self._config.enable_cuda_graph and len(self._model_times) == 0: + raise ValueError("Model times are empty and cuda graph is enabled. If " + "this is a GPT-style model this combo is not supported. If this is a " + "BERT-style model this is a bug, please report it. 
" + f"Model type is: {type(self.module)}") + self._model_times = [] + return model_times + + def _module_match(self, module): + for policy in generic_policies: + policy = policy() + if policy.match_replaced(module): + return True + return False + + def _local_cuda_graph_used(self, module): + if isinstance(module, torch.nn.Module): + return False + else: + sub_module_cuda_graph = False + for name in module.__dict__.keys(): + sub_module = getattr(module, name) + + if self._module_match(sub_module) and hasattr(sub_module, "enable_cuda_graph"): + sub_module_cuda_graph = True + + return sub_module_cuda_graph + + def forward(self, *inputs, **kwargs): + """Execute forward propagation + + Arguments: + *inputs: Variable length input list + **kwargs: variable length keyword arguments + """ + start = None + if self.model_profile_enabled and get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph: + get_accelerator().synchronize() + start = time.time() + + if get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph and not self.local_cuda_graph: + if self.cuda_graph_created: + outputs = self._graph_replay(*inputs, **kwargs) + else: + self._create_cuda_graph(*inputs, **kwargs) + outputs = self._graph_replay(*inputs, **kwargs) + + else: + outputs = self.module(*inputs, **kwargs) + + if self.model_profile_enabled and self._config.enable_cuda_graph: + get_accelerator().synchronize() + duration = (time.time() - start) * 1e3 # convert seconds to ms + self._model_times.append(duration) + + return outputs + + def _generate(self, *inputs, **kwargs): + # Reset KV-cache at the beginning of generate + if hasattr(self.module, 'reset_cache'): + self.module.reset_cache() + num_beams = 1 + if "generation_config" in kwargs: + gen_config = kwargs["generation_config"] + num_beams = getattr(gen_config, "num_beams", 1) + if "num_beams" in kwargs: + num_beams = kwargs["num_beams"] + + if num_beams > 1: + raise NotImplementedError("DeepSpeed does not support `num_beams` > 1, if this is important to you please " + "add your request to: https://github.com/microsoft/DeepSpeed/issues/2506") + + if ("input_ids" in kwargs) and (kwargs["input_ids"].dim() == 2): + for input_tensor in kwargs["input_ids"]: + tensor_length = input_tensor.shape[-1] + if tensor_length > self._config.max_out_tokens: + raise RuntimeError( + f"Input with size {tensor_length} exceeds maximum length of {self._config.max_out_tokens}. Please increase `max_tokens` in the DeepSpeed Inference Config." + ) + + return self.module.generate(*inputs, **kwargs) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..208299fb8c50f73468d293b6fa5dca71649d62e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0b02a2ce8bdb6a1b9d9e6848c5b2cf57b530ba1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/layers.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/layers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..324adc134f209496ad842aad0919bed6d5a666d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/layers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/quantization.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/quantization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..391f98e5b6c40b07863ada3f22b560d13efae28b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/quantization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/quantization_context.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/quantization_context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..250ceabb40c94c76bace7d2b84713f00597954e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/quantization_context.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8c2078f44c8cd15ac4aaed9316b5a92b67aa5d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/layers.py b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..e9a7e5629f1b2f3c4cd57889f3276e876cecc7db --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/layers.py @@ -0,0 +1,114 @@ +# Copyright (c) Microsoft Corporation. 
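Before moving on to the quantization modules, the graph path in engine.py above is worth unpacking: _create_cuda_graph and _graph_replay wrap, behind get_accelerator(), the standard PyTorch CUDA Graphs recipe of warming up on a side stream, capturing one forward pass into static buffers, then copying new data into the static inputs and replaying. A minimal stand-alone sketch with a toy model; the model, sizes, and the requirement of a CUDA device are illustrative assumptions, not taken from this file.

import torch

model = torch.nn.Linear(16, 16).cuda().half()
static_in = torch.randn(8, 16, device="cuda", dtype=torch.half)

# Warm up on a side stream so workspace/cuBLAS allocations happen outside capture.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s), torch.no_grad():
    for _ in range(3):
        model(static_in)
torch.cuda.current_stream().wait_stream(s)

graph = torch.cuda.CUDAGraph()
with torch.cuda.graph(graph), torch.no_grad():
    static_out = model(static_in)             # capture a single forward pass

static_in.copy_(torch.randn_like(static_in))  # refresh the static input in place ...
graph.replay()                                # ... and replay the captured kernels
result = static_out.clone()                   # static_out now holds the new output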
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from torch import nn +from torch import Tensor +from torch.nn import functional as F +from .utils import Quantizer, DeQuantizer, concat_to_compat_param +from typing import Tuple, Callable, Dict +from deepspeed.runtime.zero import register_external_parameter + +quantized_weight_registry = {} +is_zero3_enabled = False + + +# deal with weight sharing +def get_quantized_weight_wrapper(model, pre_quant_weight: nn.Parameter, quantize_weight_fn: Callable) -> nn.Parameter: + if id(pre_quant_weight) in quantized_weight_registry: + compat_tensor = quantized_weight_registry[id(pre_quant_weight)] + if is_zero3_enabled: + register_external_parameter(model, compat_tensor) + + return quantized_weight_registry[id(pre_quant_weight)] + else: + quantized_weights, quant_scale, quant_min = quantize_weight_fn() + quantized_weight_registry[id(pre_quant_weight)] = concat_to_compat_param(quantized_weights, quant_scale, + quant_min) + return quantized_weight_registry[id(pre_quant_weight)] + + +def get_quantize_weight_fn(quantizer: Quantizer, pre_quant_weight: nn.Parameter) -> Callable: + + def func() -> Tuple[nn.Parameter, Tensor, Tensor]: + quantized_weights, quant_scale, quant_min = quantizer.quantize(pre_quant_weight.data) + # A temporary hack as zero Zero3 assume all model weights has the same type. in all_gather_coalesced.get_only_unique_item + quantized_weights = quantized_weights.view(pre_quant_weight.dtype) + quant_scale = quant_scale.type(pre_quant_weight.dtype) + quant_min = quant_min.type(pre_quant_weight.dtype) + return quantized_weights, quant_scale, quant_min + + return func + + +class QuantizedLinear(nn.Linear): + + def __init__(self, config: Dict, pre_quant_layer: nn.Linear) -> None: + super(QuantizedLinear, self).__init__(in_features=pre_quant_layer.in_features, + out_features=pre_quant_layer.out_features, + bias=pre_quant_layer.bias is not None, + device=pre_quant_layer.weight.device, + dtype=pre_quant_layer.weight.dtype) + self.config = config + + self.quantizer = Quantizer(config=config) + self.bias = pre_quant_layer.bias + self.weight = get_quantized_weight_wrapper(self, pre_quant_layer.weight, + get_quantize_weight_fn(self.quantizer, pre_quant_layer.weight)) + + self.weight.dequantizer = DeQuantizer(config, pre_quant_layer.weight.dtype) + + def forward(self, input: Tensor) -> Tensor: + quantized_weight, quant_scale, quant_min = self.weight.deconcat(self.weight) + temp_dequantized_weight = self.weight.dequantizer.dequantize(quantized_weight.view(torch.uint8), quant_scale, + quant_min) + + # !!! Do not use torch.functional.linear(input, temp_dequantized_weight, self.bias) here as in zero3 torch.functional.linear is + # replaced by LinearFunctionForZeroStage3. Which assume weight is non-temporary. + # If weight is temp buffer there will be memory leak. 
+ return torch._C._nn.linear(input, temp_dequantized_weight, self.bias) + + +class QuantizedEmbedding(nn.Embedding): + + def __init__(self, config: Dict, pre_quant_layer: nn.Embedding) -> None: + super(QuantizedEmbedding, self).__init__(num_embeddings=pre_quant_layer.num_embeddings, + embedding_dim=pre_quant_layer.embedding_dim, + padding_idx=pre_quant_layer.padding_idx, + max_norm=pre_quant_layer.max_norm, + norm_type=pre_quant_layer.norm_type, + scale_grad_by_freq=pre_quant_layer.scale_grad_by_freq, + sparse=pre_quant_layer.sparse, + _weight=pre_quant_layer.weight, + device=pre_quant_layer.weight.device, + dtype=pre_quant_layer.weight.dtype) + + assert pre_quant_layer.max_norm is None, 'Not supported' + assert pre_quant_layer.norm_type == 2, 'Not supported' + assert pre_quant_layer.scale_grad_by_freq == False, 'Not supported' + assert pre_quant_layer.sparse == False, 'Not supported' + + self.config = config + quantizer = Quantizer(config=config) + + self.weight = get_quantized_weight_wrapper(self, pre_quant_layer.weight, + get_quantize_weight_fn(quantizer, pre_quant_layer.weight)) + + self.weight.dequantizer = DeQuantizer(config, pre_quant_layer.weight.dtype) + + def forward(self, input: Tensor) -> Tensor: + quantized_weight, quant_scale, quant_min = self.weight.deconcat(self.weight) + temp_dequantized_weight = self.weight.dequantizer.dequantize(quantized_weight.view(torch.uint8), quant_scale, + quant_min) + + return F.embedding(input, temp_dequantized_weight, self.padding_idx, self.max_norm, self.norm_type, + self.scale_grad_by_freq, self.sparse) + + +QUANTIZATION_LAYER_MAPPINGS = { + nn.Linear: QuantizedLinear, + nn.Embedding: QuantizedEmbedding, +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/quantization.py b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..9ae39e8d568839f12d14aa98569677b5de9a7086 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/quantization.py @@ -0,0 +1,111 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from torch import nn +from typing import Dict +import gc +from deepspeed.inference.quantization import layers +from .layers import QUANTIZATION_LAYER_MAPPINGS +from .utils import get_AsyncPartitionedParameterSwapper, recursive_setattr +from deepspeed.utils.logging import logger +from collections import deque +from transformers.utils.generic import ContextManagers +from .quantization_context import QuantizationContext +import contextlib + + +def _init_group_wise_weight_quantization(model: nn.Module, ds_config: Dict) -> nn.Module: + """[Experimental] Apply group-wise weight quantization to model. Replace layers module according to config_list + + Args: + model (nn.Module): A nn.Module + ds_config (Dict, optional): The ds_config dictionary. use None for non-deepspeed managed model. + + Returns: + nn.Module: Quantized nn.Module + """ + + # global quantized_weight_registry + + matched_module_list_by_key = {} + matched_module_count = 0 + + assert 'weight_quantization' in ds_config, 'Please provide quantization config in ds_config' + quantization_config = ds_config['weight_quantization']['post_init_quant'] + + # Return nvme swapper if exists, else return None. + # For nvme offloading we must use the same swapper here as model initialized. 
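The point of keeping only the packed integer weight plus one (scale, min) pair per group, as QuantizedLinear above does, is storage. A rough footprint calculation for a single 4096x4096 fp16 weight with group size 64 (pure arithmetic, no DeepSpeed needed); note the saving is in storage only, since forward still materializes a temporary fp16 weight per call, which is why the dequantized tensor is deliberately not kept around.

elements   = 4096 * 4096                 # 16,777,216 weights
fp16_bytes = elements * 2                # 32 MiB original weight
groups     = elements // 64              # 262,144 groups of 64
meta_bytes = groups * 2 * 2              # fp16 scale + fp16 min per group, ~1 MiB

int8_total = elements * 1 + meta_bytes   # ~17 MiB  (~1.9x smaller)
int4_total = elements // 2 + meta_bytes  # ~9 MiB   (~3.6x smaller), two weights per byte

for name, val in [("fp16", fp16_bytes), ("int8", int8_total), ("int4", int4_total)]:
    print(f"{name}: {val / 2**20:.1f} MiB")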
+ nvme_swapper = get_AsyncPartitionedParameterSwapper(model) + is_zero3_enabled = 'zero_optimization' in ds_config and \ + 'stage' in ds_config['zero_optimization'] and \ + ds_config['zero_optimization']['stage'] == 3 + is_offloading_enabled = 'zero_optimization' in ds_config and \ + 'offload_param' in ds_config['zero_optimization'] + + layers.is_zero3_enabled = is_zero3_enabled + + context_mgr = ContextManagers([QuantizationContext(config_dict_or_path=ds_config, param_swapper=nvme_swapper)]) \ + if is_zero3_enabled else contextlib.suppress() + with context_mgr: + module_list = list( + filter(lambda named_module: type(named_module[1]) in QUANTIZATION_LAYER_MAPPINGS, model.named_modules())) + + # Quantize small weight first then large. + if not is_offloading_enabled: + module_list.sort(key=lambda named_module: named_module[1].weight.ds_tensor.numel() + if is_zero3_enabled else named_module[1].weight.numel()) + module_list = deque(module_list) + + while len(module_list) > 0: + # Use popleft to timely release module's memory of replaced module after each loop iteration + module_name, module = module_list.popleft() + + matched_key = None + matched_quantization_config = None + + for key, config in quantization_config.items(): + if key in module_name: + assert matched_key is None, f'{module_name} matched multiple quantization key word {matched_key} and {key}' + matched_key = key + matched_quantization_config = config + + if matched_key is None: + continue + + if is_zero3_enabled: + module.weight.all_gather() + + assert module.weight.dtype == torch.float16, 'Model weight is expected in half.' + + new_module = QUANTIZATION_LAYER_MAPPINGS[type(module)](matched_quantization_config, module) + + if is_zero3_enabled: + module.weight.partition() + + recursive_setattr(model, module_name, new_module) + + if matched_key not in matched_module_list_by_key: + matched_module_list_by_key[matched_key] = [] + matched_module_list_by_key[matched_key].append(module_name) + matched_module_count += 1 + + # Timely recycle memory to prevent OOM on large models + gc.collect() + + # Clear registry after model construction. + layers.quantized_weight_registry.clear() + + logger.info( + f'Group-wise weight quantization summary: convert {matched_module_count} node(s) to quantized implementation') + summary_str = '\n' + + for key, module_list in matched_module_list_by_key.items(): + summary_str += f'Key: {key}, matched modules:\n' + for module_name in module_list: + summary_str += f'\t{module_name}\n' + logger.info(summary_str) + + return model diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/quantization_context.py b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/quantization_context.py new file mode 100644 index 0000000000000000000000000000000000000000..d3333da0505883f032b18bc356a636a7b88170a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/quantization_context.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. 
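Reading the function above together with the Quantizer/DeQuantizer classes defined further down in utils.py, the expected ds_config shape is a 'weight_quantization' -> 'post_init_quant' dict whose keys are substrings matched against module names and whose values carry num_bits, group_size, group_dim, and symmetric. The sketch below is illustrative only: the toy model and the "0"/"2" keys are made up, the model must already be fp16 as the assert above requires, and it presumes a full DeepSpeed install with its optional imports (e.g. transformers) available.

import torch
from torch import nn
from deepspeed.inference.quantization.quantization import _init_group_wise_weight_quantization

model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 64)).half()

ds_config = {
    "weight_quantization": {
        "post_init_quant": {
            # Keys are substring-matched against module names ("0", "1", "2", ...).
            "0": {"num_bits": 8, "group_size": 64, "group_dim": 1, "symmetric": False},
            "2": {"num_bits": 4, "group_size": 64, "group_dim": 1, "symmetric": False},
        }
    }
}

# Matching nn.Linear modules are swapped for QuantizedLinear in place.
model = _init_group_wise_weight_quantization(model, ds_config)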
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.runtime.zero import partition_parameters +from deepspeed.runtime.swap_tensor.partitioned_param_swapper import AsyncPartitionedParameterSwapper + + +class QuantizationContext(partition_parameters.Init): + + def __init__(self, config_dict_or_path, param_swapper: AsyncPartitionedParameterSwapper = None) -> None: + super().__init__(config_dict_or_path=config_dict_or_path, param_swapper=param_swapper) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/utils.py b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..712abc384a44c0ae31d140faa2acb84dcdb161af --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/quantization/utils.py @@ -0,0 +1,288 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import deepspeed +from torch import Tensor +from typing import Tuple +import torch.nn as nn +from typing import Dict, Callable, Union +from deepspeed.accelerator import get_accelerator +import functools + +device = get_accelerator().device_name() if get_accelerator().is_available() else 'cpu' + +quantizer_cuda_module = None + + +def get_quantizer_cuda_module(): + global quantizer_cuda_module + if quantizer_cuda_module is None: + quantizer_cuda_module = deepspeed.ops.op_builder.QuantizerBuilder().load() + return quantizer_cuda_module + + +def tensor_clamp(tensor: Tensor, min, max) -> Tensor: + if tensor.device.type == 'cpu' and tensor.dtype == torch.float16: + # CPU does not support FP16 clamp + return tensor.to(dtype=torch.float32).clamp_(min, max).to(dtype=torch.float16) + else: + return tensor.clamp_(min, max) + + +def tensor_round(tensor: Tensor) -> Tensor: + if tensor.device.type == 'cpu' and tensor.dtype == torch.float16: + # CPU does not support FP16 round + return tensor.to(dtype=torch.float32).round_().to(dtype=torch.float16) + else: + return tensor.round_() + + +class Quantizer: + + def __init__(self, config: Dict) -> None: + self.config = config + assert self.config['num_bits'] == 4 or self.config[ + 'num_bits'] == 8, 'Only INT4 and INT8 quantization is supported.' + assert self.config['symmetric'] == False, 'Only asymmetric quantization is supported at this moment.' 
+ + def quantize(self, tensor: Tensor) -> Tuple[Tensor, Tensor, Tensor]: + assert tensor.shape[self.config['group_dim']] % self.config['group_size'] == 0 \ + , f'Tensor shape: {tensor.shape} quantization config {self.config}' + + tensor = torch.clone(tensor) + + shape = tensor.shape + num_groups = shape[self.config['group_dim']] // self.config['group_size'] + new_shape = (shape[:self.config['group_dim']] + (num_groups, self.config['group_size']) + + shape[self.config['group_dim'] + 1:]) + tensor = tensor.view(new_shape) + + quantized_tensor, scale, min_value = self._quantize_int8(tensor) + quantized_tensor = quantized_tensor.view(shape) + + if self.config['num_bits'] == 4: + return self._compress_uint8_to_uint4(quantized_tensor), scale, min_value + if self.config['num_bits'] == 8: + return quantized_tensor, scale, min_value + + assert False, 'Unsupported quantization bits {}'.format(self.config['num_bits']) + + def _quantize_int8(self, tensor: Tensor) -> Tuple[Tensor, Tensor, Tensor]: + q_range = 2**self.config['num_bits'] - 1 + min_value = tensor.amin(dim=self.config['group_dim'] + 1, keepdim=True) + max_value = tensor.amax(dim=self.config['group_dim'] + 1, keepdim=True) + + scale = q_range / (max_value - min_value) + + tensor = tensor.sub_(min_value).mul_(scale) + tensor = tensor_round(tensor_clamp(tensor, 0, q_range)).to(torch.uint8) + return tensor, scale, min_value + + def _compress_uint8_to_uint4(self, tensor: Tensor) -> Tensor: + assert tensor.shape[-1] % 2 == 0 + + new_data_shape = list(tensor.shape) + new_data_shape[-1] = new_data_shape[-1] // 2 + + data = torch.empty(new_data_shape, dtype=torch.uint8, device=tensor.device) + data = torch.bitwise_or(tensor[..., 0::2].bitwise_left_shift(4), tensor[..., 1::2]) + + return data + + +class DeQuantizer: + + def __init__(self, config: Dict, dtype: torch.dtype) -> None: + self.config = config + self.dtype = dtype + assert self.config['num_bits'] == 4 or self.config[ + 'num_bits'] == 8, 'Only INT4 and INT8 quantization is supported.' + assert self.config['symmetric'] == False, 'Only asymmetric quantization is supported at this moment.' + + def dequantize(self, tensor: Tensor, quant_scale: Tensor, quant_min: Tensor) -> Tensor: + # Use customized CUDA quantization kernel if possible. 
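The group-wise asymmetric scheme implemented by _quantize_int8 above (and inverted by _dequantize_int8 below) reduces to: scale = (2^bits - 1) / (max - min), q = round(clamp((x - min) * scale, 0, 2^bits - 1)), and x_hat = q / scale + min, applied per group. A tiny stand-alone round trip on one group of made-up values, independent of the classes in this file:

import torch

x = torch.tensor([-1.0, -0.25, 0.0, 0.5, 2.0])   # one quantization "group"
num_bits = 8
q_range = 2**num_bits - 1                         # 255

min_val, max_val = x.min(), x.max()
scale = q_range / (max_val - min_val)             # 255 / 3.0 = 85.0

q = torch.clamp((x - min_val) * scale, 0, q_range).round().to(torch.uint8)
x_hat = q / scale + min_val                       # dequantized approximation

print(q)        # tensor([  0,  64,  85, 128, 255], dtype=torch.uint8)
print(x_hat)    # close to x; the error is bounded by 0.5 / scale, about 0.006 here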
+ if self.config['group_size'] % 8 == 0 and \ + (self.config['num_bits'] == 4 or self.config['num_bits'] == 8) and \ + self.config['group_dim'] == len(tensor.shape) - 1 and \ + self.dtype == torch.float16 and device == 'cuda': + + last_dimension_size = self.config['group_size'] + if self.config['num_bits'] == 4: + last_dimension_size = last_dimension_size // 2 + quantized_tensor = get_quantizer_cuda_module().dequantize_int4_to_half_experimental( + tensor.reshape(-1, last_dimension_size), quant_scale, quant_min, + tensor.numel() // last_dimension_size, self.config['group_size']) + shape = list(tensor.shape) + shape[-1] = shape[-1] * 2 + elif self.config['num_bits'] == 8: + # last_dimension_size = last_dimension_size // 2 + quantized_tensor = get_quantizer_cuda_module().dequantize_int8_to_half_experimental( + tensor.reshape(-1, last_dimension_size), quant_scale, quant_min, + tensor.numel() // last_dimension_size, self.config['group_size']) + shape = list(tensor.shape) + + return quantized_tensor.reshape(shape) + + if self.config['num_bits'] == 4: + tensor = self._decompress_uint4_to_uint8(tensor) + elif self.config['num_bits'] != 8: + assert False, 'Unsupported quantization bits {}'.format(self.config['num_bits']) + + shape = tensor.shape + num_groups = shape[self.config['group_dim']] // self.config['group_size'] + new_shape = (shape[:self.config['group_dim']] + (num_groups, self.config['group_size']) + + shape[self.config['group_dim'] + 1:]) + tensor = tensor.view(new_shape) + + dequantized_tensor = self._dequantize_int8(tensor, quant_scale, quant_min).view(shape) + return dequantized_tensor + + def _dequantize_int8(self, tensor: Tensor, quant_scale: Tensor, quant_min: Tensor) -> Tensor: + assert tensor.dtype == torch.uint8 + data = torch.zeros_like(tensor, dtype=self.dtype, device=tensor.device) + data = data.copy_(tensor) + data = data.div_(quant_scale).add_(quant_min) + + return data + + def _decompress_uint4_to_uint8(self, tensor: Tensor) -> Tensor: + new_data_shape = list(tensor.shape) + new_data_shape[-1] = new_data_shape[-1] * 2 + data = torch.empty(new_data_shape, dtype=torch.uint8, device=tensor.device) + data[..., 0::2] = tensor.bitwise_right_shift(4) + data[..., 1::2] = tensor.bitwise_and(0xF) + + return data + + +def get_AsyncPartitionedParameterSwapper(model: nn.Module): + for param_name, param in model.named_parameters(): + if hasattr(param, 'nvme_swapper') and param.nvme_swapper is not None: + return param.nvme_swapper + return None + + +def recursive_setattr(model, module_name, module): + """ + Recursively set the attribute of a module. + Args: + model (`torch.nn.Module`) + The model to set the attribute in. + module_name (`str`) + The name of the module to set the attribute in. + module (`torch.nn.Module`) + The module to set the attribute to. 
+ """ + split_list = module_name.split('.') + output = model + for name in split_list[:-1]: + output = getattr(output, name) + output.__setattr__(split_list[-1], module) + + +def concat_to_compat_param(quantized_weight: Tensor, + quant_scale: Tensor, + quant_min: Tensor, + return_param: bool = True) -> Union[nn.Parameter, Tensor]: + shape_wieght = quantized_weight.shape + shape_scale = quant_scale.shape + shape_min = quant_min.shape + + quantized_weight = torch.flatten(quantized_weight) + quant_scale = torch.flatten(quant_scale) + quant_min = torch.flatten(quant_min) + + def deconcat_individual_tensors(shape_wieght: torch.Size, shape_scale: torch.Size, + shape_min: torch.Size) -> Callable: + + def fn(compat_tensor: nn.Parameter) -> Tuple[Tensor, Tensor, Tensor]: + weight = torch.narrow(compat_tensor, 0, 0, shape_wieght.numel()).view(shape_wieght) + scale = torch.narrow(compat_tensor, 0, shape_wieght.numel(), shape_scale.numel()).view(shape_scale) + min_val = torch.narrow(compat_tensor, 0, + shape_wieght.numel() + shape_scale.numel(), shape_min.numel()).view(shape_min) + + return weight, scale, min_val + + return fn + + compat_tensor = torch.concat([quantized_weight, quant_scale, quant_min]) + if return_param: + compat_tensor = nn.Parameter(compat_tensor, requires_grad=False) + compat_tensor.deconcat = deconcat_individual_tensors(shape_wieght, shape_scale, shape_min) + + return compat_tensor + + +def _quantize_param(param: nn.Parameter, quant_config: Dict): + assert not hasattr(param, 'weight_quantized'), 'Parameter has already been quantized.' + quantizer = Quantizer(quant_config) + dequantizer = DeQuantizer(quant_config, param.dtype) + + quantized_weight, quant_scale, quant_min = quantizer.quantize(param.data) + + quantized_weight = quantized_weight.view(param.dtype) + quant_scale = quant_scale.view(param.dtype) + quant_min = quant_min.view(param.dtype) + + quantized_compat_tensor = concat_to_compat_param(quantized_weight, quant_scale, quant_min) + param.data = quantized_compat_tensor + param.deconcat = quantized_compat_tensor.deconcat + + param.quantizer = quantizer + param.dequantizer = dequantizer + setattr(param, 'weight_quantized', True) + + +def wrap_quantized_functional(f): + + @functools.wraps(f) + def wrapper(input: Tensor, weight: nn.Parameter, *args, **kwargs) -> Tensor: + if hasattr(weight, 'weight_quantized') and getattr(weight, 'weight_quantized'): + quantized_weight, quant_scale, quant_min = weight.deconcat(weight) + temp_dequantized_weight = weight.dequantizer.dequantize(quantized_weight.view(torch.uint8), quant_scale, + quant_min) + return f(input, temp_dequantized_weight, *args, **kwargs) + else: + return f(input, weight, *args, **kwargs) + + return wrapper + + +def wrap_load_from_state_dict(f): + + @functools.wraps(f) + def wrapper(model, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): + replaced_old_value = None + key = None + # We may have nested wrappers if we launch multiple initialization context. 
+ # Use state_dict_quantized flag to quantize state_dict only once + if hasattr(model.weight, 'weight_quantized') and getattr( + model.weight, 'weight_quantized') and not hasattr(model.weight, 'state_dict_quantized'): + setattr(model.weight, 'state_dict_quantized', True) + key = prefix + 'weight' + if key in state_dict: + quantized_weight, quant_scale, quant_min = model.weight.quantizer.quantize(state_dict[key]) + quantized_weight = quantized_weight.view(model.weight.dtype) + quant_scale = quant_scale.view(model.weight.dtype) + quant_min = quant_min.view(model.weight.dtype) + + replaced_old_value = state_dict[key] + + state_dict[key] = concat_to_compat_param(quantized_weight, quant_scale, quant_min) + + f(model, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) + + if replaced_old_value is not None: + state_dict[key] = replaced_old_value + delattr(model.weight, 'state_dict_quantized') + + return wrapper + + +WEIGHT_QUANTIZATION_LAYERS = ( + nn.Linear, + nn.Embedding, +) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ac8a42da8ab3d9e4ace2a4f1d7b1d455cf7be7fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +from .config_v2 import RaggedInferenceEngineConfig, DeepSpeedTPConfig +from .engine_v2 import InferenceEngineV2 +from .engine_factory import build_hf_engine, build_engine_from_ds_checkpoint diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89488093d350437191e92645ec223bdadb5ded60 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/allocator.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/allocator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f4b4f043b8c7e9efe01c831fce692aa93a76b3f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/allocator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/config_v2.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/config_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90555d3d1ca529787fc09f0c56817e3b80da7d09 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/config_v2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/engine_factory.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/engine_factory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..255df3857f2a90a225de51b0408f163f29aad8a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/engine_factory.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/engine_v2.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/engine_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d32ac0557c0e2e1c0b60cab88122fa2d23c540a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/engine_v2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/inference_parameter.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/inference_parameter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bf8a913d5e7f58ccdaacbeeaf8f587ffe5116d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/inference_parameter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/inference_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/inference_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e25831cfdc6eb51692f96f94a6c303ccaf51c166 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/inference_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/logging.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06ff3b92a4b25ac5ab52ab4b94430f3b63f94842 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/logging.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/scheduling_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/scheduling_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdfb37614daebd81d942d351c14686b0283363da Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/__pycache__/scheduling_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/allocator.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/allocator.py new file mode 100644 index 0000000000000000000000000000000000000000..fcc0d94c0f825170dd89c54db73e53a2baf0c077 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/allocator.py @@ -0,0 +1,42 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from functools import reduce +from typing import Iterable +from collections import defaultdict +import torch + +from deepspeed.accelerator import get_accelerator + + +class Allocator: + cache = defaultdict(dict) + + def empty_from(tensor: torch.Tensor, shape: Iterable[int]) -> torch.Tensor: + try: + return Allocator.cache[tensor][shape] + except KeyError: + shape_size = reduce(lambda x, y: x * y, shape) + if shape_size == 0: + raise ValueError("Cannot create empty tensor with size 0") + Allocator.cache[tensor][shape] = tensor.flatten()[:shape_size].view(shape) + return Allocator.cache[tensor][shape] + + +empty_from = Allocator.empty_from + + +def on_device(method) -> torch.Tensor: + """ + Wraps a method to ensure the returned tensor is on the current device. 
+ """ + + def wrapped(self, *args, **kwargs): + tensor = method(self, *args, **kwargs) + if isinstance(tensor, torch.Tensor): + return tensor.to(get_accelerator().current_device()) + return tensor + + return wrapped diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/config_v2.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/config_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..85e4b7a0e0a00b1514c18c06e9f0c43fa41dd6c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/config_v2.py @@ -0,0 +1,43 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional +from deepspeed.pydantic_v1 import Field +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from .ragged import DSStateManagerConfig + + +class DeepSpeedTPConfig(DeepSpeedConfigModel): + """ Configure tensor parallelism settings """ + + tp_size: int = 1 + """ Number of devices to split the model across using tensor parallelism. """ + + +class QuantizationConfig(DeepSpeedConfigModel): + """ Configure tensor parallelism settings """ + + quantization_mode: Optional[str] = None + """ The quantization mode in string format. The supported modes are as follows: + - 'wf6af16', weight-only quantization with FP6 weight and FP16 activation. + """ + # TODO: may reuse the constants in deepspeed/compression/constants.py + + +class RaggedInferenceEngineConfig(DeepSpeedConfigModel): + """ Sets parameters for DeepSpeed Inference Engine. """ + + tensor_parallel: DeepSpeedTPConfig = Field({}, alias="tp") + """ + Configuration for tensor parallelism used to split the model across several + GPUs. Expects a dictionary containing values for :any:`DeepSpeedTPConfig`. + """ + + state_manager: DSStateManagerConfig = Field({}, alias="manager") + """ + Configuration for managing persistent state + """ + + quantization: QuantizationConfig = {} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/engine_factory.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/engine_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..c320108f55e5020e1b2679fa2c518e56ec391b50 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/engine_factory.py @@ -0,0 +1,129 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import json +import logging +import os +import pickle +from packaging import version + +from .engine_v2 import InferenceEngineV2 +from .config_v2 import RaggedInferenceEngineConfig +from .checkpoint import HuggingFaceCheckpointEngine +from .logging import inference_logger +from .model_implementations import ( + OPTPolicy, + Llama2Policy, + MistralPolicy, + MixtralPolicy, + FalconPolicy, + PhiPolicy, + QwenPolicy, + Qwen2Policy, +) +from .model_implementations.inference_policy_base import POLICIES, InferenceV2Policy +from .model_implementations.flat_model_helpers import make_metadata_filename, ModelMetadata + + +def build_engine_from_ds_checkpoint(path: str, + engine_config: RaggedInferenceEngineConfig, + debug_level: int = logging.INFO) -> InferenceEngineV2: + """ + Creates an engine from a checkpoint saved by ``InferenceEngineV2``. + + Arguments: + path: Path to the checkpoint. This does not need to point to any files in particular, + just the directory containing the checkpoint. + engine_config: Engine configuration. See ``RaggedInferenceEngineConfig`` for details. 
+ debug_level: Logging level to use. Unless you are actively seeing issues, the recommended + value is ``logging.INFO``. + + Returns: + Fully initialized inference engine ready to serve queries. + """ + + inference_logger(level=debug_level) + # Load metadata, for grabbing the policy name we'll have all ranks just check for + # rank 0. + metadata_filename = make_metadata_filename(path, 0, engine_config.tensor_parallel.tp_size) + metadata = json.load(open(metadata_filename, "r")) + metadata = ModelMetadata.parse_raw(metadata) + + # Get the policy + try: + policy_cls: InferenceV2Policy = POLICIES[metadata.policy] + except KeyError: + raise ValueError(f"Unknown policy {metadata.policy} for model {path}") + + # Load the model config + model_config = pickle.load(open(os.path.join(path, "ds_model_config.pkl"), "rb")) + policy = policy_cls(model_config, inf_checkpoint_path=path) + + return InferenceEngineV2(policy, engine_config) + + +def build_hf_engine(path: str, + engine_config: RaggedInferenceEngineConfig, + debug_level: int = logging.INFO) -> InferenceEngineV2: + """ + Build an InferenceV2 engine for HuggingFace models. This can accept both a HuggingFace + model name or a path to an Inference-V2 checkpoint. + + Arguments: + path: Path to the checkpoint. This does not need to point to any files in particular, + just the directory containing the checkpoint. + engine_config: Engine configuration. See ``RaggedInferenceEngineConfig`` for details. + debug_level: Logging level to use. Unless you are actively seeing issues, the recommended + value is ``logging.INFO``. + + Returns: + Fully initialized inference engine ready to serve queries. + """ + + if os.path.exists(os.path.join(path, "ds_model_config.pkl")): + return build_engine_from_ds_checkpoint(path, engine_config, debug_level=debug_level) + else: + # Set up logging + inference_logger(level=debug_level) + # get HF checkpoint engine + checkpoint_engine = HuggingFaceCheckpointEngine(path) + + # get model config from HF AutoConfig + model_config = checkpoint_engine.model_config + + # get the policy + # TODO: generalize this to other models + if model_config.model_type == "opt": + if not model_config.do_layer_norm_before: + raise ValueError( + "Detected OPT-350m model. This model is not currently supported. 
If this is not the 350m model, please open an issue: https://github.com/microsoft/DeepSpeed-MII/issues" + ) + policy = OPTPolicy(model_config, checkpoint_engine=checkpoint_engine) + elif model_config.model_type == "llama": + policy = Llama2Policy(model_config, checkpoint_engine=checkpoint_engine) + elif model_config.model_type == "mistral": + # Ensure we're using the correct version of transformers for mistral + import transformers + assert version.parse(transformers.__version__) >= version.parse("4.34.0"), \ + f"Mistral requires transformers >= 4.34.0, you have version {transformers.__version__}" + policy = MistralPolicy(model_config, checkpoint_engine=checkpoint_engine) + elif model_config.model_type == "mixtral": + # Ensure we're using the correct version of transformers for mistral + import transformers + assert version.parse(transformers.__version__) >= version.parse("4.36.1"), \ + f"Mistral requires transformers >= 4.36.1, you have version {transformers.__version__}" + policy = MixtralPolicy(model_config, checkpoint_engine=checkpoint_engine) + elif model_config.model_type == "falcon": + policy = FalconPolicy(model_config, checkpoint_engine=checkpoint_engine) + elif model_config.model_type == "phi": + policy = PhiPolicy(model_config, checkpoint_engine=checkpoint_engine) + elif model_config.model_type == "qwen": + policy = QwenPolicy(model_config, checkpoint_engine=checkpoint_engine) + elif model_config.model_type == "qwen2": + policy = Qwen2Policy(model_config, checkpoint_engine=checkpoint_engine) + else: + raise ValueError(f"Unsupported model type {model_config.model_type}") + + return InferenceEngineV2(policy, engine_config) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/engine_v2.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/engine_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..4a358310377f00af00d92b6e5dcd2fd5d600392d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/engine_v2.py @@ -0,0 +1,268 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import json +import pickle +from typing import Iterable, Tuple + +import torch + +import deepspeed.comm as dist + +from deepspeed.accelerator import get_accelerator +from deepspeed.comm.comm import init_distributed + +from .model_implementations import InferenceV2Policy +from .logging import inference_logger +from .ragged import DSStateManager, RaggedBatchWrapper, PlaceholderSequenceDescriptor +from .scheduling_utils import SchedulingError, SchedulingResult +from .model_implementations.flat_model_helpers import make_param_filename, make_metadata_filename +from .model_implementations.inference_model_base import DSInferenceModelBase + +from .config_v2 import RaggedInferenceEngineConfig + +INFERENCE_MODEL_TIMER = "model-forward-inference" + + +class InferenceEngineV2: + + _config: RaggedInferenceEngineConfig + """ + Configuration of the inference engine. + """ + + _model: DSInferenceModelBase + """ + Inference model supporting ragged inference. + """ + + _state_manager: DSStateManager + """ + Persistent state manager for sequences and KV-cache. + """ + + @property + def free_blocks(self) -> torch.Tensor: + """ + Number of free KV blocks. This is a tensor of shape [n_kv_cache_groups] where each + element is the number of free blocks in the corresponding KV cache group. 
+ """ + return self._state_manager.free_blocks + + @property + def n_kv_cache_groups(self) -> int: + """ + Number of KV cache groups. + """ + return self._state_manager.n_kv_cache_groups + + def model(self) -> DSInferenceModelBase: + """ + The model implementation. + """ + return self._model + + def __init__(self, policy: InferenceV2Policy, engine_config: RaggedInferenceEngineConfig) -> None: + """ + Create the Inference V2 engine. + + Arguments: + policy (InferenceV2Policy): Policy for the model implementation. This policy object + will be used to build the model and load the checkpoint associated with it. + engine_config (RaggedInferenceEngineConfig): Configuration for the inference engine. + """ + self._config = engine_config + self._policy = policy + self._base_mp_group = self._initialize_tp_group() + + # Build model from policy + inference_logger().info("Building model...") + self._model = self._policy.build_model(self._config, self._base_mp_group) + inference_logger().info("Model built.") + + # Create state manager + self._batch = RaggedBatchWrapper(self._config.state_manager) + self._state_manager = DSStateManager(self._config.state_manager, + self._model.kv_cache_config(), + base_mp_group=self._base_mp_group) + self._model.set_state_manager(self._state_manager) + + def _initialize_tp_group(self): + """ + Implementation of our TP group initialization. + """ + init_distributed() + local_rank = int(os.getenv("LOCAL_RANK", 0)) + get_accelerator().set_device(local_rank) + + if local_rank >= self._config.tensor_parallel.tp_size: + raise RuntimeError("Local rank is greater than TP size, ensure that the TP config is correct.") + + ranks = list(range(self._config.tensor_parallel.tp_size)) + return dist.new_group(ranks=ranks) + + def put(self, + batch_uids: Iterable[int], + batch_tokens: Iterable[torch.Tensor], + do_checks: bool = True) -> torch.Tensor: + """ + Put a ragged batch onto the inference engine. This will perform one forward and return + a Tensor of the shape [len(batch_uids), *output_shape]. Logits for the non-final tokens + are not calculated. + + Arguments: + batch_uids: Iterable of uids for the batch on the host + batch_tokens: Iterable of token tensors for the batch on the host + do_checks: Check schedulability when it is set to True. You can skip this check for better performance when it has already been completed. + """ + + if do_checks: + token_lens = [len(tokens) for tokens in batch_tokens] + schedule_check = self.can_schedule(batch_uids, token_lens) + if schedule_check != SchedulingResult.Success: + raise SchedulingError(schedule_check) + + self._batch.clear() + for uid, tokens in zip(batch_uids, batch_tokens): + + host_seq_desc = self._state_manager.get_or_create_sequence(uid) + self._model.maybe_allocate_kv(host_seq_desc, tokens.numel()) + host_seq_desc.pre_forward(tokens.numel()) + + # We can disable checks since we already validated schedulability. + self._batch.insert_sequence(host_seq_desc, tokens, do_checks=do_checks) + + # Send all metadata to the device + self._batch.finalize() + + # Prep all data structures for the actual forward (in anticipation of CG in the future) + # and also to amortize some of the costs in a more straightforward way. + self._model.prepare_batch(self._batch) + + # Model implementation will pick up in the forward. 
+ logits = self._model.forward(self._batch) + + # We return one set of logits per sequence in the batch (saves cost on unembedding) + assert logits.shape[0] == self._batch.current_sequences + + for uid in batch_uids: + host_seq_desc = self._state_manager.get_sequence(uid) + host_seq_desc.post_forward() # Updates sequence metadata. + self._model.maybe_free_kv(host_seq_desc) + + return logits + + def query(self, uid: int, max_request_tokens: int, max_request_blocks) -> Tuple[int, torch.Tensor]: + """ + Determine the number of tokens and KV blocks to reserve for a given request. Given a UID + (this UID may not be recognized by the model yet), this will return the number of tokens + and blocks to reserve for the request. + + Arguments: + uid (int): The UID of the sequence (as tracked by the scheduling entity). If + this is a new sequence (with a UID unknown to the inference engine), then + an empty placeholder is created to pass to the occupancy logic. + max_request_tokens (int): The maximum number of tokens that may be scheduled for this request. + max_request_blocks: The maximum number of KV blocks that may be allocated to this request. + + Returns: + Tuple[int, torch.Tensor]: The number of tokens and the number of KV blocks to reserve + for the request. + """ + seq_desc = self._state_manager.get_sequence(uid) + if seq_desc is None: + if (self._state_manager.n_tracked_sequences == self._config.state_manager.max_tracked_sequences): + return (0, 0) + seq_desc = PlaceholderSequenceDescriptor() + + req_tokens, req_blocks = self._model.get_kv_requirements(seq_desc, max_request_tokens, max_request_blocks) + + return (req_tokens, req_blocks) + + def can_schedule(self, uids: Iterable[int], lengths: Iterable[int]) -> SchedulingResult: + """ + Dry run a batch to determine if it can be scheduled. Placeholder sequences will be + created for any UIDs that are unknown to the inference engine. + + Arguments: + uids (Iterable[int]): Iterable of UIDs for the batch + lengths (Iterable[int]): Iterable of lengths for each sequence of the batch. These lengths + correspond to the number of tokens to send in the hypothetical forward; history + tokens will be determined via UID lookup and future tokens are disregarded. + + Returns: + SchedulingResult: ``SchedulingResult.Success`` if the batch can be scheduled, otherwise a + result describing why the batch cannot be scheduled. + """ + + cur_seqs = self._state_manager.n_tracked_sequences + free_blocks = self._state_manager.free_blocks + req_blocks = 0 + batch_len = 0 + + if len(uids) > self._config.state_manager.max_ragged_sequence_count: + # Can only compose a batch from a limited number of sequences + return SchedulingResult.BatchSequenceLimitExceeded + + for uid, length in zip(uids, lengths): + seq_desc = self._state_manager.get_sequence(uid) + if seq_desc is None: + cur_seqs += 1 + seq_desc = PlaceholderSequenceDescriptor() + + sched_len, sched_blocks = self._model.get_kv_requirements(seq_desc, length, free_blocks) + + if sched_len != length: + # We ran out of KV cache + return SchedulingResult.KVCacheLimitExceeded + + batch_len += length + free_blocks -= sched_blocks + + if cur_seqs > self._config.state_manager.max_tracked_sequences: + # Would run out of tracking metadata + return SchedulingResult.EngineSequenceLimitExceeded + + if batch_len > self._config.state_manager.max_ragged_batch_size: + # Would exceed the maximum batch size + return SchedulingResult.BatchTokenLimitExceeded + + return SchedulingResult.Success + + def get_remaining_block_capacity(self, uid: int) -> int: + """ + Get the remaining capacity of the last block already allocated.
+ """ + seq_desc = self._state_manager.get_sequence(uid) + if seq_desc is None: + return 0 + return self._model.get_remaining_block_capacity(seq_desc) + + def flush(self, uid: int) -> None: + """ + Remove all state associated with a sequence from the inference engine. + + Arguments: + uid (int): The UID of the sequence to flush. + """ + self._state_manager.flush_sequence(uid) + + def serialize(self, save_path: str) -> None: + """ + Serialize the model to a file. + + Arguments: + path (str): Path to the file to serialize to. + """ + param_file_name = make_param_filename(save_path, self._model.tp_rank, self._model.tp_size) + metadata_file_name = make_metadata_filename(save_path, self._model.tp_rank, self._model.tp_size) + + # Save the flattened parameters + + torch.save(self._model.flattened_params, param_file_name) + + json.dump(self._model.flattened_param_metadata.json(), open(metadata_file_name, "w")) + + if self._model.tp_rank == 0: + pickle.dump(self._model._config, open(os.path.join(save_path, "ds_model_config.pkl"), "wb")) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/inference_parameter.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/inference_parameter.py new file mode 100644 index 0000000000000000000000000000000000000000..4dcff16a4515ce37ed334c9b0b0d623eea5b2ac2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/inference_parameter.py @@ -0,0 +1,89 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Dict + +import torch + +CORE_PARAM = "_ds_core_param_key" + +STR_TO_DTYPE = { + "torch.float32": torch.float32, + "torch.float64": torch.float64, + "torch.float16": torch.float16, + "torch.bfloat16": torch.bfloat16, + "torch.int64": torch.int64, + "torch.int32": torch.int32, + "torch.int16": torch.int16, + "torch.int8": torch.int8, + "torch.uint8": torch.uint8, + "torch.bool": torch.bool, +} + + +class InferenceParameter(torch.Tensor): + """ + An extension of the torch.Tensor class to support our inference focused features. One important + thing to note here is that an InferenceParam can be used a torch.Tensor, but outputs of + torch.Tensor operations will not be InferenceParams. + """ + + @staticmethod + def __new__(cls, tensor, *args, **kwargs): + new_tensor = super().__new__(cls, tensor, *args, **kwargs) + if hasattr(tensor, "_aux_attrs"): + setattr(new_tensor, "_aux_attrs", tensor.aux_attrs) + return new_tensor + + def to(self, *args, **kwargs): + new_tensor = super().to(*args, **kwargs) + if hasattr(self, "_aux_attrs"): + setattr(new_tensor, "_aux_attrs", self.aux_attrs) + try: + _ = torch.device(args[0]) + for name, attr in new_tensor.aux_attrs.items(): + new_attr = attr.to(*args, **kwargs) + setattr(new_tensor, name, new_attr) + new_tensor.aux_attrs[name] = new_attr + except: + pass + + return new_tensor + + @classmethod + def initialize(cls, core_param: torch.Tensor, **kwargs) -> 'InferenceParameter': + """ + Create the inference parameter. 
+ """ + param = InferenceParameter(core_param) + setattr(param, "_aux_attrs", kwargs) + + for attr_name, attr in kwargs.items(): + if hasattr(param, attr_name): + raise ValueError(f"Attribute {attr_name} already exists on param.") + + if not isinstance(attr, torch.Tensor): + raise ValueError(f"Attribute {attr_name} must be a tensor.") + + setattr(param, attr_name, attr) + + return param + + @classmethod + def initialize_raw(self, **kwargs) -> 'InferenceParameter': + """ + All kwargs must be torch.Tensors and must include the core parameter. + """ + if CORE_PARAM not in kwargs: + raise ValueError(f"Must provide core parameter, with key {CORE_PARAM}.") + + return InferenceParameter.initialize(kwargs[CORE_PARAM], **kwargs) + + @property + def aux_attrs(self) -> Dict[str, torch.Tensor]: + """ + Dictionary of auxiliary attributes. + """ + return self._aux_attrs diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/inference_utils.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/inference_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7b2dd4237353d85b4249faafb2d8c3051289b2ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/inference_utils.py @@ -0,0 +1,105 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Dict + +import torch + +from enum import Enum, IntEnum + + +class NormTypeEnum(Enum): + LayerNorm: str = "layer_norm" + RMSNorm: str = "rms_norm" + + +class DtypeEnum(Enum): + # The torch dtype must always be the first value (so we return torch.dtype) + fp16 = torch.float16, "torch.float16", "fp16", "float16", "half" + fp32 = torch.float32, "torch.float32", "fp32", "float32", "float" + bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16", "bfloat" + int8 = torch.int8, "torch.int8", "int8" + + # Copied from https://stackoverflow.com/a/43210118 + # Allows us to use multiple values for each Enum index and returns first + # listed value when Enum is called + def __new__(cls, *values): + obj = object.__new__(cls) + # first value is canonical value + obj._value_ = values[0] + for other_value in values[1:]: + cls._value2member_map_[other_value] = obj + obj._all_values = values + return obj + + def __repr__(self): + return "<%s.%s: %s>" % ( + self.__class__.__name__, + self._name_, + ", ".join([repr(v) for v in self._all_values]), + ) + + +ELEM_SIZES: Dict[torch.dtype, int] = { + torch.float16: 2, + torch.bfloat16: 2, + torch.float32: 4, + torch.float64: 8, + torch.int8: 1, + torch.uint8: 1, + torch.int16: 2, + torch.int32: 4, + torch.int64: 8, + torch.bool: 1, +} + + +class ActivationType(IntEnum): + """ + Types of activations supported by DS-Inference + """ + + GELU = 0 + + RELU = 1 + + SILU = 2 + + GEGLU = 3 + + ReGLU = 4 + + SiGLU = 5 + + IDENTITY = 6 + + InvalidType = -1 + + +def is_gated(act_fn: ActivationType) -> bool: + """ + Return True if the given activation function is gated. + """ + if not isinstance(act_fn, ActivationType): + act_fn = ActivationType(act_fn) + + return act_fn in [ActivationType.GEGLU, ActivationType.ReGLU, ActivationType.SiGLU] + + +def elem_size(dtype: torch.dtype) -> int: + """ + Return size in bytes of the given dtype. + """ + try: + return ELEM_SIZES[dtype] + except KeyError: + raise ValueError("Unknown dtype size for {}".format(dtype)) + + +def ceil_div(a: int, b: int) -> int: + """ + Return ceil(a / b). 
+ """ + return -(-a // b) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/logging.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..77afe351cbea127c2f0f5dbc9a249dd71516ca31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/logging.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import logging + +from deepspeed.utils.logging import LoggerFactory + +inf_logger = None + + +def inference_logger(level: int = logging.INFO) -> logging.Logger: + """ + Create the inference logger. NOTE: Logging is not cost free. On a 3960X, + there is a cost of about 6 us per call to a no-op logger, so this should + be used during setup only and not during the inference loop. + + Args: + level (int, optional): The logging level. Defaults to logging.INFO. + """ + global inf_logger + if inf_logger is None: + inf_logger = LoggerFactory.create_logger(name="DS-Inference", level=level) + inf_logger.debug("Inference logger created.") + return inf_logger diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..917c1599de2e6124ef7ee09f73a655053f6acb7e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from . import implementations +from . import interfaces +from .module_registry import ConfigBundle diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3429e69b47de32cd14346b8fd4ad1c7d0ac460c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .attention_configs import ( + DSSelfAttentionConfig, + PositionalEmbeddingType, + MaskingType, + RotateHalfConfig, +) +from .embedding_config import DSEmbeddingsConfig +from .linear_config import DSLinearConfig +from .moe_config import DSMoEConfig +from .norm_config import DSNormConfig, NormTypeEnum +from .unembed_config import DSUnembedConfig diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/attention_configs.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/attention_configs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e257fcb3ebe89c37df667bc487ee1262c3024ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/attention_configs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/embedding_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/embedding_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac42d01c918f45727ec2dbb22984e000d38c3980 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/embedding_config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/linear_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/linear_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6abab8fe393566243e859b069d9edcca464c0306 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/linear_config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/norm_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/norm_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d91dcbad9f1384b47304785b95080341e94940fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/norm_config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/attention_configs.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/attention_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..be6a3535024c1e2e90d89d57dca9efb454b8889f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/attention_configs.py @@ -0,0 +1,110 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from enum import Enum +from typing import Dict, Optional + +from ...inference_utils import DtypeEnum +from ...modules.ds_module import DSModuleConfig +from deepspeed.runtime.config_utils import DeepSpeedConfigModel + + +class PositionalEmbeddingType(Enum): + + # No positional embeddings + none = "none" + + # Rotary positional embeddings - every half + rotate_half = "rotate_half" + + # Rotary positional embeddings - every other + rotate_every_other = "rotate_every_other" + + # Alibi + alibi = "alibi" + + +class RotateHalfConfig(DeepSpeedConfigModel): + + use_trained_freqs: bool = False + """ + Whether to use a passed `trained_freqs` tensor for the attention implementation + or to use default synthesized frequencies. + """ + + theta_base: float = 10_000.0 + """ + Base for theta. This will only be used if `use_trained_freqs` is False. + """ + + rotate_dim: Optional[int] = None + """ + How many neurons to rotate. If None, then all neurons will be rotated. Many external configs + will set this number to half the head dimension and then internally multiply by 2. To make it + more clear to understand what is happening (rotate_dim < head_dim -> then only partial rotation), + we do not do this multiplication internally. + """ + + +class MaskingType(Enum): + + # No masking + none = "none" + + # Causal masking + causal = "causal" + + # Local masking + local = "local" + + # Symmetric masking (this is a 1D tensor mask) + symmetric = "symmetric" + + # Arbitrary masking (this would correspond to a 2D tensor mask) + asymmetric = "asymmetric" + + +class DSSelfAttentionConfig(DSModuleConfig): + """ + Config class for attention. + """ + + # Number of query attention heads on this shard + n_heads_q: int + + # Number of KV attention heads on this shard + n_heads_kv: int + + # Size of each attention head + head_size: int + + # Max number of sequences that may compose a ragged batch + max_sequences: int + + # Scale factor for attention scores + scale_factor: float = 1.0 + + # Input data type + input_dtype: DtypeEnum = DtypeEnum.fp16 + + # Output data type + output_dtype: DtypeEnum = DtypeEnum.fp16 + + # Masking type + masking_type: MaskingType = MaskingType.causal + + # Masking args + masking_args: Dict = {} + + # Positional embedding type + positional_embedding_type: PositionalEmbeddingType = PositionalEmbeddingType.none + + # Positional embedding args + positional_embedding_config: Optional[RotateHalfConfig] = None + """ + To extend this for the other positional embedding types, we would need to add + new configs for each type (as necessary) and annotate this with the + Union[RotateHalfConfig, OtherConfig, ...] type. + """ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/embedding_config.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/embedding_config.py new file mode 100644 index 0000000000000000000000000000000000000000..2486c5986e9531c7377f1dddbe81ebafb6ef377f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/embedding_config.py @@ -0,0 +1,70 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional + +from ...inference_utils import DtypeEnum, NormTypeEnum +from ...modules.ds_module import DSModuleConfig +""" +Trying to define the space we need to support here right now: + +Types of embeddings I've found so far: + 1. Token embedding + 2. Position embedding + 3. 
Token type embedding + 4. LN + +GPTNeo: 1, 2, 3 (shared with 1) +GPTNeoX: 1 +GPTJ: 1, 3 +LLaMA: 1 +BERT: 1, 2, 3, 4 +GPT2: 1, 2, 3 (shared with 1) + +Sidebar for OPT: +OPT: 1, 2 +1 may not actually project to the actual hidden dimension according to the raw +code, but for the model configs we care about it does. +2 has a weird offset associated with it that the others do not. +""" + + +class DSEmbeddingsConfig(DSModuleConfig): + """ + Config class for DSEmbeddings. + """ + + residual_dtype: DtypeEnum = DtypeEnum.fp16 + """ + Data type the module should use for its output. + """ + + embedding_dim: int + """ + Dimensionality of the embedding projections. + """ + + positional_embedding: bool = False + """ + Whether the module should expect a positional embedding matrix. The shape of this + matrix should be of shape [max_seq_len + positional_offset, embedding_dim] + """ + + positional_offset: int = 0 + """ + Whether the linearized token IDs should be offset by a certain amount. For an example + of this, see the OPT model implementation. + """ + + use_token_type: bool = False + """ + Whether the module should expect a token type embedding matrix. + """ + + output_normalization: Optional[NormTypeEnum] = None + """ + If a the output of the embedding module should be normalized, specify here. See + ``inference.inference_utils.NormTypeEnum`` for supported values. + """ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/linear_config.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/linear_config.py new file mode 100644 index 0000000000000000000000000000000000000000..40fe0773aeeee92d505a115f85c440a753491329 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/linear_config.py @@ -0,0 +1,43 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ...inference_utils import ActivationType, DtypeEnum +from ...modules.ds_module import DSModuleConfig + + +class DSLinearConfig(DSModuleConfig): + """ + Config class for DSLinearBase. + """ + + in_channels: int + """ + Number of input channels + """ + + out_channels: int + """ + Number of output channels. NOTE: If this linear layer is using a gated activation function, + the value for ``out_channels`` passed here should refer to the number of channels after + gating (i.e., the expected weight shape before transformations will be ``[out_channels * 2, in_channels]``). + """ + + activation: ActivationType = ActivationType.IDENTITY + """ + The activation function for this layer. See :class:`deepspeed.inference.inference_utils.ActivationType` for + supported activation functions. + """ + + input_dtype: DtypeEnum = DtypeEnum.fp16 + """ + The data type of the input tensor. See :class:`deepspeed.inference.inference_utils.DtypeEnum` for supported + data types. + """ + + output_dtype: DtypeEnum = DtypeEnum.fp16 + """ + The data type of the output tensor. See :class:`deepspeed.inference.inference_utils.DtypeEnum` for supported + data types. + """ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/moe_config.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/moe_config.py new file mode 100644 index 0000000000000000000000000000000000000000..7bc944f55e17cf4e837c2a5db1144099dd942fdd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/moe_config.py @@ -0,0 +1,56 @@ +# Copyright (c) Microsoft Corporation. 
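A hedged construction sketch for DSLinearConfig above; max_tokens is inherited from the DSModuleConfig base class (see ds_module.py later in this diff), and the sizes plus the GEGLU choice are illustrative. With a gated activation, out_channels is the post-gating width, so the expected weight shape is [out_channels * 2, in_channels]:

from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum
from deepspeed.inference.v2.modules.configs import DSLinearConfig

mlp_1_config = DSLinearConfig(
    max_tokens=2048,          # required by the DSModuleConfig base
    in_channels=4096,
    out_channels=11008,       # post-gating channels for a GEGLU layer
    activation=ActivationType.GEGLU,
    input_dtype=DtypeEnum.fp16,
    output_dtype=DtypeEnum.fp16,
)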
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ...inference_utils import ActivationType, DtypeEnum +from ...modules.ds_module import DSModuleConfig + + +class DSMoEConfig(DSModuleConfig): + """ + Config class for DSMoEBase + """ + + model_dim: int + """ + Size of input activation. + """ + + intermediate_features: int + """ + Size of intermediate activation. Specifically, this is the number of input features + in the second linear layer. Depending on the activation function, the output of the first + linear layer may have increased dimensionality. + """ + + n_experts: int + """ + Number of experts. + """ + + top_k: int = 1 + """ + top-k gating function (like top-1 or top-2) + """ + + input_dtype: DtypeEnum = DtypeEnum.fp16 + """ + Data type for the input activations. + """ + + output_dtype: DtypeEnum = DtypeEnum.fp16 + """ + Data type for the output activations. + """ + + activation: ActivationType = ActivationType.IDENTITY + """ + Activation function of the first MLP1 + """ + + normalize_scores: bool = False + """ + Whether normalization is applied to the selected scores. If true, the module + should rescale the scores such that their sum is 1.0. + """ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/norm_config.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/norm_config.py new file mode 100644 index 0000000000000000000000000000000000000000..358982253756af4c4065d2e8cc53f7d0dd0b0287 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/norm_config.py @@ -0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ...inference_utils import DtypeEnum, NormTypeEnum +from ...modules.ds_module import DSModuleConfig + + +class DSNormConfig(DSModuleConfig): + """ + Config class for both DSPreLN and DSPostLN. + """ + + # Type of normalization + type: NormTypeEnum + + # Number of channels in the model embedding + channels: int + + # Data type of the residual input/outputs (we assume the residual must + # be the same data type for the entire model). + residual_dtype: DtypeEnum = DtypeEnum.fp16 + + # Data type of the hidden states input + input_dtype: DtypeEnum = DtypeEnum.fp16 + + # Data type of the hidden states output + output_dtype: DtypeEnum = DtypeEnum.fp16 + + # Epsilon value for numerical stability + eps: float = 1e-5 diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/unembed_config.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/unembed_config.py new file mode 100644 index 0000000000000000000000000000000000000000..ea4cc3cc99c17af9201db4774ebc0d095539dd82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/unembed_config.py @@ -0,0 +1,39 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ...inference_utils import DtypeEnum, NormTypeEnum +from ...modules.ds_module import DSModuleConfig +from typing import Optional + + +class DSUnembedConfig(DSModuleConfig): + """ + Config class for DSUnembed + """ + + dtype: DtypeEnum = DtypeEnum.fp16 + """ + Expected data type. + """ + + norm_type: Optional[NormTypeEnum] = None + """ + Whether the input to the unembed is normalized prior to the unembedding projection. + """ + + model_dim: int + """ + Model embedding size. + """ + + max_sequences: int + """ + Max sequences composing the ragged batch. 
+ """ + + vocab_size: int + """ + Local vocab size (the full vocab size may have been sharded across model parallel ranks) + """ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/ds_module.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/ds_module.py new file mode 100644 index 0000000000000000000000000000000000000000..2a6d294f32666cf46ab918ba03150d3f17ae37ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/ds_module.py @@ -0,0 +1,62 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import ABC, abstractstaticmethod +from typing import Any, Dict, Type + +import torch + +from deepspeed.runtime.config_utils import DeepSpeedConfigModel + + +class DSModuleConfig(DeepSpeedConfigModel): + + max_tokens: int + + +class DSModuleBase(torch.nn.Module, ABC): + """ + Base class for all DeepSpeed Inference modules. This class establishes + the basic attributes of a DSModule. Only abstract functionality modules should inherit + directly from this class, not specific implementations. + """ + + @abstractstaticmethod + def name() -> str: + """ + Return a memorable, human-readable name for this module. + + This will be used as a key in custom inference configurations and should only + be implemented by the children of functionality modules. + """ + ... + + @abstractstaticmethod + def config_class() -> Type[DSModuleConfig]: + """ + Return the associated config class for this module. + + This should be implemented (along with the config class) by an abstract functionality + module. + """ + ... + + @abstractstaticmethod + def supports_config(config: DSModuleConfig) -> bool: + """ + Return whether or not this module supports the given config. + + This should be implemented by the children of functionality modules and should report + whether it would be feasible to instantiate this module with the given config. + """ + ... + + def __init__(self, config: DSModuleConfig, implementation_config: Dict[str, Any] = {}) -> None: + """ + Initialize the module with the given config. + """ + super().__init__() + self._config = config + self._implementation_config = implementation_config diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/heuristics.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/heuristics.py new file mode 100644 index 0000000000000000000000000000000000000000..f719e299a4b2eb99b40c58807a14d71ac2cc2319 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/heuristics.py @@ -0,0 +1,195 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..config_v2 import RaggedInferenceEngineConfig +from ..inference_utils import NormTypeEnum + +from .module_registry import ConfigBundle +from ..modules.configs import ( + DSEmbeddingsConfig, + DSLinearConfig, + DSMoEConfig, + DSNormConfig, + DSSelfAttentionConfig, + DSUnembedConfig, +) +from ..modules.interfaces import ( + DSEmbeddingBase, + DSEmbeddingRegistry, + DSLinearBase, + DSLinearRegistry, + DSMoEBase, + DSMoERegistry, + DSPostNormBase, + DSPostNormRegistry, + DSPreNormBase, + DSPreNormRegistry, + DSSelfAttentionBase, + DSSelfAttentionRegistry, + DSUnembedBase, + DSUnembedRegistry, +) + + +def instantiate_attention(attention_config: DSSelfAttentionConfig, + engine_config: RaggedInferenceEngineConfig) -> DSSelfAttentionBase: + """ + Choose an appropriate attention implementation based on the given configurations. This + method is currently a stub, but as more implementations may be developed we can centralize + the logic for choosing between them here. + + Arguments: + attention_config (DSSelfAttentionConfig): Configuration for the attention module. + engine_config (RaggedInferenceEngineConfig): Configuration for the inference engine. + + Returns: + An attention module implementing the given configuration. + """ + + # Currently, we only have one implementation, so we just return it. + config = ConfigBundle(name="dense_blocked_attention", config=attention_config) + return DSSelfAttentionRegistry.instantiate_config(config) + + +def instantiate_embed(embed_config: DSEmbeddingsConfig, engine_config: RaggedInferenceEngineConfig) -> DSEmbeddingBase: + """ + Choose an appropriate embedding implementation based on the given configurations. This + method is currently a stub, but as more implementations may be developed we can centralize + the logic for choosing between them here. + + Arguments: + embed_config (DSEmbeddingsConfig): Configuration for the embedding module. + engine_config (RaggedInferenceEngineConfig): Configuration for the inference engine. + + Returns: + An embedding module implementing the given configuration. + """ + + # Currently, we only have one implementation, so we just return it. + config = ConfigBundle(name="ragged_embedding", config=embed_config) + return DSEmbeddingRegistry.instantiate_config(config) + + +def instantiate_linear(linear_config: DSLinearConfig, engine_config: RaggedInferenceEngineConfig) -> DSLinearBase: + """ + Choose an appropriate linear implementation based on the given configurations. This + method is currently a stub, but as more implementations may be developed we can centralize + the logic for choosing between them here. + + Arguments: + linear_config (DSLinearConfig): Configuration for the linear module. + engine_config (RaggedInferenceEngineConfig): Configuration for the inference engine. + + Returns: + A linear module implementing the given configuration. + """ + + quantization_mode = engine_config.quantization.quantization_mode + if quantization_mode is None: + config = ConfigBundle(name="blas_fp_linear", config=linear_config) + else: + # Currently, we only support ``quantized_wf6af16_linear`` on NVIDIA Ampere GPUs. 
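The quantization_mode checked just below comes from the engine config sketched in config_v2.py earlier in this diff; a hedged example of opting into the FP6 path, assuming the pydantic-style config accepts a nested dict as the other DeepSpeed configs do:

from deepspeed.inference.v2 import RaggedInferenceEngineConfig

engine_config = RaggedInferenceEngineConfig(quantization={"quantization_mode": "wf6af16"})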
+ if quantization_mode == "wf6af16": + import torch + if not torch.cuda.is_available(): #ignore-cuda + raise ValueError("WF6AF16 quantization is only supported on CUDA") + else: + is_rocm_pytorch = hasattr(torch.version, 'hip') and torch.version.hip is not None + if is_rocm_pytorch: + raise ValueError("WF6AF16 quantization is only supported on NVIDIA GPUs") + elif torch.cuda.get_device_properties(0).major != 8: #ignore-cuda + raise ValueError("WF6AF16 quantization is only supported on Ampere architectures") + config = ConfigBundle(name="quantized_wf6af16_linear", config=linear_config) + else: + raise ValueError(f"Unsupported quantization mode: {quantization_mode}") + return DSLinearRegistry.instantiate_config(config) + + +def instantiate_moe(moe_config: DSMoEConfig, engine_config: RaggedInferenceEngineConfig) -> DSMoEBase: + """ + Choose an appropriate MoE implementation based on the given configurations. This + method is currently a stub, but as more implementations may be developed we can centralize + the logic for choosing between them here. + + Arguments: + moe_config (DSMoEConfig): Configuration for the MoE module. + engine_config (RaggedInferenceEngineConfig): Configuration for the inference engine. + + Returns: + A MoE module implementing the given configuration. + """ + + moe_type = "cutlass_multi_gemm_moe" + + if moe_type == "cutlass_multi_gemm_moe": + # TODO: Get this off an engine config + implementation_config = { + "weight_dtype": moe_config.input_dtype, + } + + # Currently, we only have one implementation, so we just return it. + config = ConfigBundle(name="cutlass_multi_gemm_moe", + config=moe_config, + implementation_config=implementation_config) + return DSMoERegistry.instantiate_config(config) + + +def instantiate_post_norm(norm_config: DSNormConfig, engine_config: RaggedInferenceEngineConfig) -> DSPostNormBase: + """ + Choose an appropriate post-norm implementation based on the given configurations. This + method is currently a stub, but as more implementations may be developed we can centralize + the logic for choosing between them here. + + Arguments: + norm_config (DSNormConfig): Configuration for the post-norm module. + engine_config (RaggedInferenceEngineConfig): Configuration for the inference engine. + + Returns: + A post-norm module implementing the given configuration. + """ + + # Currently, we only have one implementation, so we just return it. + config = ConfigBundle(name="cuda_post_ln", config=norm_config) + return DSPostNormRegistry.instantiate_config(config) + + +def instantiate_pre_norm(norm_config: DSNormConfig, engine_config: RaggedInferenceEngineConfig) -> DSPreNormBase: + """ + Choose an appropriate pre-norm implementation based on the given configurations. Currently, + this will select between two CUDA implementations, one for LayerNorm and one for RMSNorm. + + Arguments: + norm_config (DSNormConfig): Configuration for the pre-norm module. + engine_config (RaggedInferenceEngineConfig): Configuration for the inference engine. + + Returns: + A pre-norm module implementing the given configuration. 
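+
+ Note: only LayerNorm and RMSNorm are handled below, mapping to the "cuda_pre_ln" and
+ "cuda_pre_rms" implementations respectively.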
+ """ + if NormTypeEnum(norm_config.type) == NormTypeEnum.LayerNorm: + module_name = "cuda_pre_ln" + elif NormTypeEnum(norm_config.type) == NormTypeEnum.RMSNorm: + module_name = "cuda_pre_rms" + + config = ConfigBundle(name=module_name, config=norm_config) + return DSPreNormRegistry.instantiate_config(config) + + +def instantiate_unembed(unembed_config: DSUnembedConfig, engine_config: RaggedInferenceEngineConfig) -> DSUnembedBase: + """ + Choose an appropriate unembedding implementation based on the given configurations. This + method is currently a stub, but as more implementations may be developed we can centralize + the logic for choosing between them here. + + Arguments: + unembed_config (DSUnembedConfig): Configuration for the unembed module. + engine_config (RaggedInferenceEngineConfig): Configuration for the inference engine. + + Returns: + An unembed module implementing the given configuration. + """ + + # Currently, we only have one implementation, so we just return it. + config = ConfigBundle(name="ragged_unembed", config=unembed_config) + return DSUnembedRegistry.instantiate_config(config) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1ff593ac1368235cc03b427532154ec92b8bbe82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .dense_blocked_attention import DSDenseBlockedAttention diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc1f9cd1c816e67405d3540c8ce258b7099a9f12 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/__pycache__/dense_blocked_attention.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/__pycache__/dense_blocked_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90ac16ba32417f9cd282c5bcac4a20bac9a86acb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/__pycache__/dense_blocked_attention.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/dense_blocked_attention.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/dense_blocked_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..3515b3c2b690c6346db7afba02b5790a2eb98982 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/attention/dense_blocked_attention.py @@ -0,0 +1,180 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any, Dict, Optional + +import torch + +from deepspeed.accelerator import get_accelerator +from ....allocator import empty_from +from ....inference_utils import DtypeEnum +from ....kernels.ragged_ops import ( + AtomBuilder, + BlockedFlashAttn, + BlockedRotaryEmbeddings, + BlockedTrainedRotaryEmbeddings, + get_q_block_size, + get_kv_block_size, + LinearBlockedKVCopy, +) +from ....ragged import RaggedBatchWrapper, split_kv +from deepspeed.ops.op_builder import RaggedUtilsBuilder + +from ...interfaces import DSSelfAttentionBase, DSSelfAttentionRegistry +from ...configs import DSSelfAttentionConfig, PositionalEmbeddingType, MaskingType + +try: + from functools import cached_property +except ImportError: + + def cached_property(func): + return property(func) + + +@DSSelfAttentionRegistry.register_module +class DSDenseBlockedAttention(DSSelfAttentionBase): + """ + Self attention implementation for dense, blocked self attention. + """ + + @staticmethod + def name() -> str: + return 'dense_blocked_attention' + + @staticmethod + def supports_config(config: DSSelfAttentionConfig) -> bool: + + if config.input_dtype != config.output_dtype: + return False + + if DtypeEnum(config.input_dtype) not in (DtypeEnum.fp16, DtypeEnum.bf16): + return False + + if PositionalEmbeddingType(config.positional_embedding_type) not in [ + PositionalEmbeddingType.none, PositionalEmbeddingType.rotate_half + ]: + return False + + if MaskingType(config.masking_type) != MaskingType.causal: + return False + + return True + + def __init__(self, config: DSSelfAttentionConfig, implementation_config: Dict[str, Any]) -> None: + """ + Create the Attention DSModule. + + Args: + config (DSSelfAttentionConfig): The self attention config for all attention DSModules. + implementation_config (Dict[str, Any]): + There are two (dependent) potential components in the implementtion config. + + 1. `trained_freqs` - If the embedding weights for RoPE are trained, the implementation + config should contain {'trained_freqs': True}. This will mean the implementation will + expect a `trained_freqs` tensor in the `forward` method and will not synthesize the + values internally. + + 2. `theta_base` - The base value for synthesized frequencies in the rotary embeddings. + This will only be used if `trained_freqs` is False or not present in the `implementation_config`. If this is not included, the default value of 10000.0 will be used. + """ + super().__init__(config, implementation_config) + + embed_type = PositionalEmbeddingType(config.positional_embedding_type) + if embed_type == PositionalEmbeddingType.none: + self._kv_copy = LinearBlockedKVCopy(self._config.head_size, self._config.n_heads_q, + self._config.n_heads_kv, self._config.input_dtype) + elif embed_type == PositionalEmbeddingType.rotate_half: + rotary_config = config.positional_embedding_config + assert rotary_config is not None, "Rotary config must be provided if using rotate_half as Positional Embedding Type." + + if rotary_config.use_trained_freqs: + # Theta and rotary dim are effectively embedded into either the values (theta) or the shape (rotary_dim) + # of the trained_freqs tensor. 
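+ # With trained frequencies, the kernel consumes the inv_freqs tensor passed to forward();
+ # the synthesized path below instead derives the frequencies from theta_base and rotary_dim.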
+ self._kv_copy = BlockedTrainedRotaryEmbeddings(self._config.head_size, self._config.n_heads_q, + self._config.n_heads_kv, self._config.input_dtype) + else: + theta_base = rotary_config.theta_base + rotary_dim = rotary_config.rotate_dim if rotary_config.rotate_dim is not None else self._config.head_size + self._kv_copy = BlockedRotaryEmbeddings(self._config.head_size, self._config.n_heads_q, + self._config.n_heads_kv, self._config.input_dtype, rotary_dim, + theta_base) + + self._softmax_scale = self._config.scale_factor + + # TODO(cmikeh2): Attention kernel gets created here. + self._attn_kernel = BlockedFlashAttn(self._config.head_size, self._config.input_dtype) + self._atom_builder = AtomBuilder() + + self.model_dim = self._config.head_size * self._config.n_heads_q + self._output = torch.empty((self._config.max_tokens, self._config.head_size * self._config.n_heads_q), + dtype=self._config.output_dtype, + device=get_accelerator().current_device()) + + # TODO(cmikeh2): Pre-allocate storage buffer for the attention atoms. + self._max_atoms = self._config.max_sequences + self._atoms = torch.empty((self._max_atoms, 8), dtype=torch.int32, device=get_accelerator().current_device()) + + alloc_func = RaggedUtilsBuilder().load().allocate_fast_host_buffer + self._atoms_shadow = alloc_func(self._atoms) + self._cur_atoms = 0 + + @cached_property + def kv_block_size(self) -> int: + """ + Return preferred granulatity for blocked KV-cache implementation. + """ + return get_kv_block_size(self._config.head_size) + + @cached_property + def q_block_size(self) -> int: + """ + Property to calculate blocking granularity for the query dimension. + This has no impact on the KV-cache structure, but will affect the + number of attention atoms associated with a batch. + """ + return get_q_block_size(self._config.head_size) + + def build_atoms(self, ragged_batch: RaggedBatchWrapper) -> None: + """ + Build the atoms for the attention kernel. + + Args: + ragged_batch (RaggedBatchWrapper): The input ids and associated ragged batch metadata. + """ + host_atoms, n_atoms = self._atom_builder(self._atoms_shadow, ragged_batch, self.q_block_size, + self.kv_block_size) + + self._cur_atoms = n_atoms + self._atoms[:n_atoms].copy_(host_atoms[:n_atoms], non_blocking=True) + + def forward(self, + q_k_v: torch.Tensor, + kv_cache: torch.Tensor, + batch: RaggedBatchWrapper, + inv_freqs: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + Forward implementation. + + Args: + q_k_v (torch.Tensor): Query/Key/Value projection Tensor of shape + [n_heads, (n_heads_q + 2 * n_heads_kv) * head_size]. + kv_cache (torch.Tensor): Blocked persistent cache of shape + [2, batch, block_size, n_heads_kv, head_size]. + batch (RaggedBatchWrapper): The input ids and associated ragged batch metadata. + inv_freqs (Optional[torch.Tensor]): The inverse frequencies for the rotary embeddings if they + have been modified from synthesizable values. 
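+
+ Returns:
+ torch.Tensor: The attention output of shape [n_tokens, n_heads_q * head_size], carved out of
+ the module's preallocated output buffer.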
+ """ + if inv_freqs is not None: + self._kv_copy(kv_cache, q_k_v, batch, inv_freqs) + else: + self._kv_copy(kv_cache, q_k_v, batch) + + q = q_k_v[:, :self._config.head_size * self._config.n_heads_q] + output = empty_from(self._output, q.shape) + k_cache, v_cache = split_kv(kv_cache) + + self._attn_kernel(output, q, k_cache, v_cache, self._atoms[:self._cur_atoms], self._softmax_scale) + + return output diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..053ad5da77460974cbc34361dcd887c5ea2851d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .cutlass_multi_gemm import DSMultiGemmMoE diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2378f864d0ff1f53c67d8612848d2e348253ba64 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/__pycache__/cutlass_multi_gemm.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/__pycache__/cutlass_multi_gemm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4b9948c02808d5f731043aa6fcaa63f409a6c4a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/__pycache__/cutlass_multi_gemm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/cutlass_multi_gemm.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/cutlass_multi_gemm.py new file mode 100644 index 0000000000000000000000000000000000000000..38c0000d7f781404c86813ae5ee2d29582219491 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/moe/cutlass_multi_gemm.py @@ -0,0 +1,249 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any, Dict, Optional, Tuple + +import torch + +from deepspeed.accelerator import get_accelerator +from ....allocator import empty_from +from ....inference_utils import ActivationType, is_gated +from ....kernels.core_ops import BlasLibLinear, CUDAGatedActivation +from ....kernels.ragged_ops import ( + MoEGather, + MoEScatter, + RaggedTopKGating, +) +from ....ragged import RaggedBatchWrapper + +from ...interfaces import DSMoEBase, DSMoERegistry +from ...configs import DSMoEConfig +from ....kernels.cutlass_ops import MoEGEMM +from ....inference_parameter import InferenceParameter + + +@DSMoERegistry.register_module +class DSMultiGemmMoE(DSMoEBase): + """ + MoE implementation based on the CUTLASS multi-GEMM. 
+ """ + + @staticmethod + def name(): + return 'cutlass_multi_gemm_moe' + + @staticmethod + def supports_config(config: DSMoEConfig) -> bool: + if config.input_dtype != config.output_dtype: + return False + + if config.input_dtype != torch.float16 and config.input_dtype != torch.bfloat16: + return False + + if config.top_k != 1 and config.top_k != 2: + return False + + return True + + def __init__(self, config: DSMoEConfig, implementation_config: Dict[str, Any]) -> None: + super().__init__(config, implementation_config) + + # Convenience variables for frequently accessed items. + self.max_tokens = self._config.max_tokens + self.n_experts = self._config.n_experts + self.n_top_k = self._config.top_k + self.intermediate_dim = self._config.intermediate_features + + moe_op_act_fn = ActivationType.IDENTITY if is_gated(self._config.activation) else self._config.activation + + self._mlp_1 = MoEGEMM(fp_dtype=implementation_config['weight_dtype'], act_fn=moe_op_act_fn) + self._mlp_2 = MoEGEMM(fp_dtype=implementation_config['weight_dtype'], act_fn=ActivationType.IDENTITY) + + if is_gated(self._config.activation): + self._activation = CUDAGatedActivation(self._config.model_dim, self._config.input_dtype, + self._config.activation) + else: + self._activation = None + + self._gate_proj = BlasLibLinear(self._config.input_dtype) + self._top_1_gate = RaggedTopKGating(config.input_dtype) + self._moe_scatter = MoEScatter(config.input_dtype, config.model_dim) + self._moe_gather = MoEGather(config.input_dtype, config.model_dim, config.normalize_scores) + + self._create_buffers() + + def _create_buffers(self): + + # Gating buffers + self._logits = torch.empty((self._config.max_tokens, self.n_experts), + dtype=self._config.input_dtype, + device=get_accelerator().current_device()) + self._expert_counts = torch.empty((self.n_experts, ), + dtype=torch.int32, + device=get_accelerator().current_device()) + self._scores = torch.empty((self._config.max_tokens, self.n_top_k), + dtype=torch.float32, + device=get_accelerator().current_device()) + self._assignments = torch.empty((self._config.max_tokens, self.n_top_k), + dtype=torch.int32, + device=get_accelerator().current_device()) + self._offsets = torch.empty((self._config.max_tokens, self.n_top_k), + dtype=torch.int32, + device=get_accelerator().current_device()) + + # Scatter buffers + self._moe_input = torch.empty((self._config.max_tokens * self.n_top_k, self._config.model_dim), + dtype=self._config.input_dtype, + device=get_accelerator().current_device()) + self._expert_cumsum = torch.empty((self._config.n_experts, ), + dtype=torch.int64, + device=get_accelerator().current_device()) + self._mapped_slots = torch.empty((self._config.max_tokens, self.n_top_k), + dtype=torch.int32, + device=get_accelerator().current_device()) + + # GEMM Buffers + self._intermediate = torch.empty((self._config.max_tokens * self.n_top_k, self._config.intermediate_features), + dtype=self._config.output_dtype, + device=get_accelerator().current_device()) + if self._activation is not None: + self._gated_intermediate = torch.empty( + (self._config.max_tokens * self.n_top_k, self._config.intermediate_features * 2), + dtype=self._config.output_dtype, + device=get_accelerator().current_device()) + + self._output_unordered = torch.empty((self._config.max_tokens * self.n_top_k, self._config.model_dim), + dtype=self._config.output_dtype, + device=get_accelerator().current_device()) + + # Gather buffer + self._output = torch.empty((self._config.max_tokens, self._config.model_dim), + 
dtype=self._config.output_dtype, + device=get_accelerator().current_device()) + + def transform_gate_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Ensures gate param is going to match the activation data type. + """ + param = param.to(self._config.input_dtype) + return InferenceParameter.initialize(param) + + def transform_moe_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Converts param to same data type as input and output. + + Parameters: + param (torch.Tensor): Weight or bias tensor. + """ + param = param.to(self._config.input_dtype) + + if len(param.shape) == 3: + param = param.permute(0, 2, 1).contiguous() + return InferenceParameter.initialize(param) + + def transform_moe_mlp_2_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Converts param to same data type as input and output. + + Parameters: + param (torch.Tensor): Weight or bias tensor. + """ + param = param.to(self._config.input_dtype) + + if len(param.shape) == 3: + param = param.permute(0, 2, 1).contiguous() + return InferenceParameter.initialize(param) + + @property + def output(self) -> torch.Tensor: + return self._output + + def _gate(self, hidden_states: torch.Tensor, batch_metadata: RaggedBatchWrapper, + gate_w: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Helper function to isolate the logit for gating. This will take the hidden states + and produce the metadata + tensors for the CUTLASS ragged GEMMs. If the input has + been padded for CG, this will strip the padding for MoE. + + Parameters: + hidden_states (torch.Tensor): Hidden states tensor. Expected shape is [n_tokens, model_dim]. + batch_metadata (RaggedBatchWrapper): Batch metadata for the hidden states. + gate_w (torch.Tensor): Gate weight tensor. Expected shape is [num_experts, model_dim]. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: The MoE input, the cumsum of the offsets (for the MoE kernels themselves), the scores, and the mapped slots (to recover the original order of the tokens) + """ + + # Get views on the buffers for gating + logits = empty_from(self._logits, (hidden_states.shape[0], self._logits.shape[-1])) + scores = empty_from(self._scores, (hidden_states.shape[0], self.n_top_k)) + assignments = empty_from(self._assignments, (hidden_states.shape[0], self.n_top_k)) + offsets = empty_from(self._offsets, (hidden_states.shape[0], self.n_top_k)) + mapped_slots = empty_from(self._mapped_slots, (hidden_states.shape[0], self.n_top_k)) + moe_input = empty_from(self._moe_input, (hidden_states.shape[0] * self.n_top_k, self._moe_input.shape[-1])) + + self._gate_proj(logits, hidden_states, gate_w) + self._expert_counts.zero_() + self._top_1_gate(self._expert_counts, scores, assignments, offsets, logits, batch_metadata) + self._moe_scatter(moe_input, self._expert_cumsum, mapped_slots, hidden_states, self._expert_counts, + assignments, offsets) + + return moe_input, self._expert_cumsum, scores, mapped_slots + + def forward(self, + hidden_states: torch.Tensor, + batch_metadata: RaggedBatchWrapper, + gate_w: torch.Tensor, + mlp_1_w: torch.Tensor, + mlp_2_w: torch.Tensor, + mlp_1_b: Optional[torch.Tensor] = None, + mlp_2_b: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + MoE forward pass built on top of CUTLASS multi-GEMM. + + Parameters: + hidden_states (torch.Tensor): Hidden states tensor. Expected shape is [batch, seq_len, model_dim]. + gate_w (torch.Tensor): Gate weight tensor. Expected shape is [num_experts, model_dim]. 
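+ batch_metadata (RaggedBatchWrapper): Ragged batch metadata for the hidden states.
+ mlp_1_w (torch.Tensor): Expert weights for the first MoE GEMM.
+ mlp_2_w (torch.Tensor): Expert weights for the second MoE GEMM.
+ mlp_1_b (Optional[torch.Tensor]): Optional expert biases for the first MoE GEMM.
+ mlp_2_b (Optional[torch.Tensor]): Optional expert biases for the second MoE GEMM.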
+ """ + + moe_input, expert_cumsum, scores, mapped_slots = self._gate(hidden_states, batch_metadata, gate_w) + + # Get views on the buffers for GEMM + intermediate = empty_from(self._intermediate, + (hidden_states.shape[0] * self.n_top_k, self._intermediate.shape[-1])) + output_unordered = empty_from(self._output_unordered, + (hidden_states.shape[0] * self.n_top_k, self._output_unordered.shape[-1])) + output = empty_from(self._output, (hidden_states.shape[0], self._output.shape[-1])) + + if self._activation is not None: + gated_intermediate = empty_from( + self._gated_intermediate, (hidden_states.shape[0] * self.n_top_k, self._gated_intermediate.shape[-1])) + self._mlp_1( + gated_intermediate, + moe_input, + mlp_1_w, + expert_cumsum, + mlp_1_b, + ) + self._activation(intermediate, gated_intermediate) + else: + self._mlp_1( + intermediate, + moe_input, + mlp_1_w, + expert_cumsum, + mlp_1_b, + ) + + self._mlp_2( + output_unordered, + intermediate, + mlp_2_w, + expert_cumsum, + mlp_2_b, + ) + + self._moe_gather(output, output_unordered, scores, mapped_slots, self._expert_counts) + return output diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc71d3c1fc7ac220b71340dd65e0ee433ad630b9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/__pycache__/cuda_post_ln.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/__pycache__/cuda_post_ln.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7aa17257ac98a948bd7ced450fc40f03a74f658e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/__pycache__/cuda_post_ln.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..12605f13f955e00b598516a97a8cc7d0369f7405 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .cuda_pre_ln import DSPreLNCUDAModule +from .cuda_pre_rms import DSPreRMSCUDAModule diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/module_registry.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/module_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..e04b8d734518b2322fe3f07a38c5d01222e4ecbe --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/module_registry.py @@ -0,0 +1,58 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import ABC, abstractstaticmethod +from typing import Any, Dict, Type + +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from .ds_module import DSModuleBase + + +class ConfigBundle(DeepSpeedConfigModel): + """ + A config bundle is a collection of configs that are used to instantiate a model implementation. + """ + name: str + config: DeepSpeedConfigModel + implementation_config: Dict[str, Any] = {} + + +class DSModuleRegistryBase(ABC): + """ + Class holding logic for tracking the DSModule implementations of a given interface. + """ + + @classmethod + def instantiate_config(cls, config_bundle: ConfigBundle) -> DSModuleBase: + """ + Given a config bundle, look up the named DSModule implementation, validate that it supports + the bundled config, and instantiate it. + """ + if config_bundle.name not in cls.registry: + raise KeyError(f"Unknown DSModule: {config_bundle.name}, cls.registry={cls.registry}") + + target_implementation = cls.registry[config_bundle.name] + if not target_implementation.supports_config(config_bundle.config): + raise ValueError(f"Config {config_bundle.config} is not supported by {target_implementation}") + + return cls.registry[config_bundle.name](config_bundle.config, config_bundle.implementation_config) + + @abstractstaticmethod + def associated_class() -> Type[DSModuleBase]: + """ + Return the class associated with this registry. + """ + raise NotImplementedError("Must associate a DSModule class with its registry.") + + @classmethod + def register_module(cls, child_class: Type[DSModuleBase]) -> Type[DSModuleBase]: + """ + Register a module with this registry and return it, so this can be used as a class decorator. + """ + if not issubclass(child_class, cls.associated_class()): + raise TypeError( + f"Can only register subclasses of {cls.associated_class()}, {child_class} does not inherit from {cls.associated_class()}" + ) + cls.registry[child_class.name()] = child_class + return child_class diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/scheduling_utils.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/scheduling_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6d3818d46675d9b00735454b3db021e770bb4947 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/scheduling_utils.py @@ -0,0 +1,54 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from enum import Enum + + +class SchedulingResult(Enum): + + Success = 0 + """ + The proposed batch is valid and can be scheduled. + """ + + EngineSequenceLimitExceeded = 1 + """ + The proposed batch would overflow the number of concurrent sequences the engine may support. + """ + + BatchSequenceLimitExceeded = 2 + """ + The proposed batch contains more sequences than the engine was configured + to support in a single forward pass. + """ + + BatchTokenLimitExceeded = 3 + """ + The proposed batch contains more tokens than the engine was configured + to support in a single forward pass. + """ + + KVCacheLimitExceeded = 4 + """ + The proposed batch would require more KV cache to be allocated than the engine + currently has available. + """ + + SequenceTokenLimitExceeded = 5 + """ + The proposed batch contains a sequence that is longer than the engine/model can support. + """ + + +class SchedulingError(RuntimeError): + + result: SchedulingResult + """ + The failed result of the scheduling check. Guaranteed not to be SchedulingResult.Success.
+ """ + + def __init__(self, result: SchedulingResult) -> None: + self.result = result + super().__init__(f"Batch scheduling failed with result {result}") diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/constants.py b/venv/lib/python3.10/site-packages/deepspeed/launcher/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..366ae0b236f81a94b1a4ed2cecc01a1f3e1aa69e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/launcher/constants.py @@ -0,0 +1,16 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +PDSH_LAUNCHER = 'pdsh' +PDSH_MAX_FAN_OUT = 1024 + +OPENMPI_LAUNCHER = 'openmpi' +MPICH_LAUNCHER = 'mpich' +IMPI_LAUNCHER = 'impi' +SLURM_LAUNCHER = 'slurm' +MVAPICH_LAUNCHER = 'mvapich' +MVAPICH_TMP_HOSTFILE = '/tmp/deepspeed_mvapich_hostfile' + +ELASTIC_TRAINING_ID_DEFAULT = "123456789" diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/launcher_helper.py b/venv/lib/python3.10/site-packages/deepspeed/launcher/launcher_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..05ce14bcc52ed9dd2c6182b30be541a1873569dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/launcher/launcher_helper.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import sys +import argparse +import subprocess +from deepspeed.utils import logger +from deepspeed.launcher.constants import MPICH_LAUNCHER + + +def parse_args(args=None): + parser = argparse.ArgumentParser(description="DeepSpeed launcher helper to map environment variables for" + "multi-node/multi-gpu training jobs.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument("--launcher", + default=MPICH_LAUNCHER, + type=str, + help="(optional) choose launcher backend for multi-node " + "training. Options currently include MPICH.") + + parser.add_argument("--module", + action="store_true", + help="Change each process to interpret the launch " + "script as a Python module, executing with the same " + "behavior as 'python -m'.") + + parser.add_argument("--no_python", + action="store_true", + help="Skip prepending the training script with " + "'python' - just execute it directly.") + + parser.add_argument("user_script", type=str, help="User script to launch, followed by any required " + "arguments.") + + parser.add_argument('user_args', nargs=argparse.REMAINDER) + + parser.add_argument("--bind_cores_to_rank", + action="store_true", + help="Bind each rank to different cores of the host") + + parser.add_argument("--bind_core_list", + type=str, + default=None, + help="List of cores to bind to with comma separated list of " + "numbers and range. i.e. 1,3-5,7 => [1,3,4,5,7]. 
When not " + "specified, all cores on system would be used rank binding") + + return parser.parse_args(args=args) + + +def env_mapping(env, rank_name_list=None, local_rank_name_list=None): + rank = None + for rank_name in rank_name_list: + if rank_name in env: + if rank == None: + rank = env.get(rank_name) + elif rank != env.get(rank_name): + raise EnvironmentError(f"rank number doesn't match!") + if rank == None: + raise EnvironmentError(f"rank number is not in current env!") + env['RANK'] = rank + + local_rank = None + for local_rank_name in local_rank_name_list: + if local_rank_name in env: + if local_rank == None: + local_rank = env.get(local_rank_name) + elif local_rank != env.get(local_rank_name): + raise EnvironmentError(f"local_rank number doesn't match!") + if local_rank == None: + raise EnvironmentError(f"rank number is not in current env!") + env['LOCAL_RANK'] = local_rank + + return env + + +def main(args=None): + args = parse_args(args) + + env = os.environ.copy() + + args.launcher = args.launcher.lower() + if args.launcher == MPICH_LAUNCHER: + rank_name_list = ["PMIX_RANK"] + ["PMI_RANK"] + local_rank_name_list = ["PALS_LOCAL_RANKID"] + ["MPI_LOCALRANKID"] + env = env_mapping(env, rank_name_list=rank_name_list, local_rank_name_list=local_rank_name_list) + else: + raise NotImplementedError(f"Unknown launcher {args.launcher}") + + python_exec = [] + if not args.no_python: + python_exec += [sys.executable, "-u"] + if args.module: + python_exec.append("-m") + cmd = python_exec + [args.user_script] + args.user_args + + logger.info(f"launcher_helper cmd = {' '.join(cmd)}") + + result = subprocess.Popen(cmd, env=env, close_fds=False) + result.wait() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/multinode_runner.py b/venv/lib/python3.10/site-packages/deepspeed/launcher/multinode_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..ce58deadc28146a6b7ceb5462dce874c6c3b12ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/launcher/multinode_runner.py @@ -0,0 +1,460 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import sys +import shutil +import subprocess +import warnings +from shlex import split +from abc import ABC, abstractmethod +from deepspeed.accelerator import get_accelerator +from ..utils import logger, get_numactl_cmd +from .constants import PDSH_MAX_FAN_OUT, MVAPICH_TMP_HOSTFILE + + +class MultiNodeRunner(ABC): + + def __init__(self, args, world_info_base64): + self.args = args + self.validate_args() + self.user_arguments = self.parse_user_args() + self.user_script = args.user_script + self.world_info_base64 = world_info_base64 + self.exports = {} + + @abstractmethod + def backend_exists(self): + """Return whether the corresponding backend exists""" + + @abstractmethod + def get_cmd(self, environment, active_resources): + """Return the command to execute on node""" + + def add_export(self, key, var): + self.exports[key.strip()] = var.strip() + + def parse_user_args(self): + return self.args.user_args + + @property + def name(self): + """Return the name of the backend""" + return self.__class__.__name__ + + def validate_args(self): + """Validate self.args""" + + +class PDSHRunner(MultiNodeRunner): + + def __init__(self, args, world_info_base64): + super().__init__(args, world_info_base64) + + def backend_exists(self): + return shutil.which('pdsh') + + def parse_user_args(self): + processed_args = [] + for arg in self.args.user_args: + # With pdsh, if we are passing a string as an argument, it will get + # split on whitespace. To avoid this and support strings that + # contain '"', we do this extra processing step: + if " " in arg: + arg = '"{}"'.format(arg.replace('"', '\\"')) + processed_args.append(arg) + return processed_args + + @property + def name(self): + return "pdsh" + + def get_cmd(self, environment, active_resources): + environment['PDSH_RCMD_TYPE'] = 'ssh' + if self.args.ssh_port is not None: # only specify ssh port if it is specified + environment["PDSH_SSH_ARGS_APPEND"] = f"{environment.get('PDSH_SSH_ARGS_APPEND', '')} \ + -p {self.args.ssh_port}" + + active_workers = ",".join(active_resources.keys()) + logger.info("Running on the following workers: %s" % active_workers) + + # PDSH flags for max node fan out and specific hosts to launch on + # See https://linux.die.net/man/1/pdsh for flag details + pdsh_cmd_args = ['pdsh', '-S', '-f', str(PDSH_MAX_FAN_OUT), '-w', active_workers] + split( + self.args.launcher_args) + + exports = "" + for key, val in self.exports.items(): + exports += "export {}={}; ".format(key, val) + + # https://linux.die.net/man/1/pdsh + # %n will be replaced by pdsh command + deepspeed_launch = [ + exports, f"cd {os.path.abspath('.')};", sys.executable, "-u", "-m", "deepspeed.launcher.launch", + f'--world_info={self.world_info_base64}', "--node_rank=%n", f"--master_addr={self.args.master_addr}", + f"--master_port={self.args.master_port}" + ] + if self.args.no_python: + deepspeed_launch.append("--no_python") + if self.args.module: + deepspeed_launch.append("--module") + if self.args.no_local_rank: + deepspeed_launch.append("--no_local_rank") + if self.args.save_pid: + deepspeed_launch += ["--save_pid", f"{os.getpid()}"] + if self.args.elastic_training: + deepspeed_launch.append("--enable_elastic_training") + deepspeed_launch.append(f"--max_elastic_nodes={self.args.max_elastic_nodes}") + deepspeed_launch.append(f"--min_elastic_nodes={self.args.min_elastic_nodes}") + + cmd_to_search = [i + "\\" for i in deepspeed_launch[2:6]] + + kill_command = pdsh_cmd_args + ["pkill -f ", " 
".join(cmd_to_search)[:-2]] + return pdsh_cmd_args + deepspeed_launch + [self.user_script] + self.user_arguments, kill_command, environment + + +class OpenMPIRunner(MultiNodeRunner): + + def __init__(self, args, world_info_base64, resource_pool): + super().__init__(args, world_info_base64) + self.resource_pool = resource_pool + self.add_export('UCX_TLS', 'tcp') + + def backend_exists(self): + #TODO: if IB is available we should suggestion mvapich + return shutil.which('ompi_info') + + @property + def name(self): + return "openmpi" + + def validate_args(self): + super().validate_args() + #TODO: Allow for include/exclude at node-level but not gpu-level + if self.args.include != "" or self.args.exclude != "": + raise ValueError(f"{self.name} backend does not support worker include/exclusion") + if self.args.num_nodes != -1 or self.args.num_gpus != -1: + raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus") + + def get_cmd(self, environment, active_resources): + total_process_count = sum(self.resource_pool.values()) + + mpirun_cmd = [ + 'mpirun', + '-n', + f'{total_process_count}', + '-hostfile', + f'{self.args.hostfile}', + '--mca', + 'btl', + '^openib', + '--mca', + 'btl_tcp_if_include', + 'eth0', + ] + split(self.args.launcher_args) + + export_cmd = [] + for k, v in self.exports.items(): + export_cmd += ['-x', "{}={}".format(k, v)] + + python_exec = [] + if not self.args.no_python: + python_exec = [sys.executable, "-u"] + if self.args.module: + python_exec.append("-m") + + return mpirun_cmd + export_cmd + python_exec + [self.user_script] + self.user_arguments + + +class MPICHRunner(MultiNodeRunner): + + def __init__(self, args, world_info_base64, resource_pool): + super().__init__(args, world_info_base64) + self.resource_pool = resource_pool + + def backend_exists(self): + #TODO: if IB is available we should suggestion mpich + return shutil.which('mpirun') #mpich_info + + @property + def name(self): + return "mpich" + + def validate_args(self): + super().validate_args() + #TODO: Allow for include/exclude at node-level but not gpu-level + if self.args.include != "" or self.args.exclude != "": + raise ValueError(f"{self.name} backend does not support worker include/exclusion") + + if self.args.num_nodes != -1 or self.args.num_gpus != -1: + raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus") + + def get_cmd(self, environment, active_resources): + devices_per_node = self.resource_pool.values() + total_process_count = sum(devices_per_node) + process_per_node = list(devices_per_node)[0] + if not all([n == process_per_node for n in devices_per_node]): + raise ValueError("MPICH requires same number of devices per node") + + mpirun_cmd = [ + 'mpirun', + '-n', + f'{total_process_count}', + '-ppn', + f'{process_per_node}', + ] + split(self.args.launcher_args) + export_cmd = [] + + for k, v in self.exports.items(): + export_cmd += ['-genv', "{}={}".format(k, v)] + + export_cmd += ['-genv', 'MASTER_ADDR', str(self.args.master_addr)] + export_cmd += ['-genv', 'MASTER_PORT', str(self.args.master_port)] + export_cmd += ['-genv', 'WORLD_SIZE', str(total_process_count)] + export_cmd += ['-genv', 'LOCAL_SIZE', str(process_per_node)] + + export_cmd += ['-hosts'] + hosts = "" + for i, host in enumerate(self.resource_pool.keys()): + if i == 0: + hosts = f"{host}" + else: + hosts += f",{host}" + export_cmd += [hosts] + + helper_args = ["--launcher"] + [self.args.launcher] + python_exec = [] + if not self.args.no_python: + python_exec += [sys.executable, 
"-u"] + if self.args.module: + python_exec.append("-m") + helper_args.append("--module") + else: + helper_args.append("--no_python") + + helper_cmd = str(os.path.dirname(os.path.realpath(__file__))) + '/launcher_helper.py' + helper_cmd = [helper_cmd] + helper_args + [self.user_script] + self.user_arguments + + return mpirun_cmd + export_cmd + python_exec + helper_cmd + + +class IMPIRunner(MultiNodeRunner): + + def __init__(self, args, world_info_base64, resource_pool): + super().__init__(args, world_info_base64) + self.resource_pool = resource_pool + + def backend_exists(self): + #TODO: if IB is available we should suggestion mpich + return shutil.which('mpirun') #mpich_info + + @property + def name(self): + return "impi" + + def validate_args(self): + super().validate_args() + #TODO: Allow for include/exclude at node-level but not gpu-level + if self.args.include != "" or self.args.exclude != "": + raise ValueError(f"{self.name} backend does not support worker include/exclusion") + + if self.args.num_nodes != -1 or self.args.num_gpus != -1: + raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus") + + def get_cmd(self, environment, active_resources): + devices_per_node = self.resource_pool.values() + total_process_count = sum(devices_per_node) + process_per_node = list(devices_per_node)[0] + if not all([n == process_per_node for n in devices_per_node]): + raise ValueError("Intel MPI requires same number of devices per node") + + mpirun_cmd = [ + 'mpirun', + '-ppn', + f'{process_per_node}', + ] + split(self.args.launcher_args) + export_cmd = [] + + for k, v in self.exports.items(): + export_cmd += ['-genv', f'{k}', f'{v}'] + + if self.args.bind_cores_to_rank: + cores_per_rank, _ = get_numactl_cmd(self.args.bind_core_list, process_per_node, 0) + export_cmd += ['-genv', 'OMP_NUM_THREADS', str(cores_per_rank)] + + export_cmd += ['-genv', 'MASTER_ADDR', str(self.args.master_addr)] + export_cmd += ['-genv', 'MASTER_PORT', str(self.args.master_port)] + export_cmd += ['-genv', 'WORLD_SIZE', str(total_process_count)] + export_cmd += ['-genv', 'LOCAL_SIZE', str(process_per_node)] + + # turn off IMPI core binding, use deepspeed's own core binding + export_cmd += ['-genv', 'I_MPI_PIN', '0'] + + export_cmd += ['-hosts'] + hosts = "" + for i, host in enumerate(self.resource_pool.keys()): + if i == 0: + hosts = f"{host}" + else: + hosts += f",{host}" + export_cmd += [hosts] + + per_host_cmd = [] + + for i in range(total_process_count): + local_rank = i % process_per_node + python_exec = [] + if self.args.bind_cores_to_rank: + _, numactl_cmd = get_numactl_cmd(self.args.bind_core_list, process_per_node, local_rank) + python_exec += numactl_cmd + + if not self.args.no_python: + python_exec += [sys.executable, "-u"] + if self.args.module: + python_exec.append("-m") + env_mapping = ['-env', 'RANK', str(i)] + env_mapping += ['-env', 'LOCAL_RANK', str(local_rank)] + if i == 0: + per_host_cmd = ['-n', '1'] + env_mapping + python_exec + [self.user_script] + self.user_arguments + else: + per_host_cmd = per_host_cmd + [':', '-n', '1'] + env_mapping + python_exec + [self.user_script + ] + self.user_arguments + print(mpirun_cmd + export_cmd + per_host_cmd) + return mpirun_cmd + export_cmd + per_host_cmd + + +class SlurmRunner(MultiNodeRunner): + + def __init__(self, args, world_info_base64, resource_pool): + super().__init__(args, world_info_base64) + self.resource_pool = resource_pool + + def backend_exists(self): + return shutil.which('sinfo') + + @property + def name(self): + return 
'slurm' + + def get_cmd(self, environment, active_resources): + assert not getattr(self.args, 'detect_nvlink_pairs', + False), "slurm backend does not support remapping visible devices" + total_process_count = sum(self.resource_pool.values()) + srun_cmd = [ + 'srun', + '-n', + f'{total_process_count}', + ] + split(self.args.launcher_args) + + if getattr(self.args, 'slurm_comment', ''): + srun_cmd += ['--comment', self.args.slurm_comment] + + if self.args.include != "": + srun_cmd.append('--include') + srun_cmd.append(f'{self.args.include}') + if self.args.exclude != "": + srun_cmd.append('--exclude') + srun_cmd.append(f'{self.args.exclude}') + if self.args.num_nodes > 0: + srun_cmd.append('--nodes') + srun_cmd.append(f'{self.args.num_nodes}') + if self.args.num_gpus > 0: + srun_cmd.append('--gpus') + srun_cmd.append(f'{self.args.num_gpus}') + + exports = '--export=ALL' + for key, val in self.exports.items(): + exports += f",{key}={val}" + + python_exec = [sys.executable, "-u"] + command = srun_cmd + [exports] + python_exec + [self.user_script] + self.user_arguments + return command + + +class MVAPICHRunner(MultiNodeRunner): + + def __init__(self, args, world_info_base64, resource_pool): + super().__init__(args, world_info_base64) + self.resource_pool = resource_pool + + # Disable the CMA kernel module, not available on Ubuntu systems + self.add_export('MV2_SMP_USE_CMA', '0') + + # If we fail this will output more verbose logging + self.add_export('MV2_DEBUG_SHOW_BACKTRACE', '1') + + # Enabled cuda-aware communication + if get_accelerator().device_name() == 'cuda': + self.add_export('MV2_USE_CUDA', '1') + + # Support deep learning frameworks: http://hidl.cse.ohio-state.edu/userguide/horovod/ + self.add_export('MV2_SUPPORT_DL', '1') + + # Support MPI_THREAD_MULTIPLE + self.add_export('MV2_ENABLE_AFFINITY', '0') + + # Performance tuning flags for allgather + self.add_export('MV2_INTER_ALLGATHER_TUNING', '5') + self.add_export('MV2_CUDA_USE_NAIVE', '0') + + def backend_exists(self): + #TODO: if IB is available we should suggestion mvapich + mpiname_exists = shutil.which('mpiname') + exists = False + if not mpiname_exists: + warnings.warn("mpiname does not exist, mvapich is not installed properly") + else: + results = subprocess.check_output('mpiname', shell=True) + mpiname_results = results.decode('utf-8').strip() + if "MVAPICH2-GDR" in mpiname_results: + exists = True + else: + warnings.warn(f"Expected MVAPICH2-GDR as return for mpiname but received {mpiname_results}") + return exists + + @property + def name(self): + return "mvapich" + + def validate_args(self): + super().validate_args() + #TODO: Allow for include/exclude at node-level but not gpu-level + if self.args.include != "" or self.args.exclude != "": + raise ValueError(f"{self.name} backend does not support worker include/exclusion") + if self.args.num_nodes != -1 or self.args.num_gpus != -1: + raise ValueError(f"{self.name} backend does not support limiting num nodes/gpus") + + def get_cmd(self, environment, active_resources): + devices_per_node = self.resource_pool.values() + total_process_count = sum(devices_per_node) + process_per_node = list(devices_per_node)[0] + if not all([n == process_per_node for n in devices_per_node]): + raise ValueError("mvapich requires same number of devices per node") + + with open(MVAPICH_TMP_HOSTFILE, 'w') as fd: + for host in self.resource_pool.keys(): + fd.write(f'{host}\n') + + mpirun_cmd = [ + 'mpirun', + '-np', + f'{total_process_count}', + '-ppn', + f'{process_per_node}', + '--hostfile', + 
f'{MVAPICH_TMP_HOSTFILE}', + ] + split(self.args.launcher_args) + + export_cmd = [] + for k, v in self.exports.items(): + export_cmd += ['-env', "{}={}".format(k, v)] + + python_exec = [] + if not self.args.no_python: + python_exec = [sys.executable, "-u"] + if self.args.module: + python_exec.append("-m") + + return mpirun_cmd + export_cmd + python_exec + [self.user_script] + self.user_arguments diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/runner.py b/venv/lib/python3.10/site-packages/deepspeed/launcher/runner.py new file mode 100644 index 0000000000000000000000000000000000000000..0ca89dd0497a949458240883922dbec910d29f35 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/launcher/runner.py @@ -0,0 +1,594 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +DeepSpeed runner is the main front-end to launching multi-worker +training jobs with DeepSpeed. By default this uses pdsh to parallel +ssh into multiple worker nodes and launch all the necessary processes +per rank for training. +""" + +import os +import re +import sys +import json +import base64 +import argparse +import subprocess +import collections +from copy import deepcopy +import signal +import time + +from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner, SlurmRunner, MPICHRunner, IMPIRunner +from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER, SLURM_LAUNCHER, MPICH_LAUNCHER, IMPI_LAUNCHER +from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT +from ..nebula.constants import NEBULA_EXPORT_ENVS +from ..utils import logger + +from ..autotuning import Autotuner +from deepspeed.accelerator import get_accelerator + +DLTS_HOSTFILE = "/job/hostfile" +EXPORT_ENVS = ['MLFLOW', 'PYTHON', 'MV2', 'UCX'] +EXPORT_ENVS += NEBULA_EXPORT_ENVS +DEEPSPEED_ENVIRONMENT_NAME = os.getenv("DS_ENV_FILE", ".deepspeed_env") +DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.'] +PDSH_MAX_FAN_OUT = 1024 + +# On AISC compute, each node sets environment variables independently, want to prevent +# exporting rank-0 env variables in case of heterogeneous compute. +EXCLUDE_ENVS = {'AISC_JOB_NAME': ['NCCL_IB_HCA', 'UCX_NET_DEVICES']} + + +def parse_args(args=None): + parser = argparse.ArgumentParser(description="DeepSpeed runner to help launch distributed " + "multi-node/multi-gpu training jobs.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument("-H", + "--hostfile", + type=str, + default=DLTS_HOSTFILE, + help="Hostfile path (in MPI style) that defines the " + "resource pool available to the job (e.g., " + "worker-0 slots=4)") + + parser.add_argument("-i", + "--include", + type=str, + default="", + help='''Specify hardware resources to use during execution. + String format is + NODE_SPEC[@NODE_SPEC ...], + where + NODE_SPEC=NAME[:SLOT[,SLOT ...]]. + If :SLOT is omitted, include all slots on that host. + Example: -i "worker-0@worker-1:0,2" will use all slots + on worker-0 and slots [0, 2] on worker-1. + ''') + + parser.add_argument("-e", + "--exclude", + type=str, + default="", + help='''Specify hardware resources to NOT use during execution. + Mutually exclusive with --include. Resource formatting + is the same as --include. + Example: -e "worker-1:0" will use all available + resources except slot 0 on worker-1. 
+ ''') + + parser.add_argument("--num_nodes", + type=int, + default=-1, + help="Total number of worker nodes to run on, this will use " + "the top N hosts from the given hostfile.") + + parser.add_argument("--min_elastic_nodes", + type=int, + default=-1, + help="Minimum number of nodes to run elastic training on. " + "Default is 1 when elastic training is enabled") + + parser.add_argument("--max_elastic_nodes", + type=int, + default=-1, + help="Maximum number of nodes to run elastic training on. " + "Default is num_nodes when elastic training is enabled") + + parser.add_argument("--num_gpus", + "--num_accelerators", + type=int, + default=-1, + help="Max number of GPUs to use on each node, will use " + "[0:N) GPU ids on each node.") + + parser.add_argument("--master_port", + default=TORCH_DISTRIBUTED_DEFAULT_PORT, + type=int, + help="(optional) Port used by PyTorch distributed for " + "communication during training.") + + parser.add_argument("--master_addr", + default="", + type=str, + help="(optional) IP address of node 0, will be " + "inferred via 'hostname -I' if not specified.") + + parser.add_argument("--launcher", + default=PDSH_LAUNCHER, + type=str, + help="(optional) choose launcher backend for multi-node " + "training. Options currently include PDSH, OpenMPI, MVAPICH, SLURM, MPICH, IMPI.") + + parser.add_argument("--launcher_args", + default="", + type=str, + help="(optional) pass launcher specific arguments as a " + "single quoted argument.") + + parser.add_argument("--module", + action="store_true", + help="Change each process to interpret the launch " + "script as a Python module, executing with the same " + "behavior as 'python -m'.") + + parser.add_argument("--no_python", + action="store_true", + help="Skip prepending the training script with " + "'python' - just execute it directly.") + + parser.add_argument("--no_local_rank", + action="store_true", + help="Do not pass local_rank as an argument when calling " + "the user's training script.") + + parser.add_argument("--no_ssh_check", + action="store_true", + help="Do not perform ssh check in multi-node launcher model") + + parser.add_argument("--force_multi", + action="store_true", + help="Force multi-node launcher mode, helps in cases where user " + "wants to launch on single remote node.") + + parser.add_argument("--save_pid", + action="store_true", + help="Save file containing launcher process id (pid) at /tmp/.ds, " + "where is the pid of the first process that invoked `deepspeed`. " + "Useful when launching deepspeed processes programmatically.") + + parser.add_argument("--enable_each_rank_log", + default="None", + type=str, + help="redirect the stdout and stderr from each rank into different log files") + + parser.add_argument("--autotuning", + default="", + choices=["tune", "run"], + type=str, + help="Run DeepSpeed autotuner to discover optimal configuration parameters " + "before running job.") + + parser.add_argument("--elastic_training", + action="store_true", + help="Enable elastic training support in DeepSpeed.") + + parser.add_argument("user_script", type=str, help="User script to launch, followed by any required " + "arguments.") + + parser.add_argument('user_args', nargs=argparse.REMAINDER) + + parser.add_argument("--bind_cores_to_rank", + action="store_true", + help="Bind each rank to different cores of the host") + + parser.add_argument("--bind_core_list", + type=str, + default=None, + help="List of cores to bind to with comma separated list of " + "numbers and range. i.e. 1,3-5,7 => [1,3,4,5,7]. 
When not " + "specified, all cores on system would be used rank binding") + + parser.add_argument("--ssh_port", type=int, default=None, help="SSH port to use for remote connections") + + return parser.parse_args(args=args) + + +def fetch_hostfile(hostfile_path): + if not os.path.isfile(hostfile_path): + logger.warning("Unable to find hostfile, will proceed with training " + "with local resources only.") + return None + + # e.g., worker-0 slots=16 + with open(hostfile_path, 'r') as fd: + hostfile_text = fd.readlines() + + return _parse_hostfile(hostfile_text) + + +def _parse_hostfile(hostfile_lines): + # Regex matches one or more non-whitespace characters (\S+) at the start of + # the line, followed by one or more whitespace characters (\s+), followed + # by the string "slots=", followed by one or more digits (\d+). + pattern = r'^(\S+)\s+slots=(\d+)' + + resource_pool = collections.OrderedDict() + + for line in hostfile_lines: + line = line.strip() + match = re.search(pattern, line) + if line.startswith("#") or line == "": + # hostfile comment or empty line, ignore + continue + elif match: + host = match.group(1) + num_slots = int(match.group(2)) + if host in resource_pool: + logger.error(f"Bad hostfile text: {hostfile_lines}") + raise ValueError(f"Hostfile contains multiple entries for {host}, unable to proceed with launching") + resource_pool[host] = num_slots + else: + logger.error(f"Bad hostfile text: {hostfile_lines}") + raise ValueError(f"Hostfile contains a bad entry: {line}, unable to proceed with launching") + + if len(resource_pool) == 0: + logger.error(f"Bad hostfile text: {hostfile_lines}") + raise ValueError("Hostfile is empty or not formatted correctly, unable to proceed with launching.") + + return resource_pool + + +def _stable_remove_duplicates(data): + # Create a new list in the same order as original but with duplicates + # removed, should never be more than ~16 elements so simple is best + new_list = [] + for x in data: + if x not in new_list: + new_list.append(x) + return new_list + + +def parse_resource_filter(host_info, include_str="", exclude_str=""): + '''Parse an inclusion or exclusion string and filter a hostfile dictionary. + + String format is NODE_SPEC[@NODE_SPEC ...], where + NODE_SPEC = NAME[:SLOT[,SLOT ...]]. + If :SLOT is omitted, include/exclude all slots on that host. + + Examples: + include_str="worker-0@worker-1:0,2" will use all slots on worker-0 and + slots [0, 2] on worker-1. + exclude_str="worker-1:0" will use all available resources except + slot 0 on worker-1. 
+ ''' + + # Constants that define our syntax + NODE_SEP = '@' + SLOT_LIST_START = ':' + SLOT_SEP = ',' + + # Ensure include/exclude are mutually exclusive + if (include_str != "") and (exclude_str != ""): + raise ValueError('include_str and exclude_str are mutually exclusive.') + + # no-op + if (include_str == "") and (exclude_str == ""): + return host_info + + # Either build from scratch or remove items + filtered_hosts = dict() + if include_str: + parse_str = include_str + if exclude_str != "": + filtered_hosts = deepcopy(host_info) + parse_str = exclude_str + + # foreach node in the list + for node_config in parse_str.split(NODE_SEP): + # Node can either be alone or node:slot,slot,slot + if SLOT_LIST_START in node_config: + hostname, slots = node_config.split(SLOT_LIST_START) + slots = [int(x) for x in slots.split(SLOT_SEP)] + + # sanity checks + if hostname not in host_info: + raise ValueError(f"Hostname '{hostname}' not found in hostfile") + for slot in slots: + if slot not in host_info[hostname]: + raise ValueError(f"No slot '{slot}' specified on host '{hostname}'") + + # If include string, build the list from here + if include_str: + filtered_hosts[hostname] = slots + elif exclude_str: + for slot in slots: + logger.info(f'removing {slot} from {hostname}') + filtered_hosts[hostname].remove(slot) + + # User just specified the whole node + else: + hostname = node_config + # sanity check hostname + if hostname not in host_info: + raise ValueError(f"Hostname '{hostname}' not found in hostfile") + + if include_str: + filtered_hosts[hostname] = host_info[hostname] + elif exclude_str: + filtered_hosts[hostname] = [] + + # Post-processing to remove duplicates and empty nodes + del_keys = [] + for hostname in filtered_hosts: + # Remove duplicates + filtered_hosts[hostname] = _stable_remove_duplicates(filtered_hosts[hostname]) + # Remove empty hosts + if len(filtered_hosts[hostname]) == 0: + del_keys.append(hostname) + for name in del_keys: + del filtered_hosts[name] + + # Lastly, go over filtered_hosts and convert to a OrderedDict() to ensure + # we map ranks to nodes correctly by maintaining host_info ordering. 
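+ # For example, include_str="worker-1@worker-0" still yields worker-0 before worker-1,
+ # because the loop below walks host_info (hostfile order), not the order the nodes were
+ # listed in the include/exclude string.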
+ ordered_hosts = collections.OrderedDict() + for host in host_info: + if host in filtered_hosts: + ordered_hosts[host] = filtered_hosts[host] + + return ordered_hosts + + +def parse_inclusion_exclusion(resource_pool, inclusion, exclusion): + active_resources = collections.OrderedDict() + for hostname, slots in resource_pool.items(): + active_resources[hostname] = list(range(slots)) + + return parse_resource_filter(active_resources, include_str=inclusion, exclude_str=exclusion) + + +def encode_world_info(world_info): + world_info_json = json.dumps(world_info).encode('utf-8') + world_info_base64 = base64.urlsafe_b64encode(world_info_json).decode('utf-8') + return world_info_base64 + + +def run_autotuning(args, active_resources): + tuner = Autotuner(args, active_resources) + logger.info("[Start] Running autotuning") + + tuner.tune() + tuner.print_tuning_results() + + logger.info("[End] Running autotuning") + tuner.write_optimal_config() + + if args.autotuning == "run": + tuner.run_after_tuning() + + +def parse_num_nodes(str_num_nodes: str, elastic_training: bool): + node_list = str_num_nodes.split(":") + + if len(node_list) == 1: + min_nodes, max_nodes = int(node_list[0]), -1 + elif len(node_list) == 2 and elastic_training: + min_nodes, max_nodes = int(node_list[0]), int(node_list[1]) + elif len(node_list) == 2 and not elastic_training: + raise RuntimeError("MIN:MAX format is only supported in elastic training") + else: + raise RuntimeError("num_nodes {} is not in MIN:MAX format".format(str_num_nodes)) + + return min_nodes, max_nodes + + +def main(args=None): + args = parse_args(args) + + if args.elastic_training: + assert args.master_addr != "", "Master Addr is required when elastic training is enabled" + + resource_pool = fetch_hostfile(args.hostfile) + + # respect CUDA_VISIBLE_DEVICES for a single node and no explicit resource filters + cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "") + if not resource_pool and len(cuda_visible_devices): + detected_str = f"Detected CUDA_VISIBLE_DEVICES={cuda_visible_devices}" + if len(args.include) or len(args.exclude) or args.num_nodes > 1 or args.num_gpus > 0: + print( + f"{detected_str} but ignoring it because one or several of --include/--exclude/--num_gpus/--num_nodes cl args were used. If you want to use CUDA_VISIBLE_DEVICES don't pass any of these arguments to deepspeed." 
+ ) + else: + args.include = f"localhost:{cuda_visible_devices}" + print(f"{detected_str}: setting --include={args.include}") + del os.environ["CUDA_VISIBLE_DEVICES"] + + if args.num_nodes >= 0 or args.num_gpus >= 0: + if args.include != "" or args.exclude != "": + raise ValueError("Cannot specify num_nodes/gpus with include/exclude") + + multi_node_exec = True + if not resource_pool: + resource_pool = {} + device_count = get_accelerator().device_count() + if device_count == 0: + raise RuntimeError("Unable to proceed, no GPU resources available") + resource_pool['localhost'] = device_count + args.master_addr = "127.0.0.1" + multi_node_exec = False + + if not multi_node_exec and args.num_nodes > 1: + raise ValueError("Num nodes is >1 but no extra nodes available via hostfile") + + active_resources = parse_inclusion_exclusion(resource_pool, args.include, args.exclude) + env = os.environ.copy() + + # validate that passwordless-ssh is working properly with this hostfile + if multi_node_exec and not args.no_ssh_check: + first_host = list(active_resources.keys())[0] + try: + ssh_check_cmd = "ssh -o PasswordAuthentication=no " + if args.ssh_port is not None: + ssh_check_cmd += f"-p {args.ssh_port} " + ssh_check_cmd += f"{first_host} hostname" + subprocess.check_call(ssh_check_cmd, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL, shell=True) + except subprocess.CalledProcessError: + raise RuntimeError( + f"Using hostfile at {args.hostfile} but host={first_host} was not reachable via ssh. If you are running with a single node please remove {args.hostfile} or set up passwordless ssh." + ) + + if not args.master_addr: + assert multi_node_exec + first_host = list(active_resources.keys())[0] + ssh_check_cmd = "ssh " + if args.ssh_port is not None: + ssh_check_cmd += f" -p {args.ssh_port}" + ssh_check_cmd += f" {first_host} hostname -I" + hostname_cmd = [ssh_check_cmd] + try: + result = subprocess.check_output(hostname_cmd, shell=True) + except subprocess.CalledProcessError as err: + logger.error( + "Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr" + ) + raise err + args.master_addr = result.decode('utf-8').split()[0] + if not args.master_addr: + raise RuntimeError( + f"Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr" + ) + logger.info(f"Using IP address of {args.master_addr} for node {first_host}") + + if args.autotuning != "": + run_autotuning(args, active_resources) + return + + if args.num_nodes > 0: + updated_active_resources = collections.OrderedDict() + for count, hostname in enumerate(active_resources.keys()): + if args.num_nodes == count: + break + updated_active_resources[hostname] = active_resources[hostname] + active_resources = updated_active_resources + + if args.num_gpus > 0: + updated_active_resources = collections.OrderedDict() + for hostname in active_resources.keys(): + updated_active_resources[hostname] = list(range(args.num_gpus)) + active_resources = updated_active_resources + + if args.elastic_training: + assert not args.no_local_rank, "--no_local_rank argument is not supported in Elastic training" + + # encode world info as base64 to make it easier to pass via command line + world_info_base64 = encode_world_info(active_resources) + + multi_node_exec = args.force_multi or len(active_resources) > 1 + + if not multi_node_exec: + deepspeed_launch = [ + sys.executable, "-u", "-m", "deepspeed.launcher.launch", f"--world_info={world_info_base64}", +
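+ # Illustrative sketch (assuming a single node with GPUs 0-1 and a hypothetical user
+ # script train.py): the assembled single-node command resembles
+ #   python -u -m deepspeed.launcher.launch --world_info=<base64 of {"localhost": [0, 1]}>
+ #          --master_addr=127.0.0.1 --master_port=<TORCH_DISTRIBUTED_DEFAULT_PORT> train.py <user args>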
f"--master_addr={args.master_addr}", f"--master_port={args.master_port}" + ] + if args.no_python: + deepspeed_launch.append("--no_python") + if args.module: + deepspeed_launch.append("--module") + if args.no_local_rank: + deepspeed_launch.append("--no_local_rank") + if args.save_pid: + deepspeed_launch += ["--save_pid", f"{os.getpid()}"] + if args.enable_each_rank_log: + deepspeed_launch.append(f"--enable_each_rank_log={args.enable_each_rank_log}") + if args.elastic_training: + deepspeed_launch.append("--enable_elastic_training") + deepspeed_launch.append(f"--max_elastic_nodes={args.max_elastic_nodes}") + deepspeed_launch.append(f"--min_elastic_nodes={args.min_elastic_nodes}") + if args.bind_cores_to_rank: + deepspeed_launch.append("--bind_cores_to_rank") + if args.bind_core_list is not None: + deepspeed_launch.append(f"--bind_core_list={args.bind_core_list}") + cmd = deepspeed_launch + [args.user_script] + args.user_args + else: + args.launcher = args.launcher.lower() + if args.launcher == PDSH_LAUNCHER: + runner = PDSHRunner(args, world_info_base64) + elif args.launcher == OPENMPI_LAUNCHER: + runner = OpenMPIRunner(args, world_info_base64, resource_pool) + elif args.launcher == MPICH_LAUNCHER: + runner = MPICHRunner(args, world_info_base64, resource_pool) + elif args.launcher == IMPI_LAUNCHER: + runner = IMPIRunner(args, world_info_base64, resource_pool) + elif args.launcher == MVAPICH_LAUNCHER: + runner = MVAPICHRunner(args, world_info_base64, resource_pool) + elif args.launcher == SLURM_LAUNCHER: + runner = SlurmRunner(args, world_info_base64, resource_pool) + else: + raise NotImplementedError(f"Unknown launcher {args.launcher}") + + if not runner.backend_exists(): + raise RuntimeError(f"launcher '{args.launcher}' not installed.") + + curr_path = os.path.abspath('.') + if 'PYTHONPATH' in env: + env['PYTHONPATH'] = curr_path + ":" + env['PYTHONPATH'] + else: + env['PYTHONPATH'] = curr_path + + excluded_vars = [] + for exclude_key, var_list in EXCLUDE_ENVS.items(): + if exclude_key in env.keys(): + # key exists in launcher env -> var list should be used + excluded_vars += var_list + + # load envs from accelerator + exports = EXPORT_ENVS + get_accelerator().export_envs() + for var in env.keys(): + if any([var.startswith(name) for name in exports]): + if not any([var == name for name in excluded_vars]): + runner.add_export(var, env[var]) + + for environ_path in DEEPSPEED_ENVIRONMENT_PATHS: + environ_file = os.path.join(environ_path, DEEPSPEED_ENVIRONMENT_NAME) + if os.path.isfile(environ_file): + logger.info(f"deepspeed_env file = {environ_file}") + with open(environ_file, 'r') as fd: + for var in fd.readlines(): + key, val = var.split('=', maxsplit=1) + runner.add_export(key, val) + + if args.launcher == PDSH_LAUNCHER: + cmd, kill_cmd, env = runner.get_cmd(env, active_resources) + else: + cmd = runner.get_cmd(env, active_resources) + + logger.info(f"cmd = {' '.join(cmd)}") + result = subprocess.Popen(cmd, env=env) + + def sigkill_handler(signum, frame): + result.send_signal(signal.SIGINT) + time.sleep(0.1) + result.send_signal(signal.SIGTERM) + result_kill = subprocess.Popen(kill_cmd, env=env) + result_kill.wait() + time.sleep(1) + sys.exit(1) + + if args.launcher == PDSH_LAUNCHER and multi_node_exec: + signal.signal(signal.SIGINT, sigkill_handler) + signal.signal(signal.SIGTERM, sigkill_handler) + + result.wait() + + # In case of failure must propagate the error-condition back to the caller (usually shell). 
The + # actual error and traceback should have been printed in the subprocess, so in order to avoid + # unnecessary noise we just quietly exit here with the same code as the subprocess + if result.returncode > 0: + sys.exit(result.returncode) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/deepspeed/sequence/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/sequence/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..208299fb8c50f73468d293b6fa5dca71649d62e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/sequence/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team diff --git a/venv/lib/python3.10/site-packages/deepspeed/sequence/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/sequence/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..307850d0743c74fa90608c65f5082cc6603eebc9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/sequence/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/sequence/__pycache__/layer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/sequence/__pycache__/layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebc9f27ab88a539677623ab5b07ab9e039691e7d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/sequence/__pycache__/layer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/sequence/layer.py b/venv/lib/python3.10/site-packages/deepspeed/sequence/layer.py new file mode 100644 index 0000000000000000000000000000000000000000..e1dbff87f4ec3c1a3edc89d6d8f05eef8c9ee5e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/sequence/layer.py @@ -0,0 +1,109 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from typing import Any, Tuple +from torch import Tensor +from torch.nn import Module + +import deepspeed.comm as dist + + +def single_all_to_all(input, scatter_idx, gather_idx, group): + seq_world_size = dist.get_world_size(group) + inp_shape = list(input.shape) + inp_shape[scatter_idx] = inp_shape[scatter_idx] // seq_world_size + if scatter_idx < 2: + input_t = input.reshape( + [seq_world_size, inp_shape[scatter_idx]] + \ + inp_shape[scatter_idx + 1:] + ).contiguous() + else: + # transpose groups of heads with the seq-len parallel dimension, so that we can scatter them! 
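+ # Illustrative shape walk-through (assuming the defaults scatter_idx=2, gather_idx=0 and an
+ # input laid out as [seq/p, batch, heads, head_dim] with seq_world_size p): the reshape below
+ # produces [(seq/p)*batch, p, heads/p, head_dim] and the transpose moves the parallel dim to
+ # the front, giving [p, (seq/p)*batch, heads/p, head_dim], so all_to_all_single trades head
+ # groups for sequence chunks; the final reshape of this function then returns
+ # [seq, batch, heads/p, head_dim].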
+ input_t = input.reshape( + [-1, seq_world_size, inp_shape[scatter_idx]] + \ + inp_shape[scatter_idx + 1:] + ).transpose(0, 1).contiguous() + + output = torch.empty_like(input_t) + dist.all_to_all_single(output, input_t, group=group) + + # if scattering the seq-dim, transpose the heads back to the original dimension + if scatter_idx < 2: + output = output.transpose(0, 1).contiguous() + + return output.reshape( + inp_shape[: gather_idx] + \ + [inp_shape[gather_idx] * seq_world_size,] + \ + inp_shape[gather_idx + 1:]).contiguous() + + +class _SeqAllToAll(torch.autograd.Function): + + @staticmethod + def forward(ctx: Any, group: dist.ProcessGroup, input: Tensor, scatter_idx: int, gather_idx: int) -> Tensor: + + ctx.group = group + ctx.scatter_idx = scatter_idx + ctx.gather_idx = gather_idx + + return single_all_to_all(input, scatter_idx, gather_idx, group) + + @staticmethod + def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor, None, None]: + return (None, _SeqAllToAll.apply(ctx.group, *grad_output, ctx.gather_idx, ctx.scatter_idx), None, None) + + +class DistributedAttention(torch.nn.Module): + """Initialization. + + Arguments: + local_attention (Module): local attention with q,k,v + sequence_process_group (ProcessGroup): sequence parallel process group + scatter_idx (int): scatter_idx for all2all comm + gather_idx (int): gather_idx for all2all comm + """ + + def __init__( + self, + local_attention: Module, + sequence_process_group: dist.ProcessGroup, + scatter_idx: int = 2, + gather_idx: int = 0, + ) -> None: + + super(DistributedAttention, self).__init__() + self.local_attn = local_attention + self.spg = sequence_process_group + self.scatter_idx = scatter_idx + self.gather_idx = gather_idx + + def forward(self, query: Tensor, key: Tensor, value: Tensor, *args: Any) -> Tensor: + """ forward + + Arguments: + query (Tensor): query input to the layer + key (Tensor): key input to the layer + value (Tensor): value input to the layer + args: other args + + Returns: + * output (Tensor): context output + """ + # TODO Merge three alltoall calls into one + # TODO (Reza): change the api on the megatron-deepspeed side so that we only receive all data (q,k, and v) together! 
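+ # Notation for the terse shape comments below: s = sequence length, p = sequence-parallel
+ # world size, h = heads/hidden dimension. [s/p:h:] means each rank holds a shard of the
+ # sequence with all heads; [s:h/p:] means the full sequence with a shard of the heads,
+ # which is what the local attention module operates on.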
+ #in shape : e.g., [s/p:h:] + query_layer = _SeqAllToAll.apply(self.spg, query, self.scatter_idx, self.gather_idx) + key_layer = _SeqAllToAll.apply(self.spg, key, self.scatter_idx, self.gather_idx) + value_layer = _SeqAllToAll.apply(self.spg, value, self.scatter_idx, self.gather_idx) + + #out shape : e.g., [s:h/p:] + context_layer = self.local_attn(query_layer, key_layer, value_layer, *args) + + output = _SeqAllToAll.apply(self.spg, context_layer, self.gather_idx, self.scatter_idx) + + #out e.g., [s/p::h] + return output diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jd-1.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jd-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..951ceb7f7f17c2f89280aac5d5c2da81afd69d43 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jd-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:862e08520a2433a495a3bd3ae9fd9e6c7c540a9c632db29bb8252784cbdad779 +size 1786 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdf-1.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdf-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..2f757032db273b37ef22dc6d4468e675e7bd0915 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdf-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a966dad58cf5fbc914a374ad5556c0414f5ed962237ed55a379fe96e308d00de +size 889 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdq-1.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdq-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..c9c6d8fb40f9db23fb31349fa8a087c288f5dae9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/api-v1-jdq-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84a8726d2c3f8bbca79d54d8b191158744b1993146f8f083b111a8ea78536057 +size 145 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/data-v1-dl-1.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/data-v1-dl-1.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..ee6e378589d722771363d186944ed1f0f78c9836 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1/data-v1-dl-1.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfe8945b949770b0da42daf58ce67d1c5fee25cf7b4fd145161837c2abc09429 +size 1841 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jd-2.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jd-2.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..24caf1bf71f829c85f13b7d2b8d0a94e4d27f1b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jd-2.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a672d435b97a6033dfd1d2a5c823d237ad1865101bd5e403cd99b5be0ba4e03b +size 1363 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdf-2.json.gz 
b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdf-2.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..be96cc72487b20a47142fb8c999ce032d73fba2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdf-2.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1b8387a7d08014a1c09807ae458ca7666ab8a3c579cbfb189e09c6d7de892a6 +size 866 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdq-2.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdq-2.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..08e36a9fb7d7eb1d95b74eebf7c1b870d4a052c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdq-2.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c46f6c5f221d877de604b906403b20cbdf674f1225bcdbb3e15bd1882a69a471 +size 1501 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/data-v1-dl-1666876.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/data-v1-dl-1666876.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..ee6e378589d722771363d186944ed1f0f78c9836 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/data-v1-dl-1666876.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfe8945b949770b0da42daf58ce67d1c5fee25cf7b4fd145161837c2abc09429 +size 1841 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdf-561.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdf-561.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..b2fce3413fd38f4c4f80ef7d6b198b4ac740a90a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdf-561.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:407424fb79cc30b8e9ff90900b3bf29244ac7f3797f278b5be602843f959b4ee +size 425