diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a29bb9447d015d0992c18624dba41a15aa838866
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .cpu_adam import DeepSpeedCPUAdam
+from .fused_adam import FusedAdam
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c9aa2791267d64e248d3389f67d0b5e0cbfeb9c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/cpu_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/cpu_adam.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8755b85fdb2bef0f52b63676cd5d65fa8bd4bc0e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/cpu_adam.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/fused_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/fused_adam.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3c4b9a5222210c32950e169906fd4d8622290c63
Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/fused_adam.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/multi_tensor_apply.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/multi_tensor_apply.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b358a63d94da65c1808339766a0d8ee3b078a461
Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/multi_tensor_apply.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adam/cpu_adam.py b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/cpu_adam.py
new file mode 100644
index 0000000000000000000000000000000000000000..10b8c15f970b8b7dfd85db35eb61793faabd43e2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/cpu_adam.py
@@ -0,0 +1,181 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+from cpuinfo import get_cpu_info
+from deepspeed.utils import logger
+from deepspeed.utils.logging import should_log_le
+from deepspeed.ops.op_builder import CPUAdamBuilder
+
+
+class DeepSpeedCPUAdam(torch.optim.Optimizer):
+    optimizer_id = 0
+
+    def __init__(self,
+                 model_params,
+                 lr=1e-3,
+                 bias_correction=True,
+                 betas=(0.9, 0.999),
+                 eps=1e-8,
+                 weight_decay=0,
+                 amsgrad=False,
+                 adamw_mode=True,
+                 fp32_optimizer_states=True):
+        """Fast vectorized implementation of two variations of the Adam optimizer on CPU:
+
+        * Adam: A Method for Stochastic Optimization (https://arxiv.org/abs/1412.6980);
+        * AdamW: Fixing Weight Decay Regularization in Adam (https://arxiv.org/abs/1711.05101)
+
+        DeepSpeed CPU Adam(W) provides a 5x to 7x speedup over torch.optim.Adam(W).
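+
+        As a minimal usage sketch (the surrounding ``model`` and ``batch`` are
+        hypothetical; the constructor mirrors its torch counterpart)::
+
+            from deepspeed.ops.adam import DeepSpeedCPUAdam
+
+            optimizer = DeepSpeedCPUAdam(model.parameters(), lr=1e-3, weight_decay=0.01)
+            loss = model(batch).sum()
+            loss.backward()
+            optimizer.step()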
+
+        To use this optimizer, the model's master parameters (in FP32) must
+        reside in CPU memory.
+
+        To train on a heterogeneous system, such as coordinating CPU and GPU, DeepSpeed offers
+        the ZeRO-Offload technology, which efficiently offloads the optimizer states into CPU memory,
+        with minimal impact on training throughput. DeepSpeedCPUAdam plays an important role in
+        minimizing the latency overhead of the optimizer on the CPU. Please refer to the ZeRO-Offload
+        tutorial (https://www.deepspeed.ai/tutorials/zero-offload/) for more information on how to
+        enable this technology.
+
+        When calling the step function, there are two options: (1) update the optimizer's states,
+        or (2) update the optimizer's states and copy the parameters back to the GPU at the same
+        time. We have seen that the second option can bring 30% higher throughput than doing the
+        copy separately after option (1).
+
+
+        .. note::
+            We recommend using our `config
+            `_
+            to allow :meth:`deepspeed.initialize` to build this optimizer
+            for you.
+
+
+        Arguments:
+            model_params (iterable): iterable of parameters to optimize or dicts defining
+                parameter groups.
+            lr (float, optional): learning rate. (default: 1e-3)
+            betas (Tuple[float, float], optional): coefficients used for computing
+                running averages of gradient and its square. (default: (0.9, 0.999))
+            eps (float, optional): term added to the denominator to improve
+                numerical stability. (default: 1e-8)
+            weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+            amsgrad (boolean, optional): whether to use the AMSGrad variant of this
+                algorithm from the paper `On the Convergence of Adam and Beyond`_
+                (default: False) NOT SUPPORTED in DeepSpeed CPUAdam!
+            adamw_mode: select between Adam and AdamW implementations (default: AdamW)
+            fp32_optimizer_states: creates momentum and variance in full precision regardless of
+                the precision of the parameters (default: True)
+        """
+
+        default_args = dict(lr=lr,
+                            betas=betas,
+                            eps=eps,
+                            weight_decay=weight_decay,
+                            bias_correction=bias_correction,
+                            amsgrad=amsgrad)
+        super(DeepSpeedCPUAdam, self).__init__(model_params, default_args)
+
+        cpu_info = get_cpu_info()
+        self.cpu_vendor = cpu_info["vendor_id_raw"].lower() if "vendor_id_raw" in cpu_info else "unknown"
+        if "amd" in self.cpu_vendor:
+            for group_id, group in enumerate(self.param_groups):
+                for param_id, p in enumerate(group['params']):
+                    if p.dtype == torch.half:
+                        logger.warning("FP16 params for CPUAdam may not work on AMD CPUs")
+                        break
+                else:
+                    continue
+                break
+
+        self.opt_id = DeepSpeedCPUAdam.optimizer_id
+        DeepSpeedCPUAdam.optimizer_id = DeepSpeedCPUAdam.optimizer_id + 1
+        self.adam_w_mode = adamw_mode
+        self.fp32_optimizer_states = fp32_optimizer_states
+        self.ds_opt_adam = CPUAdamBuilder().load()
+
+        self.ds_opt_adam.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode,
+                                     should_log_le("info"))
+
+    def __del__(self):
+        # We need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
+        # is used multiple times in the same process (notebook or pytest worker).
+        self.ds_opt_adam.destroy_adam(self.opt_id)
+
+    def __setstate__(self, state):
+        super(DeepSpeedCPUAdam, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('amsgrad', False)
+
+    @torch.no_grad()
+    def step(self, closure=None, fp16_param_groups=None):
+        """Update the model parameters.
+
+        .. note::
+            This method will be called internally by ZeRO-Offload. DeepSpeed
+            users should still use ``engine.step()`` as shown in the
+            `Getting Started
+            `_ guide.
+
+        Args:
+            closure (callable, optional): closure to compute the loss.
+                Defaults to ``None``.
+            fp16_param_groups: FP16 GPU parameters to update. Performing the
+                copy here reduces communication time. Defaults to ``None``.
+
+        Returns:
+            loss: if ``closure`` is provided. Otherwise ``None``.
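+
+        An illustrative sketch of option (2) from the class docstring
+        (``fp16_module`` is hypothetical; ZeRO-Offload normally drives this
+        internally)::
+
+            # one FP16 GPU parameter per FP32 CPU master parameter, in matching order
+            optimizer.step(fp16_param_groups=[list(fp16_module.parameters())])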
+        """
+
+        loss = None
+        if closure is not None:
+            with torch.enable_grad():
+                loss = closure()
+
+        # intended device for step
+        device = torch.device('cpu')
+
+        # convert the fp16 params into a list of parameter groups
+        if type(fp16_param_groups) is list:
+            if type(fp16_param_groups[0]) is not list:
+                fp16_param_groups = [fp16_param_groups]
+        elif fp16_param_groups is not None:
+            fp16_param_groups = [[fp16_param_groups]]
+
+        for group_id, group in enumerate(self.param_groups):
+            for param_id, p in enumerate(group['params']):
+
+                if p.grad is None:
+                    continue
+
+                assert p.device == device, f"CPUAdam param is on {p.device} and must be 'cpu', make " \
+                    "sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
+
+                state = self.state[p]
+                # State initialization
+                if len(state) == 0:
+                    #print(f'group {group_id} param {param_id} = {p.numel()}')
+                    state['step'] = 0
+
+                    # use full precision by default unless self.fp32_optimizer_states is off
+                    state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
+
+                    # gradient momentums
+                    state['exp_avg'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
+                    #memory_format=torch.preserve_format)
+                    # gradient variances
+                    state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
+                    #memory_format=torch.preserve_format)
+
+                state['step'] += 1
+                beta1, beta2 = group['betas']
+
+                if fp16_param_groups is not None:
+                    self.ds_opt_adam.adam_update_copy(self.opt_id, state['step'], group['lr'], beta1, beta2,
+                                                      group['eps'], group['weight_decay'], group['bias_correction'],
+                                                      p.data, p.grad.data, state['exp_avg'], state['exp_avg_sq'],
+                                                      fp16_param_groups[group_id][param_id].data)
+                else:
+                    self.ds_opt_adam.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'],
+                                                 group['weight_decay'], group['bias_correction'], p.data, p.grad.data,
+                                                 state['exp_avg'], state['exp_avg_sq'])
+        return loss
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adam/fused_adam.py b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/fused_adam.py
new file mode 100644
index 0000000000000000000000000000000000000000..53f859e9cc87bde8f16760a4c23394ea1207af0c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/fused_adam.py
@@ -0,0 +1,195 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+"""
+Copyright NVIDIA/apex
+This file is adapted from fused adam in NVIDIA/apex, commit 6bd01c4
+"""
+
+import torch
+from .multi_tensor_apply import MultiTensorApply
+
+multi_tensor_applier = MultiTensorApply(2048 * 32)
+from deepspeed.accelerator import get_accelerator
+from deepspeed.ops.op_builder import FusedAdamBuilder
+
+
+class FusedAdam(torch.optim.Optimizer):
+    """Implements the Adam algorithm.
+
+    Currently GPU-only. Requires Apex to be installed via
+    ``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
+
+    This version of fused Adam implements 2 fusions.
+
+    * Fusion of the Adam update's elementwise operations
+    * A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
+
+    :class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
+    or ``torch.optim.Adam`` with ``adam_w_mode=False``::
+
+        opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
+        ...
+        opt.step()
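+
+    In this DeepSpeed copy, the same class is exported from the ops package, so an
+    equivalent sketch is::
+
+        from deepspeed.ops.adam import FusedAdam
+
+        opt = FusedAdam(model.parameters(), lr=1e-3)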
+
+    :class:`apex.optimizers.FusedAdam` may be used with or without Amp. If you wish to use :class:`FusedAdam` with Amp,
+    you may choose any ``opt_level``::
+
+        opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
+        model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
+        ...
+        opt.step()
+
+    In general, ``opt_level="O1"`` is recommended.
+
+
+    .. warning::
+        A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``. These additional arguments
+        are now deprecated and unnecessary.
+
+    Adam was proposed in `Adam: A Method for Stochastic Optimization`_.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups.
+        lr (float, optional): learning rate. (default: 1e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its square. (default: (0.9, 0.999))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability. (default: 1e-8)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
+            algorithm from the paper `On the Convergence of Adam and Beyond`_
+            (default: False) NOT SUPPORTED in FusedAdam!
+        adam_w_mode (boolean, optional): Apply L2 regularization or weight decay;
+            True for decoupled weight decay (also known as AdamW). (default: True)
+        set_grad_none (bool, optional): whether to set grad to None when the zero_grad()
+            method is called. (default: True)
+
+    .. _Adam - A Method for Stochastic Optimization:
+        https://arxiv.org/abs/1412.6980
+    .. _On the Convergence of Adam and Beyond:
+        https://openreview.net/forum?id=ryQu7f-RZ
+    """
+
+    def __init__(self,
+                 params,
+                 lr=1e-3,
+                 bias_correction=True,
+                 betas=(0.9, 0.999),
+                 eps=1e-8,
+                 adam_w_mode=True,
+                 weight_decay=0.,
+                 amsgrad=False,
+                 set_grad_none=True):
+
+        if amsgrad:
+            raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
+        defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay)
+        super(FusedAdam, self).__init__(params, defaults)
+        self.adam_w_mode = 1 if adam_w_mode else 0
+        self.set_grad_none = set_grad_none
+
+        fused_adam_cuda = FusedAdamBuilder().load()
+        # Skip buffer
+        self._dummy_overflow_buf = get_accelerator().IntTensor([0])
+        self.multi_tensor_adam = fused_adam_cuda.multi_tensor_adam
+
+    def zero_grad(self):
+        if self.set_grad_none:
+            for group in self.param_groups:
+                for p in group['params']:
+                    p.grad = None
+        else:
+            super(FusedAdam, self).zero_grad()
+
+    def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+
+        The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
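+
+        A minimal closure sketch (illustrative; ``model``, ``loss_fn``, ``inputs``
+        and ``targets`` are hypothetical)::
+
+            def closure():
+                optimizer.zero_grad()
+                loss = loss_fn(model(inputs), targets)
+                loss.backward()
+                return loss
+
+            loss = optimizer.step(closure)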
+ """ + if any(p is not None for p in [grads, output_params, scale, grad_norms]): + raise RuntimeError( + 'FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.' + ) + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + if len(group['params']) == 0: + continue + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or pass list into kernel + if 'step' not in group: + group['step'] = 0 + + # create lists for multi-tensor apply + g_16, p_16, m_16, v_16 = [], [], [], [] + g_bf, p_bf, m_bf, v_bf = [], [], [], [] + g_32, p_32, m_32, v_32 = [], [], [], [] + + for p in group['params']: + if p.grad is None: + continue + if p.grad.data.is_sparse: + raise RuntimeError( + 'FusedAdam does not support sparse gradients, please consider SparseAdam instead') + + state = self.state[p] + # State initialization + if len(state) == 0: + # DeepSpeed ZeRO 3 processes each subgroup a time, so we need to keep tracking step count for each tensor separately. + # While this is not an issue for ZeRO 1 & 2, since they apply a single optimization step to the whole param group at the same time. + # In order to keep backward compatibility for the existing checkpoints, we use group['state'] to initialize state['step'] if it exists. + state['step'] = group.get('step', 0) + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + if p.dtype == torch.float16: + g_16.append(p.grad.data) + p_16.append(p.data) + m_16.append(state['exp_avg']) + v_16.append(state['exp_avg_sq']) + elif p.dtype == torch.bfloat16: + g_bf.append(p.grad) + p_bf.append(p) + m_bf.append(state['exp_avg']) + v_bf.append(state['exp_avg_sq']) + elif p.dtype == torch.float32: + g_32.append(p.grad.data) + p_32.append(p.data) + m_32.append(state['exp_avg']) + v_32.append(state['exp_avg_sq']) + else: + raise RuntimeError('FusedAdam only support fp16, bf16 and fp32.') + + if len(g_16) > 0: + state['step'] += 1 + multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16], + group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode, + bias_correction, group['weight_decay']) + + if len(g_bf) > 0: + state['step'] += 1 + multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_bf, p_bf, m_bf, v_bf], + group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode, + bias_correction, group['weight_decay']) + + if len(g_32) > 0: + state['step'] += 1 + multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32], + group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode, + bias_correction, group['weight_decay']) + + return loss diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adam/multi_tensor_apply.py b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/multi_tensor_apply.py new file mode 100644 index 0000000000000000000000000000000000000000..0ba228505cef747eea4fec62f3e68707fa4daa0c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/adam/multi_tensor_apply.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Copyright NVIDIA/apex +This file is adapted from NVIDIA/apex, commit a109f85 +""" + + +class MultiTensorApply(object): + + def __init__(self, chunk_size): + self.chunk_size = chunk_size + + def __call__(self, op, noop_flag_buffer, tensor_lists, *args): + return op(self.chunk_size, noop_flag_buffer, tensor_lists, *args) diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd9670645597a68f6ff63413fba32ecbacd16eed Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/__pycache__/evoformer_attn.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/__pycache__/evoformer_attn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ceff9e2105f50d89a260ebb96f3c33c16b1968f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/__pycache__/evoformer_attn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/evoformer_attn.py b/venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/evoformer_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..da5843d6de31ed94c27c466bbcf5ae5966bb620f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/evoformer_attn.py @@ -0,0 +1,106 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import numpy as np +from deepspeed.ops.op_builder import EvoformerAttnBuilder +from deepspeed.accelerator import get_accelerator + +kernel_ = None + + +def _attention(Q, K, V, bias1, bias2): + assert Q.shape[-3] > 16, "seq_len must be greater than 16" + O = torch.empty_like(Q, dtype=Q.dtype) + assert get_accelerator().on_accelerator(Q), "Q must be on cuda" + assert get_accelerator().on_accelerator(K), "K must be on cuda" + assert get_accelerator().on_accelerator(V), "V must be on cuda" + assert get_accelerator().on_accelerator(bias1), "bias1 must be on cuda" + assert get_accelerator().on_accelerator(bias2), "bias2 must be on cuda" + global kernel_ + if kernel_ is None: + kernel_ = EvoformerAttnBuilder().load() + nheads = Q.shape[-2] + nq = (Q.shape[-3] + 31) // 32 * 32 + nb = np.prod(Q.shape[:-3]) + lse = torch.empty((nb, nheads, nq), dtype=torch.float32, device=Q.device) + kernel_.attention(Q, K, V, bias1, bias2, O, lse) + return O, lse + + +def attention_bwd(dO, Q, K, V, O, lse, bias1, bias2, bias1_grad, bias2_grad): + assert max(Q.shape[-1], V.shape[-1]) <= 64, "Hidden size is too large. 
Need to change kMax to a larger value" + dQ = torch.empty_like(Q, dtype=Q.dtype) + dK = torch.empty_like(K, dtype=K.dtype) + dV = torch.empty_like(V, dtype=V.dtype) + assert get_accelerator().on_accelerator(dO), "dO must be on cuda" + assert get_accelerator().on_accelerator(Q), "Q must be on cuda" + assert get_accelerator().on_accelerator(K), "K must be on cuda" + assert get_accelerator().on_accelerator(V), "V must be on cuda" + assert get_accelerator().on_accelerator(O), "O must be on cuda" + global kernel_ + if kernel_ is None: + kernel_ = EvoformerAttnBuilder().load() + delta = torch.empty_like(lse) + if bias1_grad: + dB1 = torch.zeros_like(bias1, dtype=torch.float32) + else: + dB1 = torch.tensor([], dtype=torch.float32, device=bias1.device) + if bias2_grad: + dB2 = torch.zeros_like(bias2, dtype=torch.float32) + else: + dB2 = torch.tensor([], dtype=torch.float32, device=bias2.device) + kernel_.attention_bwd(dO, Q, K, V, O, lse, delta, bias1, bias2, dQ, dK, dV, dB1, dB2) + return dQ, dK, dV, dB1.to(dO.dtype), dB2.to(dO.dtype) + + +class EvoformerFusedAttention(torch.autograd.Function): + + @staticmethod + def forward(ctx, q, k, v, bias1=None, bias2=None): + """ + q, k, v: are in shape [*, L, H, D] + """ + bias1_ = bias1.contiguous() if bias1 is not None else torch.tensor([], dtype=q.dtype, device=q.device) + bias2_ = bias2.contiguous() if bias2 is not None else torch.tensor([], dtype=q.dtype, device=q.device) + q = q.contiguous() + k = k.contiguous() + v = v.contiguous() + o, lse = _attention(q, k, v, bias1_, bias2_) + ctx.save_for_backward(q, k, v, o, lse, bias1_, bias2_) + return o + + @staticmethod + def backward(ctx, grad_output): + q, k, v, o, lse, bias1, bias2 = ctx.saved_tensors + is_b1_grad = bias1.numel() != 0 and ctx.needs_input_grad[3] + is_b2_grad = bias2.numel() != 0 and ctx.needs_input_grad[4] + dQ, dK, dV, dB1, dB2 = attention_bwd(grad_output, q, k, v, o, lse, bias1, bias2, is_b1_grad, is_b2_grad) + if not is_b1_grad: + dB1 = None + if not is_b2_grad: + dB2 = None + return dQ, dK, dV, dB1, dB2 + + +def DS4Sci_EvoformerAttention(Q, K, V, biases): + assert len(biases) <= 2 + + if (len(biases) == 0): + biases.append(None) + + if (len(biases) == 1): + biases.append(None) + + bias_1_shape = lambda x: (x.shape[0], x.shape[1], 1, 1, x.shape[2]) + bias_2_shape = lambda x: (x.shape[0], 1, x.shape[3], x.shape[2], x.shape[2]) + + if biases[0] is not None: + assert biases[0].shape == bias_1_shape(Q), "bias1 shape is incorrect" + + if biases[1] is not None: + assert biases[1].shape == bias_2_shape(Q), "bias2 shape is incorrect" + + return EvoformerFusedAttention.apply(Q, K, V, biases[0], biases[1]) diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..afe48159933c75045e9445a577d545b7ae260073 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__init__.py @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import sys
+import os
+import pkgutil
+import importlib
+
+from .builder import get_default_compute_capabilities, OpBuilder
+
+# Do not remove, required for abstract accelerator to detect if we have a deepspeed or 3p op_builder
+__deepspeed__ = True
+
+# List of all available op builders from deepspeed op_builder
+try:
+    import deepspeed.ops.op_builder  # noqa: F401 # type: ignore
+    op_builder_dir = "deepspeed.ops.op_builder"
+except ImportError:
+    op_builder_dir = "op_builder"
+
+__op_builders__ = []
+
+this_module = sys.modules[__name__]
+
+
+def builder_closure(member_name):
+    if op_builder_dir == "op_builder":
+        # At installation time we cannot get the builder because torch is not installed yet,
+        # so return a closure instead.
+        def _builder():
+            from deepspeed.accelerator import get_accelerator
+            builder = get_accelerator().create_op_builder(member_name)
+            return builder
+
+        return _builder
+    else:
+        # At runtime, return the op builder class directly.
+        from deepspeed.accelerator import get_accelerator
+        builder = get_accelerator().get_op_builder(member_name)
+        return builder
+
+
+# Reflect builder names and add builder closures, so that e.g. 'TransformerBuilder()' creates an op builder for the current accelerator
+for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(this_module.__file__)]):
+    if module_name != 'all_ops' and module_name != 'builder':
+        module = importlib.import_module(f".{module_name}", package=op_builder_dir)
+        for member_name in module.__dir__():
+            if member_name.endswith('Builder') and member_name != "OpBuilder" and member_name != "CUDAOpBuilder":
+                # Assign the builder closure to a variable with the same name,
+                # i.e. the following is equivalent to TransformerBuilder = builder_closure('TransformerBuilder')
+                this_module.__dict__[member_name] = builder_closure(member_name)
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..630bcead209b71d9226c578bd8ed717e8683c5bc
Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/all_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/all_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28833db138a1d0a6c45877d6c58260f5df00f897
Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/all_ops.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/async_io.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/async_io.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..073aa7ac16438fd52a624a92703a9db3d601a1da
Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/async_io.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/builder.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/builder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35103fbb11ea7c872a25810f5975f499e0daad7a
Binary files /dev/null and
b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/builder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adagrad.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adagrad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc4216b7c7ce7ee1005b0e02a38ac2bc2e4bf878 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adagrad.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fff39d83a7ae8329916493370055983e45277a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_lion.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_lion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7439c4dd910fa9618a8a6e435a670c73669802a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_lion.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/evoformer_attn.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/evoformer_attn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fcbf93343ce7a05b17cfa23d6c4f902c13a8d62 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/evoformer_attn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fp_quantizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fp_quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a85274e00141584b3f65bacd6f9f2eeab8cdb58 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fp_quantizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab6e0ba131186688489e08a2da520f9e6fe26da9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_adam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lamb.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lamb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca558062e3ed84d5000eff411779c418780eccdb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lamb.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lion.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lion.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6311e2264558648e3b19b41b68d393cfd0899e26 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lion.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_core_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_core_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d63c9b5accc26cb9a2921c9c813971d5e8047d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_core_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_cutlass_builder.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_cutlass_builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85cae79becce63de378334898c2c769cf8cd37f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_cutlass_builder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/quantizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6b2d0623ad7286f08dd8dd6070df0469ad21804 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/quantizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0200e7df9dccb7faf88a346dcecf2d5d472dd02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b84792e783b6fcc02fa66a8455b98d67941e76e5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/random_ltd.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/random_ltd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0486033681b16543f1ab31f2db7ef6b6bbc8f1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/random_ltd.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/sparse_attn.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/sparse_attn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb1bc38119023fdeba0e640ae44be1103b12d179 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/sparse_attn.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/spatial_inference.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/spatial_inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c3d0e1d0ce38afcc0a593c6ff220745c839ee7f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/spatial_inference.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/stochastic_transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/stochastic_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b8ee1d02709555c8eda08a14e2257c04f19bd2d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/stochastic_transformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22362cd660791e5a4ef47862aa6f887253c32cda Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer_inference.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer_inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f593f1cb52cb88a3b1e169e55e04a5a8e275ebbc Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer_inference.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/async_io.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/async_io.py new file mode 100644 index 0000000000000000000000000000000000000000..b55c821910b9ffad0254f0e5104eb066252c446e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/async_io.py @@ -0,0 +1,99 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import distutils.spawn +import subprocess + +from .builder import OpBuilder + + +class AsyncIOBuilder(OpBuilder): + BUILD_VAR = "DS_BUILD_AIO" + NAME = "async_io" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.aio.{self.NAME}_op' + + def sources(self): + return [ + 'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp', + 'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp', + 'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp', + 'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp', + 'csrc/aio/py_lib/deepspeed_pin_tensor.cpp' + ] + + def include_paths(self): + return ['csrc/aio/py_lib', 'csrc/aio/common'] + + def cxx_args(self): + # -O0 for improved debugging, since performance is bound by I/O + CPU_ARCH = self.cpu_arch() + SIMD_WIDTH = self.simd_width() + import torch # Keep this import here to avoid errors when building DeepSpeed wheel without torch installed + TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[0:2]) + if TORCH_MAJOR >= 2 and TORCH_MINOR >= 1: + CPP_STD = '-std=c++17' + else: + CPP_STD = '-std=c++14' + return [ + '-g', + '-Wall', + '-O0', + CPP_STD, + '-shared', + '-fPIC', + '-Wno-reorder', + CPU_ARCH, + '-fopenmp', + SIMD_WIDTH, + '-laio', + ] + + def extra_ldflags(self): + return ['-laio'] + + def check_for_libaio_pkg(self): + libs = dict( + dpkg=["-l", "libaio-dev", "apt"], + pacman=["-Q", "libaio", "pacman"], + rpm=["-q", "libaio-devel", "yum"], + ) + + found = False + for pkgmgr, data in libs.items(): + flag, lib, tool = data + path = distutils.spawn.find_executable(pkgmgr) + if path is not None: + cmd = f"{pkgmgr} {flag} {lib}" + result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + if result.wait() == 0: + found = True + else: + self.warning(f"{self.NAME}: please install the {lib} package with {tool}") + break + return found + + def is_compatible(self, verbose=True): + # Check for the existence of libaio by using distutils + # to compile and link a test program that calls io_submit, + # which is a function provided by libaio that is used in the async_io op. + # If needed, one can define -I and -L entries in CFLAGS and LDFLAGS + # respectively to specify the directories for libaio.h and libaio.so. + aio_compatible = self.has_function('io_pgetevents', ('aio', )) + if verbose and not aio_compatible: + self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.") + + # Check for the libaio package via known package managers + # to print suggestions on which package to install. + self.check_for_libaio_pkg() + + self.warning( + "If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found." + ) + return super().is_compatible(verbose) and aio_compatible diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/builder.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..8dc825c7926da9ef124bd56bd99712ef3c97e2d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/builder.py @@ -0,0 +1,775 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import sys +import time +import importlib +from pathlib import Path +import subprocess +import shlex +import shutil +import tempfile +import distutils.ccompiler +import distutils.log +import distutils.sysconfig +from distutils.errors import CompileError, LinkError +from abc import ABC, abstractmethod +from typing import List + +YELLOW = '\033[93m' +END = '\033[0m' +WARNING = f"{YELLOW} [WARNING] {END}" + +DEFAULT_TORCH_EXTENSION_PATH = "/tmp/torch_extensions" +DEFAULT_COMPUTE_CAPABILITIES = "6.0;6.1;7.0" + +try: + import torch +except ImportError: + print(f"{WARNING} unable to import torch, please install it if you want to pre-compile any deepspeed ops.") +else: + TORCH_MAJOR = int(torch.__version__.split('.')[0]) + TORCH_MINOR = int(torch.__version__.split('.')[1]) + + +class MissingCUDAException(Exception): + pass + + +class CUDAMismatchException(Exception): + pass + + +def installed_cuda_version(name=""): + import torch.utils.cpp_extension + cuda_home = torch.utils.cpp_extension.CUDA_HOME + if cuda_home is None: + raise MissingCUDAException("CUDA_HOME does not exist, unable to compile CUDA op(s)") + # Ensure there is not a cuda version mismatch between torch and nvcc compiler + output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True) + output_split = output.split() + release_idx = output_split.index("release") + release = output_split[release_idx + 1].replace(',', '').split(".") + # Ignore patch versions, only look at major + minor + cuda_major, cuda_minor = release[:2] + return int(cuda_major), int(cuda_minor) + + +def get_default_compute_capabilities(): + compute_caps = DEFAULT_COMPUTE_CAPABILITIES + import torch.utils.cpp_extension + if torch.utils.cpp_extension.CUDA_HOME is not None and installed_cuda_version()[0] >= 11: + if installed_cuda_version()[0] == 11 and installed_cuda_version()[1] == 0: + # Special treatment of CUDA 11.0 because compute_86 is not supported. + compute_caps += ";8.0" + else: + compute_caps += ";8.0;8.6" + return compute_caps + + +# list compatible minor CUDA versions - so that for example pytorch built with cuda-11.0 can be used +# to build deepspeed and system-wide installed cuda 11.2 +cuda_minor_mismatch_ok = { + 10: ["10.0", "10.1", "10.2"], + 11: ["11.0", "11.1", "11.2", "11.3", "11.4", "11.5", "11.6", "11.7", "11.8"], + 12: ["12.0", "12.1", "12.2", "12.3"], +} + + +def assert_no_cuda_mismatch(name=""): + cuda_major, cuda_minor = installed_cuda_version(name) + sys_cuda_version = f'{cuda_major}.{cuda_minor}' + torch_cuda_version = ".".join(torch.version.cuda.split('.')[:2]) + # This is a show-stopping error, should probably not proceed past this + if sys_cuda_version != torch_cuda_version: + if (cuda_major in cuda_minor_mismatch_ok and sys_cuda_version in cuda_minor_mismatch_ok[cuda_major] + and torch_cuda_version in cuda_minor_mismatch_ok[cuda_major]): + print(f"Installed CUDA version {sys_cuda_version} does not match the " + f"version torch was compiled with {torch.version.cuda} " + "but since the APIs are compatible, accepting this combination") + return True + elif os.getenv("DS_SKIP_CUDA_CHECK", "0") == "1": + print( + f"{WARNING} DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the " + f"version torch was compiled with {torch.version.cuda}." + "Detected `DS_SKIP_CUDA_CHECK=1`: Allowing this combination of CUDA, but it may result in unexpected behavior." 
+ ) + return True + raise CUDAMismatchException( + f">- DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the " + f"version torch was compiled with {torch.version.cuda}, unable to compile " + "cuda/cpp extensions without a matching cuda version.") + return True + + +class OpBuilder(ABC): + _rocm_version = None + _is_rocm_pytorch = None + _is_sycl_enabled = None + _loaded_ops = {} + + def __init__(self, name): + self.name = name + self.jit_mode = False + self.build_for_cpu = False + self.enable_bf16 = False + self.error_log = None + + @abstractmethod + def absolute_name(self): + ''' + Returns absolute build path for cases where the op is pre-installed, e.g., deepspeed.ops.adam.cpu_adam + will be installed as something like: deepspeed/ops/adam/cpu_adam.so + ''' + pass + + @abstractmethod + def sources(self): + ''' + Returns list of source files for your op, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed) + ''' + pass + + def hipify_extension(self): + pass + + def sycl_extension(self): + pass + + @staticmethod + def validate_torch_version(torch_info): + install_torch_version = torch_info['version'] + current_torch_version = ".".join(torch.__version__.split('.')[:2]) + if install_torch_version != current_torch_version: + raise RuntimeError("PyTorch version mismatch! DeepSpeed ops were compiled and installed " + "with a different version than what is being used at runtime. " + f"Please re-install DeepSpeed or switch torch versions. " + f"Install torch version={install_torch_version}, " + f"Runtime torch version={current_torch_version}") + + @staticmethod + def validate_torch_op_version(torch_info): + if not OpBuilder.is_rocm_pytorch(): + current_cuda_version = ".".join(torch.version.cuda.split('.')[:2]) + install_cuda_version = torch_info['cuda_version'] + if install_cuda_version != current_cuda_version: + raise RuntimeError("CUDA version mismatch! DeepSpeed ops were compiled and installed " + "with a different version than what is being used at runtime. " + f"Please re-install DeepSpeed or switch torch versions. " + f"Install CUDA version={install_cuda_version}, " + f"Runtime CUDA version={current_cuda_version}") + else: + current_hip_version = ".".join(torch.version.hip.split('.')[:2]) + install_hip_version = torch_info['hip_version'] + if install_hip_version != current_hip_version: + raise RuntimeError("HIP version mismatch! DeepSpeed ops were compiled and installed " + "with a different version than what is being used at runtime. " + f"Please re-install DeepSpeed or switch torch versions. 
" + f"Install HIP version={install_hip_version}, " + f"Runtime HIP version={current_hip_version}") + + @staticmethod + def is_rocm_pytorch(): + if OpBuilder._is_rocm_pytorch is not None: + return OpBuilder._is_rocm_pytorch + + _is_rocm_pytorch = False + try: + import torch + except ImportError: + pass + else: + if TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 5): + _is_rocm_pytorch = hasattr(torch.version, 'hip') and torch.version.hip is not None + if _is_rocm_pytorch: + from torch.utils.cpp_extension import ROCM_HOME + _is_rocm_pytorch = ROCM_HOME is not None + OpBuilder._is_rocm_pytorch = _is_rocm_pytorch + return OpBuilder._is_rocm_pytorch + + @staticmethod + def is_sycl_enabled(): + if OpBuilder._is_sycl_enabled is not None: + return OpBuilder._is_sycl_enabled + + _is_sycl_enabled = False + try: + result = subprocess.run(["c2s", "--version"], capture_output=True) + except: + pass + else: + _is_sycl_enabled = True + + OpBuilder._is_sycl_enabled = _is_sycl_enabled + return OpBuilder._is_sycl_enabled + + @staticmethod + def installed_rocm_version(): + if OpBuilder._rocm_version: + return OpBuilder._rocm_version + + ROCM_MAJOR = '0' + ROCM_MINOR = '0' + if OpBuilder.is_rocm_pytorch(): + from torch.utils.cpp_extension import ROCM_HOME + rocm_ver_file = Path(ROCM_HOME).joinpath(".info/version-dev") + if rocm_ver_file.is_file(): + with open(rocm_ver_file, 'r') as file: + ROCM_VERSION_DEV_RAW = file.read() + elif "rocm" in torch.__version__: + ROCM_VERSION_DEV_RAW = torch.__version__.split("rocm")[1] + else: + assert False, "Could not detect ROCm version" + assert ROCM_VERSION_DEV_RAW != "", "Could not detect ROCm version" + ROCM_MAJOR = ROCM_VERSION_DEV_RAW.split('.')[0] + ROCM_MINOR = ROCM_VERSION_DEV_RAW.split('.')[1] + OpBuilder._rocm_version = (int(ROCM_MAJOR), int(ROCM_MINOR)) + return OpBuilder._rocm_version + + def include_paths(self): + ''' + Returns list of include paths, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed) + ''' + return [] + + def nvcc_args(self): + ''' + Returns optional list of compiler flags to forward to nvcc when building CUDA sources + ''' + return [] + + def cxx_args(self): + ''' + Returns optional list of compiler flags to forward to the build + ''' + return [] + + def is_compatible(self, verbose=True): + ''' + Check if all non-python dependencies are satisfied to build this op + ''' + return True + + def extra_ldflags(self): + return [] + + def has_function(self, funcname, libraries, verbose=False): + ''' + Test for existence of a function within a tuple of libraries. + + This is used as a smoke test to check whether a certain library is available. + As a test, this creates a simple C program that calls the specified function, + and then distutils is used to compile that program and link it with the specified libraries. + Returns True if both the compile and link are successful, False otherwise. + ''' + tempdir = None # we create a temporary directory to hold various files + filestderr = None # handle to open file to which we redirect stderr + oldstderr = None # file descriptor for stderr + try: + # Echo compile and link commands that are used. + if verbose: + distutils.log.set_verbosity(1) + + # Create a compiler object. + compiler = distutils.ccompiler.new_compiler(verbose=verbose) + + # Configure compiler and linker to build according to Python install. + distutils.sysconfig.customize_compiler(compiler) + + # Create a temporary directory to hold test files. 
+ tempdir = tempfile.mkdtemp() + + # Define a simple C program that calls the function in question + prog = "void %s(void); int main(int argc, char** argv) { %s(); return 0; }" % (funcname, funcname) + + # Write the test program to a file. + filename = os.path.join(tempdir, 'test.c') + with open(filename, 'w') as f: + f.write(prog) + + # Redirect stderr file descriptor to a file to silence compile/link warnings. + if not verbose: + filestderr = open(os.path.join(tempdir, 'stderr.txt'), 'w') + oldstderr = os.dup(sys.stderr.fileno()) + os.dup2(filestderr.fileno(), sys.stderr.fileno()) + + # Workaround for behavior in distutils.ccompiler.CCompiler.object_filenames() + # Otherwise, a local directory will be used instead of tempdir + drive, driveless_filename = os.path.splitdrive(filename) + root_dir = driveless_filename[0] if os.path.isabs(driveless_filename) else '' + output_dir = os.path.join(drive, root_dir) + + # Attempt to compile the C program into an object file. + cflags = shlex.split(os.environ.get('CFLAGS', "")) + objs = compiler.compile([filename], output_dir=output_dir, extra_preargs=self.strip_empty_entries(cflags)) + + # Attempt to link the object file into an executable. + # Be sure to tack on any libraries that have been specified. + ldflags = shlex.split(os.environ.get('LDFLAGS', "")) + compiler.link_executable(objs, + os.path.join(tempdir, 'a.out'), + extra_preargs=self.strip_empty_entries(ldflags), + libraries=libraries) + + # Compile and link succeeded + return True + + except CompileError: + return False + + except LinkError: + return False + + except: + return False + + finally: + # Restore stderr file descriptor and close the stderr redirect file. + if oldstderr is not None: + os.dup2(oldstderr, sys.stderr.fileno()) + if filestderr is not None: + filestderr.close() + + # Delete the temporary directory holding the test program and stderr files. + if tempdir is not None: + shutil.rmtree(tempdir) + + def strip_empty_entries(self, args): + ''' + Drop any empty strings from the list of compile and link flags + ''' + return [x for x in args if len(x) > 0] + + def cpu_arch(self): + try: + from cpuinfo import get_cpu_info + except ImportError as e: + cpu_info = self._backup_cpuinfo() + if cpu_info is None: + return "-march=native" + + try: + cpu_info = get_cpu_info() + except Exception as e: + self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), " + "falling back to `lscpu` to get this information.") + cpu_info = self._backup_cpuinfo() + if cpu_info is None: + return "-march=native" + + if cpu_info['arch'].startswith('PPC_'): + # gcc does not provide -march on PowerPC, use -mcpu instead + return '-mcpu=native' + return '-march=native' + + def is_cuda_enable(self): + try: + assert_no_cuda_mismatch(self.name) + return '-D__ENABLE_CUDA__' + except MissingCUDAException: + print(f"{WARNING} {self.name} cuda is missing or is incompatible with installed torch, " + "only cpu ops can be compiled!") + return '-D__DISABLE_CUDA__' + return '-D__DISABLE_CUDA__' + + def _backup_cpuinfo(self): + # Construct cpu_info dict from lscpu that is similar to what py-cpuinfo provides + if not self.command_exists('lscpu'): + self.warning(f"{self.name} attempted to query 'lscpu' after failing to use py-cpuinfo " + "to detect the CPU architecture. 
'lscpu' does not appear to exist on " + "your system, will fall back to use -march=native and non-vectorized execution.") + return None + result = subprocess.check_output('lscpu', shell=True) + result = result.decode('utf-8').strip().lower() + + cpu_info = {} + cpu_info['arch'] = None + cpu_info['flags'] = "" + if 'genuineintel' in result or 'authenticamd' in result: + cpu_info['arch'] = 'X86_64' + if 'avx512' in result: + cpu_info['flags'] += 'avx512,' + elif 'avx512f' in result: + cpu_info['flags'] += 'avx512f,' + if 'avx2' in result: + cpu_info['flags'] += 'avx2' + elif 'ppc64le' in result: + cpu_info['arch'] = "PPC_" + + return cpu_info + + def simd_width(self): + try: + from cpuinfo import get_cpu_info + except ImportError as e: + cpu_info = self._backup_cpuinfo() + if cpu_info is None: + return '-D__SCALAR__' + + try: + cpu_info = get_cpu_info() + except Exception as e: + self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), " + "falling back to `lscpu` to get this information.") + cpu_info = self._backup_cpuinfo() + if cpu_info is None: + return '-D__SCALAR__' + + if cpu_info['arch'] == 'X86_64': + if 'avx512' in cpu_info['flags'] or 'avx512f' in cpu_info['flags']: + return '-D__AVX512__' + elif 'avx2' in cpu_info['flags']: + return '-D__AVX256__' + return '-D__SCALAR__' + + def command_exists(self, cmd): + if '|' in cmd: + cmds = cmd.split("|") + else: + cmds = [cmd] + valid = False + for cmd in cmds: + result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True) + valid = valid or result.wait() == 0 + + if not valid and len(cmds) > 1: + print(f"{WARNING} {self.name} requires one of the following commands '{cmds}', but it does not exist!") + elif not valid and len(cmds) == 1: + print(f"{WARNING} {self.name} requires the '{cmd}' command, but it does not exist!") + return valid + + def warning(self, msg): + self.error_log = f"{msg}" + print(f"{WARNING} {msg}") + + def deepspeed_src_path(self, code_path): + if os.path.isabs(code_path): + return code_path + else: + return os.path.join(Path(__file__).parent.parent.absolute(), code_path) + + def builder(self): + from torch.utils.cpp_extension import CppExtension + include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())] + return CppExtension(name=self.absolute_name(), + sources=self.strip_empty_entries(self.sources()), + include_dirs=include_dirs, + extra_compile_args={'cxx': self.strip_empty_entries(self.cxx_args())}, + extra_link_args=self.strip_empty_entries(self.extra_ldflags())) + + def load(self, verbose=True): + if self.name in __class__._loaded_ops: + return __class__._loaded_ops[self.name] + + from deepspeed.git_version_info import installed_ops, torch_info, accelerator_name + from deepspeed.accelerator import get_accelerator + if installed_ops.get(self.name, False) and accelerator_name == get_accelerator()._name: + # Ensure the op we're about to load was compiled with the same + # torch/cuda versions we are currently using at runtime. 
+            self.validate_torch_version(torch_info)
+            if torch.cuda.is_available() and isinstance(self, CUDAOpBuilder):
+                self.validate_torch_op_version(torch_info)
+
+            op_module = importlib.import_module(self.absolute_name())
+            __class__._loaded_ops[self.name] = op_module
+            return op_module
+        else:
+            return self.jit_load(verbose)
+
+    def jit_load(self, verbose=True):
+        if not self.is_compatible(verbose):
+            raise RuntimeError(
+                f"Unable to JIT load the {self.name} op because it is not compatible with the installed hardware/software. {self.error_log}"
+            )
+        try:
+            import ninja  # noqa: F401 # type: ignore
+        except ImportError:
+            raise RuntimeError(f"Unable to JIT load the {self.name} op due to ninja not being installed.")
+
+        if isinstance(self, CUDAOpBuilder) and not self.is_rocm_pytorch():
+            self.build_for_cpu = not torch.cuda.is_available()
+
+        self.jit_mode = True
+        from torch.utils.cpp_extension import load
+
+        start_build = time.time()
+        sources = [os.path.abspath(self.deepspeed_src_path(path)) for path in self.sources()]
+        extra_include_paths = [os.path.abspath(self.deepspeed_src_path(path)) for path in self.include_paths()]
+
+        # Torch will try to apply whatever CCs are in the arch list at compile time; we have
+        # already set the intended targets ourselves, and we know which will be needed at runtime.
+        # This prevents CC collisions such as multiple __half implementations.
+        # Stash the arch list to reset it after the build.
+        torch_arch_list = None
+        if "TORCH_CUDA_ARCH_LIST" in os.environ:
+            torch_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST")
+            os.environ["TORCH_CUDA_ARCH_LIST"] = ""
+
+        nvcc_args = self.strip_empty_entries(self.nvcc_args())
+        cxx_args = self.strip_empty_entries(self.cxx_args())
+
+        if isinstance(self, CUDAOpBuilder):
+            if not self.build_for_cpu and self.enable_bf16:
+                cxx_args.append("-DBF16_AVAILABLE")
+                nvcc_args.append("-DBF16_AVAILABLE")
+                nvcc_args.append("-U__CUDA_NO_BFLOAT16_OPERATORS__")
+                nvcc_args.append("-U__CUDA_NO_BFLOAT162_OPERATORS__")
+
+        if self.is_rocm_pytorch():
+            cxx_args.append("-D__HIP_PLATFORM_AMD__=1")
+
+        op_module = load(name=self.name,
+                         sources=self.strip_empty_entries(sources),
+                         extra_include_paths=self.strip_empty_entries(extra_include_paths),
+                         extra_cflags=cxx_args,
+                         extra_cuda_cflags=nvcc_args,
+                         extra_ldflags=self.strip_empty_entries(self.extra_ldflags()),
+                         verbose=verbose)
+
+        build_duration = time.time() - start_build
+        if verbose:
+            print(f"Time to load {self.name} op: {build_duration} seconds")
+
+        # Reset the arch list so we are not silently removing it for other possible use cases
+        if torch_arch_list:
+            os.environ["TORCH_CUDA_ARCH_LIST"] = torch_arch_list
+
+        __class__._loaded_ops[self.name] = op_module
+
+        return op_module
+
+
+class CUDAOpBuilder(OpBuilder):
+
+    def compute_capability_args(self, cross_compile_archs=None):
+        """
+        Returns nvcc compute capability compile flags.
+
+        1. `TORCH_CUDA_ARCH_LIST` takes priority over `cross_compile_archs`.
+        2. If neither is set, default compute capabilities will be used.
+        3. Under `jit_mode`, the compute capabilities of all visible cards will be used, plus PTX.
+
+        Format:
+
+        - `TORCH_CUDA_ARCH_LIST` may use ; or whitespace separators. Examples:
+
+          TORCH_CUDA_ARCH_LIST="6.1;7.5;8.6" pip install ...
+          TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" pip install ...
+
+        - `cross_compile_archs` uses ; separator.
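+
+        As an illustrative sketch of the output (derived from the flag-building
+        loop below), the entry ``8.6+PTX`` is expected to yield::
+
+          -gencode=arch=compute_86,code=sm_86
+          -gencode=arch=compute_86,code=compute_86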
+ + """ + ccs = [] + if self.jit_mode: + # Compile for underlying architectures since we know those at runtime + for i in range(torch.cuda.device_count()): + CC_MAJOR, CC_MINOR = torch.cuda.get_device_capability(i) + cc = f"{CC_MAJOR}.{CC_MINOR}" + if cc not in ccs: + ccs.append(cc) + ccs = sorted(ccs) + ccs[-1] += '+PTX' + else: + # Cross-compile mode, compile for various architectures + # env override takes priority + cross_compile_archs_env = os.environ.get('TORCH_CUDA_ARCH_LIST', None) + if cross_compile_archs_env is not None: + if cross_compile_archs is not None: + print( + f"{WARNING} env var `TORCH_CUDA_ARCH_LIST={cross_compile_archs_env}` overrides `cross_compile_archs={cross_compile_archs}`" + ) + cross_compile_archs = cross_compile_archs_env.replace(' ', ';') + else: + if cross_compile_archs is None: + cross_compile_archs = get_default_compute_capabilities() + ccs = cross_compile_archs.split(';') + + ccs = self.filter_ccs(ccs) + if len(ccs) == 0: + raise RuntimeError( + f"Unable to load {self.name} op due to no compute capabilities remaining after filtering") + + args = [] + self.enable_bf16 = True + for cc in ccs: + num = cc[0] + cc[2] + args.append(f'-gencode=arch=compute_{num},code=sm_{num}') + if cc.endswith('+PTX'): + args.append(f'-gencode=arch=compute_{num},code=compute_{num}') + + if int(cc[0]) <= 7: + self.enable_bf16 = False + + return args + + def filter_ccs(self, ccs: List[str]): + """ + Prune any compute capabilities that are not compatible with the builder. Should log + which CCs have been pruned. + """ + return ccs + + def version_dependent_macros(self): + # Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456 + version_ge_1_1 = [] + if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0): + version_ge_1_1 = ['-DVERSION_GE_1_1'] + version_ge_1_3 = [] + if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2): + version_ge_1_3 = ['-DVERSION_GE_1_3'] + version_ge_1_5 = [] + if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4): + version_ge_1_5 = ['-DVERSION_GE_1_5'] + return version_ge_1_1 + version_ge_1_3 + version_ge_1_5 + + def is_compatible(self, verbose=True): + return super().is_compatible(verbose) + + def builder(self): + try: + if not self.is_rocm_pytorch(): + assert_no_cuda_mismatch(self.name) + self.build_for_cpu = False + except MissingCUDAException: + self.build_for_cpu = True + + if self.build_for_cpu: + from torch.utils.cpp_extension import CppExtension as ExtensionBuilder + else: + from torch.utils.cpp_extension import CUDAExtension as ExtensionBuilder + include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())] + compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())} if self.build_for_cpu else \ + {'cxx': self.strip_empty_entries(self.cxx_args()), \ + 'nvcc': self.strip_empty_entries(self.nvcc_args())} + + if not self.build_for_cpu and self.enable_bf16: + compile_args['cxx'].append("-DBF16_AVAILABLE") + + if self.is_rocm_pytorch(): + compile_args['cxx'].append("-D__HIP_PLATFORM_AMD__=1") + + cuda_ext = ExtensionBuilder(name=self.absolute_name(), + sources=self.strip_empty_entries(self.sources()), + include_dirs=include_dirs, + libraries=self.strip_empty_entries(self.libraries_args()), + extra_compile_args=compile_args, + extra_link_args=self.strip_empty_entries(self.extra_ldflags())) + + if self.is_rocm_pytorch(): + # hip converts paths to absolute, this converts back to relative + sources = cuda_ext.sources + curr_file = 
Path(__file__).parent.parent # ds root + for i in range(len(sources)): + src = Path(sources[i]) + if src.is_absolute(): + sources[i] = str(src.relative_to(curr_file)) + else: + sources[i] = str(src) + cuda_ext.sources = sources + return cuda_ext + + def hipify_extension(self): + if self.is_rocm_pytorch(): + from torch.utils.hipify import hipify_python + hipify_python.hipify( + project_directory=os.getcwd(), + output_directory=os.getcwd(), + header_include_dirs=self.include_paths(), + includes=[os.path.join(os.getcwd(), '*')], + extra_files=[os.path.abspath(s) for s in self.sources()], + show_detailed=True, + is_pytorch_extension=True, + hipify_extra_files_only=True, + ) + + def cxx_args(self): + if sys.platform == "win32": + return ['-O2'] + else: + return ['-O3', '-std=c++17', '-g', '-Wno-reorder'] + + def nvcc_args(self): + if self.build_for_cpu: + return [] + args = ['-O3'] + if self.is_rocm_pytorch(): + ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version() + args += [ + '-std=c++17', '-U__HIP_NO_HALF_OPERATORS__', '-U__HIP_NO_HALF_CONVERSIONS__', + '-U__HIP_NO_HALF2_OPERATORS__', + '-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR, + '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR + ] + else: + try: + nvcc_threads = int(os.getenv("DS_NVCC_THREADS", "")) + if nvcc_threads <= 0: + raise ValueError("") + except ValueError: + nvcc_threads = min(os.cpu_count(), 8) + + cuda_major, _ = installed_cuda_version() + args += [ + '-allow-unsupported-compiler' if sys.platform == "win32" else '', '--use_fast_math', + '-std=c++17' if cuda_major > 10 else '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', + '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', f'--threads={nvcc_threads}' + ] + if os.environ.get('DS_DEBUG_CUDA_BUILD', '0') == '1': + args.append('--ptxas-options=-v') + args += self.compute_capability_args() + return args + + def libraries_args(self): + if self.build_for_cpu: + return [] + + if sys.platform == "win32": + return ['cublas', 'curand'] + else: + return [] + + +class TorchCPUOpBuilder(CUDAOpBuilder): + + def extra_ldflags(self): + if self.build_for_cpu: + return ['-fopenmp'] + + if not self.is_rocm_pytorch(): + return ['-lcurand'] + + return [] + + def cxx_args(self): + import torch + args = [] + if not self.build_for_cpu: + if not self.is_rocm_pytorch(): + CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib64") + if not os.path.exists(CUDA_LIB64): + CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib") + else: + CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.ROCM_HOME, "lib") + + args += super().cxx_args() + args += [ + f'-L{CUDA_LIB64}', + '-lcudart', + '-lcublas', + '-g', + ] + + CPU_ARCH = self.cpu_arch() + SIMD_WIDTH = self.simd_width() + CUDA_ENABLE = self.is_cuda_enable() + args += [ + CPU_ARCH, + '-fopenmp', + SIMD_WIDTH, + CUDA_ENABLE, + ] + + return args diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..30238add3f905899b2be07f9c10eb8511fd1e01b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' + +from .comm import CCLCommBuilder, ShareMemCommBuilder +from .fused_adam import FusedAdamBuilder +from .cpu_adam import CPUAdamBuilder +from .no_impl import NotImplementedBuilder diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eeab2653793f5a98efe272cfc083ba626aa3033a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/builder.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52012c33b9fa1f812c709bb696211a596cd4cb44 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/builder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/comm.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/comm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de17a56ad975abf12a279fe968a993ab950eb0a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/comm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/cpu_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/cpu_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7457bde73a7efb064b1389cf1760296e04cfd92a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/cpu_adam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/fused_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/fused_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bc7027d4b02a6a96ee05d75fbf829e00726b53d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/fused_adam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/no_impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/no_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fba1ba02c05396cfc95dc940e232ce4041595c67 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/no_impl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/builder.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..d2bc8eacfa2540a2dcc132fc7f97745597685ca7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/builder.py @@ -0,0 +1,36 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import os
+
+try:
+    # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
+    # if successful this also means we're doing a local install and not JIT compile path
+    from op_builder import __deepspeed__  # noqa: F401 # type: ignore
+    from op_builder.builder import OpBuilder
+except ImportError:
+    from deepspeed.ops.op_builder.builder import OpBuilder
+
+
+class CPUOpBuilder(OpBuilder):
+
+    def builder(self):
+        from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
+        include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
+        compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())}
+
+        cpp_ext = ExtensionBuilder(name=self.absolute_name(),
+                                   sources=self.strip_empty_entries(self.sources()),
+                                   include_dirs=include_dirs,
+                                   libraries=self.strip_empty_entries(self.libraries_args()),
+                                   extra_compile_args=compile_args)
+
+        return cpp_ext
+
+    def cxx_args(self):
+        return ['-O3', '-g', '-Wno-reorder']
+
+    def libraries_args(self):
+        return []
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/comm.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/comm.py
new file mode 100644
index 0000000000000000000000000000000000000000..69b9226fd80643281f6ec84eb3a323d07f38195a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/comm.py
@@ -0,0 +1,70 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import os
+from .builder import CPUOpBuilder
+
+
+class CCLCommBuilder(CPUOpBuilder):
+    BUILD_VAR = "DS_BUILD_CCL_COMM"
+    NAME = "deepspeed_ccl_comm"
+
+    def __init__(self, name=None):
+        name = self.NAME if name is None else name
+        super().__init__(name=name)
+
+    def absolute_name(self):
+        return f'deepspeed.ops.comm.{self.NAME}_op'
+
+    def sources(self):
+        return ['csrc/cpu/comm/ccl.cpp', 'csrc/cpu/comm/shm.cpp']
+
+    def include_paths(self):
+        includes = ['csrc/cpu/includes']
+        return includes
+
+    def cxx_args(self):
+        return ['-O2', '-fopenmp']
+
+    def is_compatible(self, verbose=True):
+        # TODO: add soft compatibility check for private binary release.
+        # a soft check, as in we know it can be trivially changed.
+        return super().is_compatible(verbose)
+
+    def extra_ldflags(self):
+        ccl_root_path = os.environ.get("CCL_ROOT")
+        if ccl_root_path is None:
+            raise ValueError(
+                "Didn't find CCL_ROOT; install oneCCL from https://github.com/oneapi-src/oneCCL and source its environment to set this variable"
+            )
+        else:
+            return ['-lccl', f'-L{ccl_root_path}/lib']
+
+
+class ShareMemCommBuilder(CPUOpBuilder):
+    BUILD_VAR = "DS_BUILD_SHM_COMM"
+    NAME = "deepspeed_shm_comm"
+
+    def __init__(self, name=None):
+        name = self.NAME if name is None else name
+        super().__init__(name=name)
+
+    def absolute_name(self):
+        return f'deepspeed.ops.comm.{self.NAME}_op'
+
+    def sources(self):
+        return ['csrc/cpu/comm/shm_interface.cpp', 'csrc/cpu/comm/shm.cpp']
+
+    def include_paths(self):
+        includes = ['csrc/cpu/includes']
+        return includes
+
+    def cxx_args(self):
+        return ['-O2', '-fopenmp']
+
+    def is_compatible(self, verbose=True):
+        # TODO: add soft compatibility check for private binary release.
+        # a soft check, as in we know it can be trivially changed.
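+        # Illustrative note (an assumption based on the BUILD_VAR convention used
+        # throughout op_builder): the op can be pre-built at install time with
+        # `DS_BUILD_SHM_COMM=1 pip install deepspeed`; the soft check above keeps
+        # JIT compilation available either way.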
+        return super().is_compatible(verbose)
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/cpu_adam.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/cpu_adam.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c8438aea40d4ffd588a496844d692c9a17faaa6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/cpu_adam.py
@@ -0,0 +1,27 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .builder import CPUOpBuilder
+
+
+class CPUAdamBuilder(CPUOpBuilder):
+    BUILD_VAR = "DS_BUILD_CPU_ADAM"
+    NAME = "cpu_adam"
+
+    def __init__(self):
+        super().__init__(name=self.NAME)
+
+    def absolute_name(self):
+        return f'deepspeed.ops.adam.{self.NAME}_op'
+
+    def sources(self):
+        return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp']
+
+    def libraries_args(self):
+        args = super().libraries_args()
+        return args
+
+    def include_paths(self):
+        return ['csrc/includes']
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/fused_adam.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/fused_adam.py
new file mode 100644
index 0000000000000000000000000000000000000000..34b43825b09024136afdc44e916349ea3f5ce5ad
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/fused_adam.py
@@ -0,0 +1,23 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .builder import CPUOpBuilder
+
+
+class FusedAdamBuilder(CPUOpBuilder):
+    BUILD_VAR = "DS_BUILD_FUSED_ADAM"
+    NAME = "fused_adam"
+
+    def __init__(self):
+        super().__init__(name=self.NAME)
+
+    def absolute_name(self):
+        return f'deepspeed.ops.adam.{self.NAME}_op'
+
+    def sources(self):
+        return ['csrc/cpu/adam/fused_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp']
+
+    def include_paths(self):
+        return ['csrc/includes']
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/no_impl.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/no_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..69d114a9f1c0b0defc482c1fb143c261fc466125
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/no_impl.py
@@ -0,0 +1,24 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .builder import CPUOpBuilder
+
+
+class NotImplementedBuilder(CPUOpBuilder):
+    BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED"
+    NAME = "deepspeed_not_implemented"
+
+    def __init__(self, name=None):
+        name = self.NAME if name is None else name
+        super().__init__(name=name)
+
+    def absolute_name(self):
+        return f'deepspeed.ops.comm.{self.NAME}_op'
+
+    def load(self, verbose=True):
+        raise ValueError("This op has not been implemented on the CPU backend.")
+
+    def sources(self):
+        return []
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_lion.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_lion.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c16d10ebb4453bc222001a95867631c6e5509f2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_lion.py
@@ -0,0 +1,48 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +from .builder import TorchCPUOpBuilder + + +class CPULionBuilder(TorchCPUOpBuilder): + BUILD_VAR = "DS_BUILD_CPU_LION" + NAME = "cpu_lion" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.lion.{self.NAME}_op' + + def sources(self): + if self.build_for_cpu: + return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp'] + + return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp', 'csrc/common/custom_cuda_kernel.cu'] + + def libraries_args(self): + args = super().libraries_args() + if self.build_for_cpu: + return args + + if not self.is_rocm_pytorch(): + args += ['curand'] + + return args + + def include_paths(self): + import torch + if self.build_for_cpu: + CUDA_INCLUDE = [] + elif not self.is_rocm_pytorch(): + CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")] + else: + CUDA_INCLUDE = [ + os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include"), + os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "rocrand"), + os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "hiprand"), + ] + return ['csrc/includes'] + CUDA_INCLUDE diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/evoformer_attn.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/evoformer_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..6e7721f94e012b7c22c28d83ce74220b7c4ff9a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/evoformer_attn.py @@ -0,0 +1,72 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CUDAOpBuilder, installed_cuda_version +import os + + +class EvoformerAttnBuilder(CUDAOpBuilder): + BUILD_VAR = "DS_BUILD_EVOFORMER_ATTN" + NAME = "evoformer_attn" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + self.cutlass_path = os.environ.get('CUTLASS_PATH') + + def absolute_name(self): + return f'deepspeed.ops.{self.NAME}_op' + + def extra_ldflags(self): + if not self.is_rocm_pytorch(): + return ['-lcurand'] + else: + return [] + + def sources(self): + src_dir = 'csrc/deepspeed4science/evoformer_attn' + return [f'{src_dir}/attention.cpp', f'{src_dir}/attention_back.cu', f'{src_dir}/attention_cu.cu'] + + def nvcc_args(self): + args = super().nvcc_args() + try: + import torch + except ImportError: + self.warning("Please install torch if trying to pre-compile kernels") + return args + major = torch.cuda.get_device_properties(0).major #ignore-cuda + minor = torch.cuda.get_device_properties(0).minor #ignore-cuda + args.append(f"-DGPU_ARCH={major}{minor}") + return args + + def is_compatible(self, verbose=True): + try: + import torch + except ImportError: + self.warning("Please install torch if trying to pre-compile kernels") + return False + if self.cutlass_path is None: + self.warning("Please specify the CUTLASS repo directory as environment variable $CUTLASS_PATH") + return False + with open(f'{self.cutlass_path}/CHANGELOG.md', 'r') as f: + if '3.1.0' not in f.read(): + self.warning("Please use CUTLASS version >= 3.1.0") + return False + cuda_okay = True + if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda + sys_cuda_major, _ = installed_cuda_version() + torch_cuda_major = int(torch.version.cuda.split('.')[0]) + cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda + if 
cuda_capability < 7: + self.warning("Please use a GPU with compute capability >= 7.0") + cuda_okay = False + if torch_cuda_major < 11 or sys_cuda_major < 11: + self.warning("Please use CUDA 11+") + cuda_okay = False + return super().is_compatible(verbose) and cuda_okay + + def include_paths(self): + includes = [f'{self.cutlass_path}/include', f'{self.cutlass_path}/tools/util/include'] + return includes diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/fp_quantizer.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/fp_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..bafd3e0c33f610058b6e56b2f5eadf3160b514f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/fp_quantizer.py @@ -0,0 +1,63 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CUDAOpBuilder, installed_cuda_version + + +class FPQuantizerBuilder(CUDAOpBuilder): + BUILD_VAR = "DS_BUILD_FP_QUANTIZER" + NAME = "fp_quantizer" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + + def absolute_name(self): + return f'deepspeed.ops.fp_quantizer.{self.NAME}_op' + + def is_compatible(self, verbose=True): + try: + import torch + except ImportError: + self.warning("Please install torch if trying to pre-compile inference kernels") + return False + + cuda_okay = True + if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda + sys_cuda_major, _ = installed_cuda_version() + torch_cuda_major = int(torch.version.cuda.split('.')[0]) + cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda + if cuda_capability < 8: + self.warning("NVIDIA Inference is only supported on Ampere and newer architectures") + cuda_okay = False + if cuda_capability >= 8: + if torch_cuda_major < 11 or sys_cuda_major < 11: + self.warning("On Ampere and higher architectures please use CUDA 11+") + cuda_okay = False + return super().is_compatible(verbose) and cuda_okay + + def filter_ccs(self, ccs): + ccs_retained = [] + ccs_pruned = [] + for cc in ccs: + if int(cc[0]) >= 8: + ccs_retained.append(cc) + else: + ccs_pruned.append(cc) + if len(ccs_pruned) > 0: + self.warning(f"Filtered compute capabilities {ccs_pruned}") + return ccs_retained + + def sources(self): + return [ + "csrc/fp_quantizer/quantize.cu", + "csrc/fp_quantizer/quantize.cpp", + ] + + def extra_ldflags(self): + return ['-lcurand'] + + def include_paths(self): + return ['csrc/fp_quantizer/includes', 'csrc/includes'] diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_adam.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..ac6e4eeaaea5d9b2c2ee70de3d4261c6348abe94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_adam.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CUDAOpBuilder + +import sys + + +class FusedAdamBuilder(CUDAOpBuilder): + BUILD_VAR = "DS_BUILD_FUSED_ADAM" + NAME = "fused_adam" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adam.{self.NAME}_op' + + def sources(self): + return ['csrc/adam/fused_adam_frontend.cpp', 'csrc/adam/multi_tensor_adam.cu'] + + def include_paths(self): + return ['csrc/includes', 'csrc/adam'] + + def cxx_args(self): + args = super().cxx_args() + return args + self.version_dependent_macros() + + def nvcc_args(self): + nvcc_flags = ['-O3'] + self.version_dependent_macros() + if not self.is_rocm_pytorch(): + nvcc_flags.extend( + ['-allow-unsupported-compiler' if sys.platform == "win32" else '', '-lineinfo', '--use_fast_math'] + + self.compute_capability_args()) + return nvcc_flags diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lion.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lion.py new file mode 100644 index 0000000000000000000000000000000000000000..b900a8f2369df14d65a73235d360db21583ee4ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/fused_lion.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CUDAOpBuilder + +import sys + + +class FusedLionBuilder(CUDAOpBuilder): + BUILD_VAR = "DS_BUILD_FUSED_LION" + NAME = "fused_lion" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.lion.{self.NAME}_op' + + def sources(self): + return ['csrc/lion/fused_lion_frontend.cpp', 'csrc/lion/multi_tensor_lion.cu'] + + def include_paths(self): + return ['csrc/includes', 'csrc/lion'] + + def cxx_args(self): + args = super().cxx_args() + return args + self.version_dependent_macros() + + def nvcc_args(self): + nvcc_flags = ['-O3'] + self.version_dependent_macros() + if not self.is_rocm_pytorch(): + nvcc_flags.extend( + ['-allow-unsupported-compiler' if sys.platform == "win32" else '', '-lineinfo', '--use_fast_math'] + + self.compute_capability_args()) + return nvcc_flags diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/inference_core_ops.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/inference_core_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..d1957f39d9a86fd99d416a4f0e2d1622eb2eb0b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/inference_core_ops.py @@ -0,0 +1,95 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os + +from .builder import CUDAOpBuilder, installed_cuda_version + + +class InferenceCoreBuilder(CUDAOpBuilder): + BUILD_VAR = "DS_BUILD_INFERENCE_CORE_OPS" + NAME = "inference_core_ops" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + + def absolute_name(self): + return f'deepspeed.inference.v2.kernels{self.NAME}' + + def is_compatible(self, verbose=True): + try: + import torch + except ImportError: + self.warning("Please install torch if trying to pre-compile inference kernels") + return False + + cuda_okay = True + if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda + sys_cuda_major, _ = installed_cuda_version() + torch_cuda_major = int(torch.version.cuda.split('.')[0]) + cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda + if cuda_capability < 6: + self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") + cuda_okay = False + if cuda_capability >= 8: + if torch_cuda_major < 11 or sys_cuda_major < 11: + self.warning("On Ampere and higher architectures please use CUDA 11+") + cuda_okay = False + return super().is_compatible(verbose) and cuda_okay + + def filter_ccs(self, ccs): + ccs_retained = [] + ccs_pruned = [] + for cc in ccs: + if int(cc[0]) >= 6: + ccs_retained.append(cc) + else: + ccs_pruned.append(cc) + if len(ccs_pruned) > 0: + self.warning(f"Filtered compute capabilities {ccs_pruned}") + return ccs_retained + + def get_prefix(self): + ds_path = self.deepspeed_src_path("deepspeed") + return "deepspeed" if os.path.isdir(ds_path) else ".." + + def sources(self): + sources = [ + "inference/v2/kernels/core_ops/core_ops.cpp", + "inference/v2/kernels/core_ops/bias_activations/bias_activation.cpp", + "inference/v2/kernels/core_ops/bias_activations/bias_activation_cuda.cu", + "inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp", + "inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu", + "inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.cpp", + "inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_cuda.cu", + "inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels.cpp", + "inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels_cuda.cu", + "inference/v2/kernels/core_ops/cuda_linear/linear_kernels.cpp", + "inference/v2/kernels/core_ops/cuda_linear/linear_kernels_cuda.cu", + ] + + prefix = self.get_prefix() + sources = [os.path.join(prefix, src) for src in sources] + return sources + + def extra_ldflags(self): + return [] + + def include_paths(self): + sources = [ + 'inference/v2/kernels/core_ops/bias_activations', + 'inference/v2/kernels/core_ops/blas_kernels', + 'inference/v2/kernels/core_ops/cuda_layer_norm', + 'inference/v2/kernels/core_ops/cuda_rms_norm', + 'inference/v2/kernels/core_ops/gated_activations', + 'inference/v2/kernels/core_ops/cuda_linear', + 'inference/v2/kernels/includes', + ] + + prefix = self.get_prefix() + sources = [os.path.join(prefix, src) for src in sources] + + return sources diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/async_io.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/async_io.py new file mode 100644 index 0000000000000000000000000000000000000000..86560353b1c761f157512613f8f31ab0786fafd7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/async_io.py @@ -0,0 +1,103 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import distutils.spawn +import subprocess + +from .builder import NPUOpBuilder + + +class AsyncIOBuilder(NPUOpBuilder): + BUILD_VAR = "DS_BUILD_AIO" + NAME = "async_io" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.aio.{self.NAME}_op' + + def sources(self): + return [ + 'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp', + 'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp', + 'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp', + 'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp', + 'csrc/aio/py_lib/deepspeed_pin_tensor.cpp' + ] + + def include_paths(self): + args = super().include_paths() + args += ['csrc/aio/py_lib', 'csrc/aio/common'] + return args + + def cxx_args(self): + args = super().cxx_args() + # -O0 for improved debugging, since performance is bound by I/O + CPU_ARCH = self.cpu_arch() + SIMD_WIDTH = self.simd_width() + import torch # Keep this import here to avoid errors when building DeepSpeed wheel without torch installed + TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[0:2]) + if TORCH_MAJOR >= 2 and TORCH_MINOR >= 1: + CPP_STD = '-std=c++17' + else: + CPP_STD = '-std=c++14' + return args + [ + '-g', + '-Wall', + '-O0', + CPP_STD, + '-shared', + '-fPIC', + '-Wno-reorder', + CPU_ARCH, + '-fopenmp', + SIMD_WIDTH, + '-laio', + ] + + def extra_ldflags(self): + args = super().extra_ldflags() + return args + ['-laio'] + + def check_for_libaio_pkg(self): + libs = dict( + dpkg=["-l", "libaio-dev", "apt"], + pacman=["-Q", "libaio", "pacman"], + rpm=["-q", "libaio-devel", "yum"], + ) + + found = False + for pkgmgr, data in libs.items(): + flag, lib, tool = data + path = distutils.spawn.find_executable(pkgmgr) + if path is not None: + cmd = f"{pkgmgr} {flag} {lib}" + result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + if result.wait() == 0: + found = True + else: + self.warning(f"{self.NAME}: please install the {lib} package with {tool}") + break + return found + + def is_compatible(self, verbose=True): + # Check for the existence of libaio by using distutils + # to compile and link a test program that calls io_submit, + # which is a function provided by libaio that is used in the async_io op. + # If needed, one can define -I and -L entries in CFLAGS and LDFLAGS + # respectively to specify the directories for libaio.h and libaio.so. + aio_compatible = self.has_function('io_pgetevents', ('aio', )) + if verbose and not aio_compatible: + self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.") + + # Check for the libaio package via known package managers + # to print suggestions on which package to install. + self.check_for_libaio_pkg() + + self.warning( + "If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found." 
+ ) + return super().is_compatible(verbose) and aio_compatible diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_adam.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..a4e9569c0f336122cd003a2df5e196527d84666c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_adam.py @@ -0,0 +1,25 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import NPUOpBuilder + + +class CPUAdamBuilder(NPUOpBuilder): + BUILD_VAR = "DS_BUILD_CPU_ADAM" + NAME = "cpu_adam" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adam.{self.NAME}_op' + + def sources(self): + return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp'] + + def include_paths(self): + args = super().include_paths() + args += ['csrc/includes'] + return args diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/fused_adam.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/fused_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..fc1bc83c7cc7ca86bf8b97ea0fca773b79e4fb3b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/fused_adam.py @@ -0,0 +1,74 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import NPUOpBuilder + +try: + import torch_npu +except ImportError as e: + pass + + +class NPUFusedAdam: + + @staticmethod + def multi_tensor_adam(chunk_size, noop_flag_buffer, tensor_lists, lr, beta1, beta2, epsilon, step, adam_w_mode, + bias_correction, weight_decay, *args): + bias_correction1 = beta1**step + bias_correction2 = beta2**step + + # iteration group['params'] + for i in range(len(tensor_lists[0])): + grad_flat = tensor_lists[0][i] + param_flat = tensor_lists[1][i] + m_flat = tensor_lists[2][i] + v_flat = tensor_lists[3][i] + + if adam_w_mode: + param_flat.data, m_flat, v_flat = torch_npu.npu_apply_adam_w( + bias_correction1, + bias_correction2, + lr, + weight_decay, + beta1, + beta2, + epsilon, + grad_flat, + None, # max_grad_norm + False, # amsgrad + False, # maximize + out=(param_flat.data, m_flat, v_flat)) + else: + param_flat.data, m_flat, v_flat = torch_npu.npu_apply_adam( + bias_correction1, + bias_correction2, + lr, + beta1, + beta2, + epsilon, + grad_flat, + False, # use_locking + False, # use_nesterov + out=(param_flat.data, m_flat, v_flat)) + + +class FusedAdamBuilder(NPUOpBuilder): + BUILD_VAR = "DS_BUILD_FUSED_ADAM" + NAME = "fused_adam" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adam.{self.NAME}_op' + + def sources(self): + return [] + + def include_paths(self): + return [] + + def load(self, verbose=True): + return NPUFusedAdam diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/inference.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..46f28c0d401161f70431776a5a53387235ebb5ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/inference.py @@ -0,0 +1,307 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from enum import IntEnum +from .builder import NPUOpBuilder + +try: + import torch + import torch_npu +except ImportError as e: + pass + + +class ActivationFuncType(IntEnum): + UNKNOWN = 0 + GELU = 1 + ReLU = 2 + GATED_GELU = 3 + GATED_SILU = 4 + + +class InferenceContext: + _workspace = None + + _seed = 42 + _curr_offset = 0 + _stream = 0 + _free_memory_size = 0 + _num_tokens = 1 + _attention_unfused_workspace_offset = 0 + _workSpaceSize = 0 + + workSpaceSize = 0 + kv_caches = None + + @staticmethod + def reset_tokens(initial_tokens=1): + InferenceContext._num_tokens = initial_tokens + + @staticmethod + def current_tokens(): + return InferenceContext._num_tokens + + @staticmethod + def GetWorkSpace(): + return InferenceContext._workspace + + +class NPUInference: + + @staticmethod + def layer_norm(inputs, gamma, beta, epsilon): + return torch.nn.functional.layer_norm(inputs, [inputs.shape[-1]], gamma, beta, eps=epsilon) + + @staticmethod + def _qkv_gemm(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + inp_norm = torch.nn.functional.layer_norm(inputs, (inputs.shape[2], ), gamma, beta, eps) + weight = weight.t() if transpose else weight + tmp = torch.matmul(inp_norm, weight) + if add_bias: + tmp += bias + output = [tmp, inp_norm] + return output + + @staticmethod + def qkv_gemm_fp16(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + return NPUInference._qkv_gemm(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose) + + @staticmethod + def qkv_gemm_bf16(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + return NPUInference._qkv_gemm(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose) + + @staticmethod + def qkv_gemm_fp32(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + return NPUInference._qkv_gemm(inputs, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose) + + @staticmethod + def _bias_add_transform_0213(vals, bias, hidden_dim, seq_length, seq_offset, heads, num_kv, rotary_dim, + rotate_half, rotate_every_two, rope_theta): + bsz, _, _ = vals.shape + q = vals[..., :hidden_dim].reshape(bsz, seq_length, heads, -1) + k = vals[..., hidden_dim:hidden_dim + num_kv * (hidden_dim // heads)].reshape(bsz, seq_length, num_kv, -1) + v = vals[..., hidden_dim + num_kv * (hidden_dim // heads):] + + if rotary_dim > 0 and rotate_every_two: + # sin, cos may use cache + seq_id = torch.arange(0, seq_length).to("npu") + inv_freq = torch.arange(0, rotary_dim, 2) / rotary_dim + inv_freq = inv_freq.to("npu") + inv_freq = 1.0 / torch.pow(rope_theta, inv_freq) + inv_freq = torch.outer(seq_id, inv_freq) + sin = inv_freq.sin() + cos = inv_freq.cos() + # shape: [bsz=1, seq_len, heads=1, rotary_dim] + sin = sin.view(-1, seq_length, 1, rotary_dim // 2).repeat_interleave(2, dim=-1) + cos = cos.view(-1, seq_length, 1, rotary_dim // 2).repeat_interleave(2, dim=-1) + + q_pos, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] + k_pos, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] + + q_pos = torch_npu.npu_rotary_mul(q_pos, cos, sin) + q = torch.cat([q_pos, q_pass], dim=-1) + k_pos = torch_npu.npu_rotary_mul(k_pos, cos, sin) + k = torch.cat([k_pos, k_pass], dim=-1) + + output = q.reshape(bsz, seq_length, -1).contiguous() # [b, s, H] + k_cache = k.reshape(bsz, seq_length, heads, -1).transpose(1, 2).contiguous() # [b, n, s, d] + v_cache = v.reshape(bsz, seq_length, heads, 
-1).transpose(1, 2).contiguous()  # [b, n, s, d]
+        return output, k_cache, v_cache
+
+    @staticmethod
+    def _softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv,
+                         norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id,
+                         num_layers, alibi, rope_theta):
+        bsz, seq_len, k = query_key_value.size()
+        k = k // (heads + 2 * (num_kv if num_kv > 0 else heads))
+        hidden_dim = heads * k
+
+        is_prompt = seq_len > 1
+        if not InferenceContext.kv_caches:
+            InferenceContext.kv_caches = [[None, None] for _ in range(num_layers)]
+        if is_prompt:
+            InferenceContext.reset_tokens(seq_len)
+            InferenceContext.kv_caches[layer_id] = [None, None]
+
+        soft_len = InferenceContext.current_tokens()
+        workspace = InferenceContext.GetWorkSpace()
+        seq_offset = 0 if is_prompt else soft_len - 1
+
+        q, k, v = NPUInference._bias_add_transform_0213(vals=query_key_value,
+                                                        bias=None,
+                                                        hidden_dim=hidden_dim,
+                                                        seq_length=seq_len,
+                                                        seq_offset=seq_offset,
+                                                        heads=heads,
+                                                        num_kv=num_kv if num_kv > 0 else heads,
+                                                        rotary_dim=rotary_dim,
+                                                        rotate_half=rotate_half,
+                                                        rotate_every_two=rotate_every_two,
+                                                        rope_theta=rope_theta)
+
+        if not is_prompt:
+            k_cache, v_cache = InferenceContext.kv_caches[layer_id]
+            if k_cache is not None:
+                k = torch.cat([k_cache, k], dim=2)
+                v = torch.cat([v_cache, v], dim=2)
+        InferenceContext.kv_caches[layer_id] = [k, v]
+        seq_len = k.shape[2]
+
+        layer_scale = max(1, layer_id) if len(alibi.size()) > 1 else 1.0
+        alpha = norm_factor * norm_factor / layer_scale
+
+        output = torch_npu.npu_fusion_attention(q,
+                                                k.transpose(1, 2).reshape(bsz, seq_len, -1).contiguous(),
+                                                v.transpose(1, 2).reshape(bsz, seq_len, -1).contiguous(),
+                                                heads,
+                                                "BSH",
+                                                pse=None,
+                                                padding_mask=None,
+                                                atten_mask=attn_mask.bool(),
+                                                scale=alpha,
+                                                pre_tockens=65536,
+                                                next_tockens=65536,
+                                                keep_prob=1,
+                                                inner_precise=0)[0]
+
+        return output, k, v
+
+    @staticmethod
+    def softmax_context_fp16(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv,
+                             norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id,
+                             num_layers, alibi, rope_theta):
+        return NPUInference._softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two,
+                                             heads, num_kv, norm_factor, triangular_masking, local_attention,
+                                             window_size, no_masking, layer_id, num_layers, alibi, rope_theta)
+
+    @staticmethod
+    def softmax_context_bf16(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv,
+                             norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id,
+                             num_layers, alibi, rope_theta):
+        return NPUInference._softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two,
+                                             heads, num_kv, norm_factor, triangular_masking, local_attention,
+                                             window_size, no_masking, layer_id, num_layers, alibi, rope_theta)
+
+    @staticmethod
+    def softmax_context_fp32(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv,
+                             norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id,
+                             num_layers, alibi, rope_theta):
+        return NPUInference._softmax_context(query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two,
+                                             heads, num_kv, norm_factor, triangular_masking, local_attention,
+                                             window_size, no_masking, layer_id, num_layers, alibi, rope_theta)
+
+    @staticmethod
+    def _vector_matmul(input, weight, async_op, q_scale, q_int8, transposed_mode):
+        if transposed_mode:
+            return torch.matmul(input,
weight.t()) + return torch.matmul(input, weight) + + @staticmethod + def vector_matmul_fp16(input, weight, async_op, q_scale, q_int8, transposed_mode): + return NPUInference._vector_matmul(input, weight, async_op, q_scale, q_int8, transposed_mode) + + @staticmethod + def vector_matmul_bf16(input, weight, async_op, q_scale, q_int8, transposed_mode): + return NPUInference._vector_matmul(input, weight, async_op, q_scale, q_int8, transposed_mode) + + @staticmethod + def vector_matmul_fp32(input, weight, async_op, q_scale, q_int8, transposed_mode): + return NPUInference._vector_matmul(input, weight, async_op, q_scale, q_int8, transposed_mode) + + @staticmethod + def _mlp_gemm(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm, + mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): + if mlp_after_attn: + residual_add = torch.nn.functional.layer_norm(input + residual + input_bias, (input.shape[-1], ), gamma, + beta, eps) + else: + residual_add = torch.nn.functional.layer_norm(input, (input.shape[-1], ), gamma, beta, eps) + + weight_interm = weight_interm.t() if transpose else weight_interm + tmp = torch.matmul(residual_add, weight_interm) + if mlp_act_func_type == ActivationFuncType.GELU: + tmp = torch.nn.functional.gelu(tmp + bias) + elif mlp_act_func_type == ActivationFuncType.ReLU: + tmp = torch.nn.functional.relu(tmp + bias) + else: + raise Exception('Unsupported ActivationFuncType {}'.format(mlp_act_func_type)) + output = torch.matmul(tmp, weight_out.t()) + return output, residual_add + + @staticmethod + def mlp_gemm_fp16(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm, + mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): + return NPUInference._mlp_gemm(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, + pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, + mlp_act_func_type, transpose) + + @staticmethod + def mlp_gemm_bf16(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm, + mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): + return NPUInference._mlp_gemm(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, + pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, + mlp_act_func_type, transpose) + + @staticmethod + def mlp_gemm_fp32(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm, + mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): + return NPUInference._mlp_gemm(input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, + pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, + mlp_act_func_type, transpose) + + @staticmethod + def _residual_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + if mlp_after_attn: + if pre_layer_norm: + tmp = (residual.float() + attention_output.float() + attention_bias.float() + + final_bias.float()) / mp_size + hidden_state.float() + else: + tmp = residual.float() + hidden_state.float() + final_bias.float() + else: + if add_bias: + residual += attention_bias.float() + tmp = hidden_state.float() + attention_output.float() + (residual.float() + final_bias.float()) / mp_size + + input_dtype = hidden_state.dtype + residual.set_(tmp.to(input_dtype)) + + @staticmethod + def 
residual_add_bias_fp16(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + return NPUInference._residual_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, + mp_size, mlp_after_attn, add_bias, pre_layer_norm) + + @staticmethod + def residual_add_bias_bf16(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + return NPUInference._residual_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, + mp_size, mlp_after_attn, add_bias, pre_layer_norm) + + @staticmethod + def residual_add_bias_fp32(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + return NPUInference._residual_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, + mp_size, mlp_after_attn, add_bias, pre_layer_norm) + + +class InferenceBuilder(NPUOpBuilder): + BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE" + NAME = "transformer_inference" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.transformer.inference.{self.NAME}_op' + + def sources(self): + return [] + + def include_paths(self): + return [] + + def load(self, verbose=True): + return NPUInference diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/ragged_utils.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/ragged_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..89450e1fd30d9f4e03d578a5c7deebc4d26a50c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/ragged_utils.py @@ -0,0 +1,77 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os + +from .builder import CUDAOpBuilder, installed_cuda_version + + +class RaggedUtilsBuilder(CUDAOpBuilder): + BUILD_VAR = "DS_BUILD_RAGGED_OPS" + NAME = "ragged_ops" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + + def absolute_name(self): + return f'deepspeed.inference.v2.{self.NAME}' + + def is_compatible(self, verbose=True): + try: + import torch + except ImportError: + self.warning("Please install torch if trying to pre-compile inference kernels") + return False + + cuda_okay = True + if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda + sys_cuda_major, _ = installed_cuda_version() + torch_cuda_major = int(torch.version.cuda.split('.')[0]) + cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda + if cuda_capability < 6: + self.warning("NVIDIA Inference is only supported on Pascal and newer architectures") + cuda_okay = False + if cuda_capability >= 8: + if torch_cuda_major < 11 or sys_cuda_major < 11: + self.warning("On Ampere and higher architectures please use CUDA 11+") + cuda_okay = False + return super().is_compatible(verbose) and cuda_okay + + def filter_ccs(self, ccs): + ccs_retained = [] + ccs_pruned = [] + for cc in ccs: + if int(cc[0]) >= 6: + ccs_retained.append(cc) + else: + ccs_pruned.append(cc) + if len(ccs_pruned) > 0: + self.warning(f"Filtered compute capabilities {ccs_pruned}") + return ccs_retained + + def get_prefix(self): + ds_path = self.deepspeed_src_path("deepspeed") + return "deepspeed" if os.path.isdir(ds_path) else ".." 
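+        # Descriptive note: this prefix lets the same relative source lists resolve
+        # both from a source checkout (under "deepspeed/") and from an installed
+        # package, where paths are taken as ".." relative to the op_builder directory.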
+ + def sources(self): + sources = [ + "inference/v2/ragged/csrc/fast_host_buffer.cu", + "inference/v2/ragged/csrc/ragged_ops.cpp", + ] + + prefix = self.get_prefix() + sources = [os.path.join(prefix, src) for src in sources] + return sources + + def extra_ldflags(self): + return [] + + def include_paths(self): + include_dirs = ['inference/v2/ragged/includes', 'inference/v2/kernels/includes'] + prefix = self.get_prefix() + includes = [os.path.join(prefix, include_dir) for include_dir in include_dirs] + + return includes diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/random_ltd.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/random_ltd.py new file mode 100644 index 0000000000000000000000000000000000000000..54af7150fb36f9eb8bd6a295648d8a11da8d8373 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/random_ltd.py @@ -0,0 +1,34 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CUDAOpBuilder + + +class RandomLTDBuilder(CUDAOpBuilder): + BUILD_VAR = "DS_BUILD_RANDOM_LTD" + NAME = "random_ltd" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + + def absolute_name(self): + return f'deepspeed.ops.{self.NAME}_op' + + def extra_ldflags(self): + if not self.is_rocm_pytorch(): + return ['-lcurand'] + else: + return [] + + def sources(self): + return [ + 'csrc/random_ltd/pt_binding.cpp', 'csrc/random_ltd/gather_scatter.cu', + 'csrc/random_ltd/slice_attn_masks.cu', 'csrc/random_ltd/token_sort.cu' + ] + + def include_paths(self): + includes = ['csrc/includes'] + return includes diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/spatial_inference.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/spatial_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..59caf57f938db217c52fcfa483e47fc3e451e6ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/spatial_inference.py @@ -0,0 +1,45 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CUDAOpBuilder, installed_cuda_version + + +class SpatialInferenceBuilder(CUDAOpBuilder): + BUILD_VAR = "DS_BUILD_SPATIAL_INFERENCE" + NAME = "spatial_inference" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + + def absolute_name(self): + return f'deepspeed.ops.spatial.{self.NAME}_op' + + def is_compatible(self, verbose=True): + try: + import torch + except ImportError: + self.warning("Please install torch if trying to pre-compile inference kernels") + return False + + cuda_okay = True + if not self.is_rocm_pytorch() and torch.cuda.is_available(): + sys_cuda_major, _ = installed_cuda_version() + torch_cuda_major = int(torch.version.cuda.split('.')[0]) + cuda_capability = torch.cuda.get_device_properties(0).major + if cuda_capability >= 8: + if torch_cuda_major < 11 or sys_cuda_major < 11: + self.warning("On Ampere and higher architectures please use CUDA 11+") + cuda_okay = False + return super().is_compatible(verbose) and cuda_okay + + def sources(self): + return [ + 'csrc/spatial/csrc/opt_bias_add.cu', + 'csrc/spatial/csrc/pt_binding.cpp', + ] + + def include_paths(self): + return ['csrc/spatial/includes', 'csrc/includes'] diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/stochastic_transformer.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/stochastic_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..52b02a3c629e9d537b8e22139f48f5335396e4ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/stochastic_transformer.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .transformer import TransformerBuilder + + +class StochasticTransformerBuilder(TransformerBuilder): + BUILD_VAR = "DS_BUILD_STOCHASTIC_TRANSFORMER" + NAME = "stochastic_transformer" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.transformer.{self.NAME}_op' + + def nvcc_args(self): + args = super().nvcc_args() + args.append('-D__STOCHASTIC_MODE__') + return args diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2815f164e5f2598b5f6f9a4badfbc0a8969c7a5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .cpu_adam import CPUAdamBuilder +from .cpu_adagrad import CPUAdagradBuilder +from .fused_adam import FusedAdamBuilder +from .async_io import AsyncIOBuilder diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ea7d53c8ed107cdf32ae3b4d78b7a6d52e80e73 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/async_io.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/async_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9dcc61f8b9b8f016d1c51e95173cefe1d7c3806 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/async_io.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/builder.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be1ef7beac3ee3ca4e7bb70ec458153651402d8d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/builder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adagrad.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adagrad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c089884b224dd419c4ec2c5f7fc532882177d5e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adagrad.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e211047bd90e529a0983f91359d7de0712b2d93 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/cpu_adam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/fused_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/fused_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c34c0863c6085a75f45feb2aac327ca4b114f87 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/__pycache__/fused_adam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/async_io.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/async_io.py new file mode 100644 index 0000000000000000000000000000000000000000..0fd43f72e60ea19f4dd167062eb1334571eee0eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/async_io.py @@ -0,0 +1,99 @@ +# Copyright (c) Microsoft Corporation. 
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/async_io.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/async_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fd43f72e60ea19f4dd167062eb1334571eee0eb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/async_io.py
@@ -0,0 +1,99 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import distutils.spawn
+import subprocess
+
+from .builder import OpBuilder
+
+
+class AsyncIOBuilder(OpBuilder):
+    BUILD_VAR = "DS_BUILD_AIO"
+    NAME = "async_io"
+
+    def __init__(self):
+        super().__init__(name=self.NAME)
+
+    def absolute_name(self):
+        return f'deepspeed.ops.aio.{self.NAME}_op'
+
+    def sources(self):
+        return [
+            'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp',
+            'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp',
+            'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp',
+            'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp',
+            'csrc/aio/py_lib/deepspeed_pin_tensor.cpp'
+        ]
+
+    def include_paths(self):
+        return ['csrc/aio/py_lib', 'csrc/aio/common']
+
+    def cxx_args(self):
+        import torch
+        # -O0 for improved debugging, since performance is bound by I/O
+        CPU_ARCH = self.cpu_arch()
+        SIMD_WIDTH = self.simd_width()
+        TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[0:2])
+        if (TORCH_MAJOR, TORCH_MINOR) >= (2, 1):  # tuple compare so e.g. torch 3.0 also selects C++17
+            CPP_STD = '-std=c++17'
+        else:
+            CPP_STD = '-std=c++14'
+        return [
+            '-g',
+            '-Wall',
+            '-O0',
+            CPP_STD,
+            '-shared',
+            '-fPIC',
+            '-Wno-reorder',
+            CPU_ARCH,
+            '-fopenmp',
+            SIMD_WIDTH,
+            '-laio',
+        ]
+
+    def extra_ldflags(self):
+        return ['-laio']
+
+    def check_for_libaio_pkg(self):
+        libs = dict(
+            dpkg=["-l", "libaio-dev", "apt"],
+            pacman=["-Q", "libaio", "pacman"],
+            rpm=["-q", "libaio-devel", "yum"],
+        )
+
+        found = False
+        for pkgmgr, data in libs.items():
+            flag, lib, tool = data
+            path = distutils.spawn.find_executable(pkgmgr)
+            if path is not None:
+                cmd = f"{pkgmgr} {flag} {lib}"
+                result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+                if result.wait() == 0:
+                    found = True
+                else:
+                    self.warning(f"{self.NAME}: please install the {lib} package with {tool}")
+                break
+        return found
+
+    def is_compatible(self, verbose=True):
+        # Check for the existence of libaio by using distutils
+        # to compile and link a test program that calls io_pgetevents,
+        # which is a function provided by libaio that is used in the async_io op.
+        # If needed, one can define -I and -L entries in CFLAGS and LDFLAGS
+        # respectively to specify the directories for libaio.h and libaio.so.
+        aio_compatible = self.has_function('io_pgetevents', ('aio', ))
+        if verbose and not aio_compatible:
+            self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.")
+
+            # Check for the libaio package via known package managers
+            # to print suggestions on which package to install.
+            self.check_for_libaio_pkg()
+
+            self.warning(
+                "If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found."
+            )
+        return super().is_compatible(verbose) and aio_compatible
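Because is_compatible() both link-tests io_pgetevents against libaio and prints package-manager hints on failure, it doubles as a cheap preflight before enabling the aio path. A hedged sketch, assuming Linux and an importable deepspeed package:

    # Illustrative preflight, not part of the diff.
    from deepspeed.ops.op_builder import AsyncIOBuilder

    builder = AsyncIOBuilder()
    if builder.is_compatible(verbose=True):
        aio_op = builder.load()  # pre-built module if installed, else JIT build linking -laio
    else:
        raise RuntimeError("async_io needs libaio, e.g. install libaio-dev first")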
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/builder.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..81b15f197f4375b6138f50cf865b2dd64194c629
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/builder.py
@@ -0,0 +1,131 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import os
+import time
+import importlib
+
+try:
+    # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
+    # if successful this also means we're doing a local install and not JIT compile path
+    from op_builder import __deepspeed__  # noqa: F401 # type: ignore
+    from op_builder.builder import OpBuilder
+except ImportError:
+    from deepspeed.ops.op_builder.builder import OpBuilder
+
+
+class SYCLOpBuilder(OpBuilder):
+
+    def builder(self):
+        try:
+            from intel_extension_for_pytorch.xpu.cpp_extension import DPCPPExtension
+        except ImportError:
+            from intel_extension_for_pytorch.xpu.utils import DPCPPExtension
+        include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
+        print("dpcpp sources = {}".format(self.sources()))
+        dpcpp_ext = DPCPPExtension(name=self.absolute_name(),
+                                   sources=self.strip_empty_entries(self.sources()),
+                                   include_dirs=include_dirs,
+                                   extra_compile_args={
+                                       'cxx': self.strip_empty_entries(self.cxx_args()),
+                                   },
+                                   extra_link_args=self.strip_empty_entries(self.fixed_aotflags()))
+        return dpcpp_ext
+
+    def version_dependent_macros(self):
+        try:
+            from op_builder.builder import TORCH_MAJOR, TORCH_MINOR
+        except ImportError:
+            from deepspeed.ops.op_builder.builder import TORCH_MAJOR, TORCH_MINOR
+        # Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456
+        version_ge_1_1 = []
+        if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
+            version_ge_1_1 = ['-DVERSION_GE_1_1']
+        version_ge_1_3 = []
+        if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
+            version_ge_1_3 = ['-DVERSION_GE_1_3']
+        version_ge_1_5 = []
+        if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
+            version_ge_1_5 = ['-DVERSION_GE_1_5']
+        return version_ge_1_1 + version_ge_1_3 + version_ge_1_5
+
+    def cxx_args(self):
+        cxx_flags = [
+            '-fsycl', '-fsycl-targets=spir64_gen', '-g', '-gdwarf-4', '-O3', '-std=c++17', '-fPIC', '-DMKL_ILP64',
+            '-fno-strict-aliasing'
+        ]
+        if os.environ.get('USE_MKL_GEMM'):
+            cxx_flags.append('-DUSE_MKL_GEMM')
+        return cxx_flags
+
+    def extra_ldflags(self):
+        return [
+            '-fPIC', '-fsycl', '-fsycl-targets=spir64_gen', '-fsycl-max-parallel-link-jobs=8',
+            '-Xs "-options -cl-poison-unsupported-fp64-kernels,cl-intel-enable-auto-large-GRF-mode"',
+            '-Xs "-device pvc"', '-Wl,-export-dynamic'
+        ]
+
+    def fixed_aotflags(self):
+        return [
+            '-fsycl', '-fsycl-targets=spir64_gen', '-fsycl-max-parallel-link-jobs=8', '-Xs',
+            "-options -cl-poison-unsupported-fp64-kernels,cl-intel-enable-auto-large-GRF-mode", '-Xs', "-device pvc"
+        ]
+
+    def load(self, verbose=True):
+        from deepspeed.git_version_info import installed_ops, torch_info, accelerator_name  # noqa: F401
+        from deepspeed.accelerator import get_accelerator
+        if installed_ops.get(self.name, False) and accelerator_name == get_accelerator()._name:
+            return importlib.import_module(self.absolute_name())
+        else:
+            return self.jit_load(verbose)
+
+    def jit_load(self, verbose=True):
+        if not self.is_compatible(verbose):
+            raise RuntimeError(
+                f"Unable to JIT load the {self.name} op: it is not compatible with the current hardware/software setup. {self.error_log}"
+            )
+        try:
+            import ninja  # noqa: F401
+        except ImportError:
+            raise RuntimeError(f"Unable to JIT load the {self.name} op due to ninja not being installed.")
+
+        self.jit_mode = True
+        from intel_extension_for_pytorch.xpu.cpp_extension import load
+
+        start_build = time.time()
+        # Recognize relative paths as absolute paths for jit load
+
+        sources = [self.deepspeed_src_path(path) for path in self.sources()]
+        extra_include_paths = [self.deepspeed_src_path(path) for path in self.include_paths()]
+
+        # Torch will try and apply whatever CCs are in the arch list at compile time,
+        # we have already set the intended targets ourselves we know that will be
+        # needed at runtime. This prevents CC collisions such as multiple __half
+        # implementations. Stash arch list to reset after build.
+        '''
+        torch_arch_list = None
+        if "TORCH_CUDA_ARCH_LIST" in os.environ:
+            torch_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST")
+            os.environ["TORCH_CUDA_ARCH_LIST"] = ""
+        '''
+
+        op_module = load(
+            name=self.name,
+            sources=self.strip_empty_entries(sources),
+            extra_include_paths=self.strip_empty_entries(extra_include_paths),
+            extra_cflags=self.strip_empty_entries(self.cxx_args()),
+            # extra_cuda_cflags=self.strip_empty_entries(self.nvcc_args()),
+            extra_ldflags=self.strip_empty_entries(self.extra_ldflags()),
+            verbose=verbose)
+
+        build_duration = time.time() - start_build
+        if verbose:
+            print(f"Time to load {self.name} op: {build_duration} seconds")
+        '''
+        # Reset arch list so we are not silently removing it for other possible use cases
+        if torch_arch_list:
+            os.environ["TORCH_CUDA_ARCH_LIST"] = torch_arch_list
+        '''
+        return op_module
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adagrad.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adagrad.py
new file mode 100644
index 0000000000000000000000000000000000000000..18f80848e1b800de7f8826405bb9ea9f500db263
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adagrad.py
@@ -0,0 +1,23 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .builder import SYCLOpBuilder
+
+
+class CPUAdagradBuilder(SYCLOpBuilder):
+    BUILD_VAR = "DS_BUILD_CPU_ADAGRAD"
+    NAME = "cpu_adagrad"
+
+    def __init__(self):
+        super().__init__(name=self.NAME)
+
+    def absolute_name(self):
+        return f'deepspeed.ops.adagrad.{self.NAME}_op'
+
+    def sources(self):
+        return ['csrc/xpu/adagrad/cpu_adagrad.cpp', 'csrc/xpu/common/custom_cuda_kernel.dp.cpp']
+
+    def include_paths(self):
+        return ['csrc/xpu/includes']
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adam.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adam.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c7d4d11983987889277f21073e0433bc9440444
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/cpu_adam.py
@@ -0,0 +1,29 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .builder import SYCLOpBuilder
+
+
+class CPUAdamBuilder(SYCLOpBuilder):
+    BUILD_VAR = "DS_BUILD_CPU_ADAM"
+    NAME = "cpu_adam"
+
+    def __init__(self):
+        super().__init__(name=self.NAME)
+
+    def absolute_name(self):
+        return f'deepspeed.ops.adam.{self.NAME}_op'
+
+    def sources(self):
+        if self.build_for_cpu:
+            return ['csrc/xpu/adam/cpu_adam.cpp', 'csrc/xpu/adam/cpu_adam_impl.cpp']
+
+        return [
+            'csrc/xpu/adam/cpu_adam.cpp', 'csrc/xpu/adam/cpu_adam_impl.cpp',
+            'csrc/xpu/common/custom_cuda_kernel.dp.cpp'
+        ]
+
+    def include_paths(self):
+        return ['csrc/xpu/includes']
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/fused_adam.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/fused_adam.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e0f1a66f8e648305d2ba6fd0ad85a384e6f8bd3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/xpu/fused_adam.py
@@ -0,0 +1,26 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+from .builder import SYCLOpBuilder
+
+
+class FusedAdamBuilder(SYCLOpBuilder):
+    BUILD_VAR = "DS_BUILD_FUSED_ADAM"
+    NAME = "fused_adam"
+
+    def __init__(self):
+        super().__init__(name=self.NAME)
+
+    def absolute_name(self):
+        return f'deepspeed.ops.adam.{self.NAME}_op'
+
+    def sources(self):
+        return ['csrc/xpu/adam/fused_adam_frontend.cpp', 'csrc/xpu/adam/multi_tensor_adam.dp.cpp']
+
+    def include_paths(self):
+        return ['csrc/xpu/includes', 'csrc/xpu/adam']
+
+    def cxx_args(self):
+        args = super().cxx_args()
+        return args + self.version_dependent_macros()
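FusedAdamBuilder.cxx_args() composes the base SYCL flags with the torch version macros from version_dependent_macros(); neither step needs the Intel extension at flag-construction time. A quick illustrative check (with any torch >= 1.5, all three -DVERSION_GE_* macros should be present):

    # Illustrative only, not part of the diff.
    from deepspeed.ops.op_builder.xpu import FusedAdamBuilder

    args = FusedAdamBuilder().cxx_args()
    assert '-fsycl' in args            # base flags from SYCLOpBuilder.cxx_args()
    assert '-DVERSION_GE_1_5' in args  # appended by version_dependent_macros()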
diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdf-1119.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdf-1119.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..cfe21c720a6a6f97d6857de1d0cf268ab20dda53
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdf-1119.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82f899edc59cb41fdd671b256a228e5e06dfc5e24c92712e75005b251b000865
+size 1108
diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdq-1119.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdq-1119.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..3265a7d933efe836193228b86e84c6c7a8b45afd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jdq-1119.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef7cbcb58c2edcfea45c058b751faf7783e710462a924e9aacad8d47a7e9f94b
+size 1549
diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdf-40589.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdf-40589.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..d9ac42c2bbe778d928f3da1e09e3099962e412ad
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdf-40589.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:826bab057a3929f41189bc51afa0a1752695e63ccf20e128ca6129e9e3321fc2
+size 856
diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-s-act-.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-s-act-.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..f8f940438f61ac6fbeaa00c46741c80579af46eb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-s-act-.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4da63a60163340b6e18922abfe7f1f2a7a7da23da63c269324985d61ffaa6075
+size 318
diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jd-42074.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jd-42074.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..61a0b7bc6260b766ad0c03786a40d306843a53b2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jd-42074.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f443b3add7375ca92ece9296b8449a0780305d3b5210c84994bdeab36271d62a
+size 584
diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdq-42074.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdq-42074.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..c152f7e5d9f72441b2fc6aa9f96af8f9ef9fc690
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42074/api-v1-jdq-42074.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8742a74bd5bc120acd9186c8a8737cb420ed9b009fade00b24e7ce5217797f2c
+size 722
diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jd-61.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jd-61.json.gz
new file mode 100644
index 0000000000000000000000000000000000000000..a7ff82cef2a309d55bcae99900bdd51b6bbc675e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jd-61.json.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5c7e79aa41ef580838fb9fc1906280f076c47be1741fddd5004ddb500eb57fe
+size 898
diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/data-v1-dl-61.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/data-v1-dl-61.arff.gz
new file mode 100644
index 0000000000000000000000000000000000000000..25bb1bc7760d28c156677d8d257421b3805299c1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/data-v1-dl-61.arff.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afe4736924606638984e573235191025d419c545d31dc8874c96b72f5ec5db73
+size 2342