applied-ai-018 committed on
Commit e19ef9b · verified · 1 Parent(s): 359a939

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. venv/lib/python3.10/site-packages/deepspeed/ops/adam/__init__.py +7 -0
  2. venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/__init__.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/cpu_adam.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/fused_adam.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/multi_tensor_apply.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/deepspeed/ops/adam/cpu_adam.py +181 -0
  7. venv/lib/python3.10/site-packages/deepspeed/ops/adam/fused_adam.py +195 -0
  8. venv/lib/python3.10/site-packages/deepspeed/ops/adam/multi_tensor_apply.py +17 -0
  9. venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/__pycache__/evoformer_attn.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/evoformer_attn.py +106 -0
  12. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__init__.py +53 -0
  13. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/__init__.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/all_ops.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/async_io.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/builder.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adagrad.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adam.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_lion.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/evoformer_attn.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fp_quantizer.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_adam.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lamb.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lion.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_core_ops.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_cutlass_builder.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/quantizer.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_ops.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_utils.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/random_ltd.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/sparse_attn.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/spatial_inference.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/stochastic_transformer.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer_inference.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/async_io.py +99 -0
  37. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/builder.py +775 -0
  38. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__init__.py +10 -0
  39. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/__init__.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/builder.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/comm.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/cpu_adam.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/fused_adam.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/no_impl.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/builder.py +36 -0
  46. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/comm.py +71 -0
  47. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/cpu_adam.py +27 -0
  48. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/fused_adam.py +23 -0
  49. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/no_impl.py +24 -0
  50. venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_lion.py +48 -0
venv/lib/python3.10/site-packages/deepspeed/ops/adam/__init__.py ADDED
@@ -0,0 +1,7 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .cpu_adam import DeepSpeedCPUAdam
+ from .fused_adam import FusedAdam
venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (279 Bytes).

venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/cpu_adam.cpython-310.pyc ADDED
Binary file (6.5 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/fused_adam.cpython-310.pyc ADDED
Binary file (6.39 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/adam/__pycache__/multi_tensor_apply.cpython-310.pyc ADDED
Binary file (795 Bytes).
venv/lib/python3.10/site-packages/deepspeed/ops/adam/cpu_adam.py ADDED
@@ -0,0 +1,181 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+ from cpuinfo import get_cpu_info
+ from deepspeed.utils import logger
+ from deepspeed.utils.logging import should_log_le
+ from deepspeed.ops.op_builder import CPUAdamBuilder
+
+
+ class DeepSpeedCPUAdam(torch.optim.Optimizer):
+     optimizer_id = 0
+
+     def __init__(self,
+                  model_params,
+                  lr=1e-3,
+                  bias_correction=True,
+                  betas=(0.9, 0.999),
+                  eps=1e-8,
+                  weight_decay=0,
+                  amsgrad=False,
+                  adamw_mode=True,
+                  fp32_optimizer_states=True):
+         """Fast vectorized implementation of two variations of the Adam optimizer on CPU:
+
+         * Adam: A Method for Stochastic Optimization (https://arxiv.org/abs/1412.6980);
+         * AdamW: Fixing Weight Decay Regularization in Adam (https://arxiv.org/abs/1711.05101)
+
+         DeepSpeed CPU Adam(W) provides a 5x to 7x speedup over torch.optim.Adam(W).
+         To apply this optimizer, the model's master parameters (in FP32) must
+         reside in CPU memory.
+
+         To train on a heterogeneous system, such as coordinating CPU and GPU, DeepSpeed offers
+         the ZeRO-Offload technology, which efficiently offloads the optimizer states into CPU memory
+         with minimal impact on training throughput. DeepSpeedCPUAdam plays an important role in minimizing
+         the overhead of the optimizer's latency on CPU. Please refer to the ZeRO-Offload tutorial
+         (https://www.deepspeed.ai/tutorials/zero-offload/) for more information on how to enable this technology.
+
+         When calling the step function, two options are available: (1) update the optimizer's states, or (2) update
+         the optimizer's states and copy the parameters back to the GPU at the same time. We have seen that the second
+         option can bring 30% higher throughput than doing the copy separately using option one.
+
+
+         .. note::
+                 We recommend using our `config
+                 <https://www.deepspeed.ai/docs/config-json/#optimizer-parameters>`_
+                 to allow :meth:`deepspeed.initialize` to build this optimizer
+                 for you.
+
+
+         Arguments:
+             model_params (iterable): iterable of parameters to optimize or dicts defining
+                 parameter groups.
+             lr (float, optional): learning rate. (default: 1e-3)
+             betas (Tuple[float, float], optional): coefficients used for computing
+                 running averages of gradient and its square. (default: (0.9, 0.999))
+             eps (float, optional): term added to the denominator to improve
+                 numerical stability. (default: 1e-8)
+             weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+             amsgrad (boolean, optional): whether to use the AMSGrad variant of this
+                 algorithm from the paper `On the Convergence of Adam and Beyond`_
+                 (default: False) NOT SUPPORTED in DeepSpeed CPUAdam!
+             adamw_mode: select between Adam and AdamW implementations (default: AdamW)
+             fp32_optimizer_states: creates momentum and variance in full precision regardless of
+                 the precision of the parameters (default: True)
+         """
+
+         default_args = dict(lr=lr,
+                             betas=betas,
+                             eps=eps,
+                             weight_decay=weight_decay,
+                             bias_correction=bias_correction,
+                             amsgrad=amsgrad)
+         super(DeepSpeedCPUAdam, self).__init__(model_params, default_args)
+
+         cpu_info = get_cpu_info()
+         self.cpu_vendor = cpu_info["vendor_id_raw"].lower() if "vendor_id_raw" in cpu_info else "unknown"
+         if "amd" in self.cpu_vendor:
+             for group_id, group in enumerate(self.param_groups):
+                 for param_id, p in enumerate(group['params']):
+                     if p.dtype == torch.half:
+                         logger.warning("FP16 params for CPUAdam may not work on AMD CPUs")
+                         break
+                 else:
+                     continue
+                 break
+
+         self.opt_id = DeepSpeedCPUAdam.optimizer_id
+         DeepSpeedCPUAdam.optimizer_id = DeepSpeedCPUAdam.optimizer_id + 1
+         self.adam_w_mode = adamw_mode
+         self.fp32_optimizer_states = fp32_optimizer_states
+         self.ds_opt_adam = CPUAdamBuilder().load()
+
+         self.ds_opt_adam.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode,
+                                      should_log_le("info"))
+
+     def __del__(self):
+         # need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
+         # is used multiple times in the same process (notebook or pytest worker)
+         self.ds_opt_adam.destroy_adam(self.opt_id)
+
+     def __setstate__(self, state):
+         super(DeepSpeedCPUAdam, self).__setstate__(state)
+         for group in self.param_groups:
+             group.setdefault('amsgrad', False)
+
+     @torch.no_grad()
+     def step(self, closure=None, fp16_param_groups=None):
+         """Update the model parameters.
+
+         .. note::
+             This method will be called internally by ZeRO-Offload. DeepSpeed
+             users should still use ``engine.step()`` as shown in the
+             `Getting Started
+             <https://www.deepspeed.ai/getting-started/#training>`_ guide.
+
+         Args:
+             closure (callable, optional): closure to compute the loss.
+                 Defaults to ``None``.
+             fp16_param_groups: FP16 GPU parameters to update. Performing the
+                 copy here reduces communication time. Defaults to ``None``.
+
+         Returns:
+             loss: if ``closure`` is provided. Otherwise ``None``.
+         """
+
+         loss = None
+         if closure is not None:
+             with torch.enable_grad():
+                 loss = closure()
+
+         # intended device for step
+         device = torch.device('cpu')
+
+         # convert the fp16 params into groups of parameters
+         if type(fp16_param_groups) is list:
+             if type(fp16_param_groups[0]) is not list:
+                 fp16_param_groups = [fp16_param_groups]
+         elif fp16_param_groups is not None:
+             fp16_param_groups = [[fp16_param_groups]]
+
+         for group_id, group in enumerate(self.param_groups):
+             for param_id, p in enumerate(group['params']):
+
+                 if p.grad is None:
+                     continue
+
+                 assert p.device == device, f"CPUAdam param is on {p.device} and must be 'cpu', make " \
+                     "sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
+
+                 state = self.state[p]
+                 # State initialization
+                 if len(state) == 0:
+                     #print(f'group {group_id} param {param_id} = {p.numel()}')
+                     state['step'] = 0
+
+                     # use full precision by default unless self.fp32_optimizer_states is off
+                     state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
+
+                     # gradient momentums
+                     state['exp_avg'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
+                     #memory_format=torch.preserve_format)
+                     # gradient variances
+                     state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
+                     #memory_format=torch.preserve_format)
+
+                 state['step'] += 1
+                 beta1, beta2 = group['betas']
+
+                 if fp16_param_groups is not None:
+                     self.ds_opt_adam.adam_update_copy(self.opt_id, state['step'], group['lr'], beta1, beta2,
+                                                       group['eps'], group['weight_decay'], group['bias_correction'],
+                                                       p.data, p.grad.data, state['exp_avg'], state['exp_avg_sq'],
+                                                       fp16_param_groups[group_id][param_id].data)
+                 else:
+                     self.ds_opt_adam.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'],
+                                                  group['weight_decay'], group['bias_correction'], p.data, p.grad.data,
+                                                  state['exp_avg'], state['exp_avg_sq'])
+         return loss
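
A minimal usage sketch for DeepSpeedCPUAdam (assumes DeepSpeed is installed and the CPUAdam C++ extension can be JIT-built; the toy model is illustrative):

    import torch
    from deepspeed.ops.adam import DeepSpeedCPUAdam

    model = torch.nn.Linear(512, 512)  # parameters must live in CPU memory
    opt = DeepSpeedCPUAdam(model.parameters(), lr=1e-3, weight_decay=0.01, adamw_mode=True)

    loss = model(torch.randn(8, 512)).pow(2).mean()
    loss.backward()
    opt.step()       # runs the vectorized C++ kernel registered under this opt_id
    opt.zero_grad()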
venv/lib/python3.10/site-packages/deepspeed/ops/adam/fused_adam.py ADDED
@@ -0,0 +1,195 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ """
+ Copyright NVIDIA/apex
+ This file is adapted from fused adam in NVIDIA/apex, commit 6bd01c4
+ """
+
+ import torch
+ from .multi_tensor_apply import MultiTensorApply
+
+ multi_tensor_applier = MultiTensorApply(2048 * 32)
+ from deepspeed.accelerator import get_accelerator
+ from deepspeed.ops.op_builder import FusedAdamBuilder
+
+
+ class FusedAdam(torch.optim.Optimizer):
+     """Implements the Adam algorithm.
+
+     Currently GPU-only. Requires Apex to be installed via
+     ``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
+
+     This version of fused Adam implements 2 fusions.
+
+     * Fusion of the Adam update's elementwise operations
+     * A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
+
+     :class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
+     or ``torch.optim.Adam`` with ``adam_w_mode=False``::
+
+         opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
+         ...
+         opt.step()
+
+     :class:`apex.optimizers.FusedAdam` may be used with or without Amp. If you wish to use :class:`FusedAdam` with Amp,
+     you may choose any ``opt_level``::
+
+         opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
+         model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
+         ...
+         opt.step()
+
+     In general, ``opt_level="O1"`` is recommended.
+
+
+     .. warning::
+         A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``. These additional arguments
+         are now deprecated and unnecessary.
+
+     Adam was proposed in `Adam: A Method for Stochastic Optimization`_.
+
+     Arguments:
+         params (iterable): iterable of parameters to optimize or dicts defining
+             parameter groups.
+         lr (float, optional): learning rate. (default: 1e-3)
+         betas (Tuple[float, float], optional): coefficients used for computing
+             running averages of gradient and its square. (default: (0.9, 0.999))
+         eps (float, optional): term added to the denominator to improve
+             numerical stability. (default: 1e-8)
+         weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+         amsgrad (boolean, optional): whether to use the AMSGrad variant of this
+             algorithm from the paper `On the Convergence of Adam and Beyond`_
+             (default: False) NOT SUPPORTED in FusedAdam!
+         adam_w_mode (boolean, optional): Apply L2 regularization or weight decay.
+             True for decoupled weight decay (also known as AdamW). (default: True)
+         set_grad_none (bool, optional): whether to set grad to None when the zero_grad()
+             method is called. (default: True)
+
+     .. _Adam - A Method for Stochastic Optimization:
+         https://arxiv.org/abs/1412.6980
+     .. _On the Convergence of Adam and Beyond:
+         https://openreview.net/forum?id=ryQu7f-RZ
+     """
+
+     def __init__(self,
+                  params,
+                  lr=1e-3,
+                  bias_correction=True,
+                  betas=(0.9, 0.999),
+                  eps=1e-8,
+                  adam_w_mode=True,
+                  weight_decay=0.,
+                  amsgrad=False,
+                  set_grad_none=True):
+
+         if amsgrad:
+             raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
+         defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay)
+         super(FusedAdam, self).__init__(params, defaults)
+         self.adam_w_mode = 1 if adam_w_mode else 0
+         self.set_grad_none = set_grad_none
+
+         fused_adam_cuda = FusedAdamBuilder().load()
+         # Skip buffer
+         self._dummy_overflow_buf = get_accelerator().IntTensor([0])
+         self.multi_tensor_adam = fused_adam_cuda.multi_tensor_adam
+
+     def zero_grad(self):
+         if self.set_grad_none:
+             for group in self.param_groups:
+                 for p in group['params']:
+                     p.grad = None
+         else:
+             super(FusedAdam, self).zero_grad()
+
+     def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None):
+         """Performs a single optimization step.
+
+         Arguments:
+             closure (callable, optional): A closure that reevaluates the model
+                 and returns the loss.
+
+         The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
+         """
+         if any(p is not None for p in [grads, output_params, scale, grad_norms]):
+             raise RuntimeError(
+                 'FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.'
+             )
+         loss = None
+         if closure is not None:
+             loss = closure()
+
+         for group in self.param_groups:
+             if len(group['params']) == 0:
+                 continue
+             bias_correction = 1 if group['bias_correction'] else 0
+             beta1, beta2 = group['betas']
+
+             # assume the same step across the group for now to simplify things;
+             # a per-parameter step can easily be supported by making it a tensor, or by passing a list into the kernel
+             if 'step' not in group:
+                 group['step'] = 0
+
+             # create lists for multi-tensor apply
+             g_16, p_16, m_16, v_16 = [], [], [], []
+             g_bf, p_bf, m_bf, v_bf = [], [], [], []
+             g_32, p_32, m_32, v_32 = [], [], [], []
+
+             for p in group['params']:
+                 if p.grad is None:
+                     continue
+                 if p.grad.data.is_sparse:
+                     raise RuntimeError(
+                         'FusedAdam does not support sparse gradients, please consider SparseAdam instead')
+
+                 state = self.state[p]
+                 # State initialization
+                 if len(state) == 0:
+                     # DeepSpeed ZeRO 3 processes one subgroup at a time, so we need to keep track of the step count for each tensor separately.
+                     # This is not an issue for ZeRO 1 & 2, since they apply a single optimization step to the whole param group at the same time.
+                     # In order to keep backward compatibility for existing checkpoints, we use group['step'] to initialize state['step'] if it exists.
+                     state['step'] = group.get('step', 0)
+                     # Exponential moving average of gradient values
+                     state['exp_avg'] = torch.zeros_like(p.data)
+                     # Exponential moving average of squared gradient values
+                     state['exp_avg_sq'] = torch.zeros_like(p.data)
+
+                 if p.dtype == torch.float16:
+                     g_16.append(p.grad.data)
+                     p_16.append(p.data)
+                     m_16.append(state['exp_avg'])
+                     v_16.append(state['exp_avg_sq'])
+                 elif p.dtype == torch.bfloat16:
+                     g_bf.append(p.grad)
+                     p_bf.append(p)
+                     m_bf.append(state['exp_avg'])
+                     v_bf.append(state['exp_avg_sq'])
+                 elif p.dtype == torch.float32:
+                     g_32.append(p.grad.data)
+                     p_32.append(p.data)
+                     m_32.append(state['exp_avg'])
+                     v_32.append(state['exp_avg_sq'])
+                 else:
+                     raise RuntimeError('FusedAdam only supports fp16, bf16 and fp32.')
+
+             if len(g_16) > 0:
+                 state['step'] += 1
+                 multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16],
+                                      group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
+                                      bias_correction, group['weight_decay'])
+
+             if len(g_bf) > 0:
+                 state['step'] += 1
+                 multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_bf, p_bf, m_bf, v_bf],
+                                      group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
+                                      bias_correction, group['weight_decay'])
+
+             if len(g_32) > 0:
+                 state['step'] += 1
+                 multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32],
+                                      group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
+                                      bias_correction, group['weight_decay'])
+
+         return loss
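
A usage sketch for FusedAdam, assuming a CUDA device and that the FusedAdam extension builds (the fp16 model is illustrative):

    import torch
    from deepspeed.ops.adam import FusedAdam

    model = torch.nn.Linear(1024, 1024).cuda().half()  # fp16 params land in the g_16/p_16 bucket
    opt = FusedAdam(model.parameters(), lr=1e-3, adam_w_mode=True)

    out = model(torch.randn(4, 1024, device='cuda', dtype=torch.half))
    out.float().pow(2).mean().backward()
    opt.step()       # one multi_tensor_adam launch per dtype bucket
    opt.zero_grad()  # sets grads to None (set_grad_none=True by default)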
venv/lib/python3.10/site-packages/deepspeed/ops/adam/multi_tensor_apply.py ADDED
@@ -0,0 +1,17 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ """
+ Copyright NVIDIA/apex
+ This file is adapted from NVIDIA/apex, commit a109f85
+ """
+
+
+ class MultiTensorApply(object):
+
+     def __init__(self, chunk_size):
+         self.chunk_size = chunk_size
+
+     def __call__(self, op, noop_flag_buffer, tensor_lists, *args):
+         return op(self.chunk_size, noop_flag_buffer, tensor_lists, *args)
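
MultiTensorApply only curries the chunk size into the kernel call. A sketch of the calling convention, using a hypothetical Python stand-in (fake_axpy) for what is normally a compiled kernel such as fused_adam_cuda.multi_tensor_adam:

    import torch
    from deepspeed.ops.adam.multi_tensor_apply import MultiTensorApply

    def fake_axpy(chunk_size, noop_flag, tensor_lists, alpha):
        # a real kernel would walk the tensors in chunks of `chunk_size` elements
        for g, p in zip(*tensor_lists):
            p.add_(g, alpha=-alpha)

    applier = MultiTensorApply(2048 * 32)
    grads, params = [torch.ones(4)], [torch.zeros(4)]
    applier(fake_axpy, torch.zeros(1, dtype=torch.int), [grads, params], 0.1)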
venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (297 Bytes).

venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/__pycache__/evoformer_attn.cpython-310.pyc ADDED
Binary file (3.79 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/deepspeed4science/evoformer_attn.py ADDED
@@ -0,0 +1,106 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+ import numpy as np
+ from deepspeed.ops.op_builder import EvoformerAttnBuilder
+ from deepspeed.accelerator import get_accelerator
+
+ kernel_ = None
+
+
+ def _attention(Q, K, V, bias1, bias2):
+     assert Q.shape[-3] > 16, "seq_len must be greater than 16"
+     O = torch.empty_like(Q, dtype=Q.dtype)
+     assert get_accelerator().on_accelerator(Q), "Q must be on cuda"
+     assert get_accelerator().on_accelerator(K), "K must be on cuda"
+     assert get_accelerator().on_accelerator(V), "V must be on cuda"
+     assert get_accelerator().on_accelerator(bias1), "bias1 must be on cuda"
+     assert get_accelerator().on_accelerator(bias2), "bias2 must be on cuda"
+     global kernel_
+     if kernel_ is None:
+         kernel_ = EvoformerAttnBuilder().load()
+     nheads = Q.shape[-2]
+     nq = (Q.shape[-3] + 31) // 32 * 32
+     nb = np.prod(Q.shape[:-3])
+     lse = torch.empty((nb, nheads, nq), dtype=torch.float32, device=Q.device)
+     kernel_.attention(Q, K, V, bias1, bias2, O, lse)
+     return O, lse
+
+
+ def attention_bwd(dO, Q, K, V, O, lse, bias1, bias2, bias1_grad, bias2_grad):
+     assert max(Q.shape[-1], V.shape[-1]) <= 64, "Hidden size is too large. Need to change kMax to a larger value"
+     dQ = torch.empty_like(Q, dtype=Q.dtype)
+     dK = torch.empty_like(K, dtype=K.dtype)
+     dV = torch.empty_like(V, dtype=V.dtype)
+     assert get_accelerator().on_accelerator(dO), "dO must be on cuda"
+     assert get_accelerator().on_accelerator(Q), "Q must be on cuda"
+     assert get_accelerator().on_accelerator(K), "K must be on cuda"
+     assert get_accelerator().on_accelerator(V), "V must be on cuda"
+     assert get_accelerator().on_accelerator(O), "O must be on cuda"
+     global kernel_
+     if kernel_ is None:
+         kernel_ = EvoformerAttnBuilder().load()
+     delta = torch.empty_like(lse)
+     if bias1_grad:
+         dB1 = torch.zeros_like(bias1, dtype=torch.float32)
+     else:
+         dB1 = torch.tensor([], dtype=torch.float32, device=bias1.device)
+     if bias2_grad:
+         dB2 = torch.zeros_like(bias2, dtype=torch.float32)
+     else:
+         dB2 = torch.tensor([], dtype=torch.float32, device=bias2.device)
+     kernel_.attention_bwd(dO, Q, K, V, O, lse, delta, bias1, bias2, dQ, dK, dV, dB1, dB2)
+     return dQ, dK, dV, dB1.to(dO.dtype), dB2.to(dO.dtype)
+
+
+ class EvoformerFusedAttention(torch.autograd.Function):
+
+     @staticmethod
+     def forward(ctx, q, k, v, bias1=None, bias2=None):
+         """
+         q, k, v are in shape [*, L, H, D]
+         """
+         bias1_ = bias1.contiguous() if bias1 is not None else torch.tensor([], dtype=q.dtype, device=q.device)
+         bias2_ = bias2.contiguous() if bias2 is not None else torch.tensor([], dtype=q.dtype, device=q.device)
+         q = q.contiguous()
+         k = k.contiguous()
+         v = v.contiguous()
+         o, lse = _attention(q, k, v, bias1_, bias2_)
+         ctx.save_for_backward(q, k, v, o, lse, bias1_, bias2_)
+         return o
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         q, k, v, o, lse, bias1, bias2 = ctx.saved_tensors
+         is_b1_grad = bias1.numel() != 0 and ctx.needs_input_grad[3]
+         is_b2_grad = bias2.numel() != 0 and ctx.needs_input_grad[4]
+         dQ, dK, dV, dB1, dB2 = attention_bwd(grad_output, q, k, v, o, lse, bias1, bias2, is_b1_grad, is_b2_grad)
+         if not is_b1_grad:
+             dB1 = None
+         if not is_b2_grad:
+             dB2 = None
+         return dQ, dK, dV, dB1, dB2
+
+
+ def DS4Sci_EvoformerAttention(Q, K, V, biases):
+     assert len(biases) <= 2
+
+     if (len(biases) == 0):
+         biases.append(None)
+
+     if (len(biases) == 1):
+         biases.append(None)
+
+     bias_1_shape = lambda x: (x.shape[0], x.shape[1], 1, 1, x.shape[2])
+     bias_2_shape = lambda x: (x.shape[0], 1, x.shape[3], x.shape[2], x.shape[2])
+
+     if biases[0] is not None:
+         assert biases[0].shape == bias_1_shape(Q), "bias1 shape is incorrect"
+
+     if biases[1] is not None:
+         assert biases[1].shape == bias_2_shape(Q), "bias2 shape is incorrect"
+
+     return EvoformerFusedAttention.apply(Q, K, V, biases[0], biases[1])
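
A usage sketch for DS4Sci_EvoformerAttention, assuming a CUDA device and that the Evoformer kernel builds. The [B, N, L, H, D] layout and bias shapes follow the bias_1_shape/bias_2_shape checks above (L > 16, D <= 64):

    import torch
    from deepspeed.ops.deepspeed4science.evoformer_attn import DS4Sci_EvoformerAttention

    B, N, L, H, D = 1, 4, 64, 4, 32
    Q = torch.randn(B, N, L, H, D, dtype=torch.float16, device='cuda', requires_grad=True)
    K, V = torch.randn_like(Q), torch.randn_like(Q)
    bias1 = torch.randn(B, N, 1, 1, L, dtype=Q.dtype, device='cuda', requires_grad=True)
    bias2 = torch.randn(B, 1, H, L, L, dtype=Q.dtype, device='cuda', requires_grad=True)

    out = DS4Sci_EvoformerAttention(Q, K, V, [bias1, bias2])  # shape [B, N, L, H, D]
    out.sum().backward()  # bias gradients flow through attention_bwd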
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__init__.py ADDED
@@ -0,0 +1,53 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import sys
+ import os
+ import pkgutil
+ import importlib
+
+ from .builder import get_default_compute_capabilities, OpBuilder
+
+ # Do not remove, required for the abstract accelerator to detect whether we have a deepspeed or 3p op_builder
+ __deepspeed__ = True
+
+ # List of all available op builders from the deepspeed op_builder
+ try:
+     import deepspeed.ops.op_builder  # noqa: F401 # type: ignore
+     op_builder_dir = "deepspeed.ops.op_builder"
+ except ImportError:
+     op_builder_dir = "op_builder"
+
+ __op_builders__ = []
+
+ this_module = sys.modules[__name__]
+
+
+ def builder_closure(member_name):
+     if op_builder_dir == "op_builder":
+         # at installation time we cannot get the builder because torch is not installed,
+         # so return a closure instead
+         def _builder():
+             from deepspeed.accelerator import get_accelerator
+             builder = get_accelerator().create_op_builder(member_name)
+             return builder
+
+         return _builder
+     else:
+         # at runtime, return the op builder class directly
+         from deepspeed.accelerator import get_accelerator
+         builder = get_accelerator().get_op_builder(member_name)
+         return builder
+
+
+ # Reflect builder names and add a builder closure, e.g. 'TransformerBuilder()' creates the op builder for the current accelerator
+ for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(this_module.__file__)]):
+     if module_name != 'all_ops' and module_name != 'builder':
+         module = importlib.import_module(f".{module_name}", package=op_builder_dir)
+         for member_name in module.__dir__():
+             if member_name.endswith('Builder') and member_name != "OpBuilder" and member_name != "CUDAOpBuilder":
+                 # assign the builder name to a variable with the same name,
+                 # i.e. the following is equivalent to TransformerBuilder = builder_closure("TransformerBuilder")
+                 this_module.__dict__[member_name] = builder_closure(member_name)
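
A sketch of what this reflection enables at runtime, assuming torch and DeepSpeed are importable: reflected names such as FusedAdamBuilder resolve to the builder class for the current accelerator.

    from deepspeed.ops.op_builder import FusedAdamBuilder

    builder = FusedAdamBuilder()
    print(builder.absolute_name())  # e.g. deepspeed.ops.adam.fused_adam_op
    if builder.is_compatible():
        fused_adam_module = builder.load()  # pre-built import, or ninja JIT build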
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.36 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/all_ops.cpython-310.pyc ADDED
Binary file (954 Bytes).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/async_io.cpython-310.pyc ADDED
Binary file (3.35 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/builder.cpython-310.pyc ADDED
Binary file (23.5 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adagrad.cpython-310.pyc ADDED
Binary file (1.65 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_adam.cpython-310.pyc ADDED
Binary file (1.65 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/cpu_lion.cpython-310.pyc ADDED
Binary file (1.75 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/evoformer_attn.cpython-310.pyc ADDED
Binary file (3.01 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fp_quantizer.cpython-310.pyc ADDED
Binary file (2.56 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_adam.cpython-310.pyc ADDED
Binary file (1.75 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lamb.cpython-310.pyc ADDED
Binary file (1.89 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/fused_lion.cpython-310.pyc ADDED
Binary file (1.75 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_core_ops.cpython-310.pyc ADDED
Binary file (4.11 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/inference_cutlass_builder.cpython-310.pyc ADDED
Binary file (3.68 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/quantizer.cpython-310.pyc ADDED
Binary file (1.57 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_ops.cpython-310.pyc ADDED
Binary file (4.82 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/ragged_utils.cpython-310.pyc ADDED
Binary file (3.26 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/random_ltd.cpython-310.pyc ADDED
Binary file (1.47 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/sparse_attn.cpython-310.pyc ADDED
Binary file (2.52 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/spatial_inference.cpython-310.pyc ADDED
Binary file (2.02 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/stochastic_transformer.cpython-310.pyc ADDED
Binary file (1.14 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer.cpython-310.pyc ADDED
Binary file (1.67 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/__pycache__/transformer_inference.cpython-310.pyc ADDED
Binary file (3.02 kB).

venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/async_io.py ADDED
@@ -0,0 +1,99 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import distutils.spawn
+ import subprocess
+
+ from .builder import OpBuilder
+
+
+ class AsyncIOBuilder(OpBuilder):
+     BUILD_VAR = "DS_BUILD_AIO"
+     NAME = "async_io"
+
+     def __init__(self):
+         super().__init__(name=self.NAME)
+
+     def absolute_name(self):
+         return f'deepspeed.ops.aio.{self.NAME}_op'
+
+     def sources(self):
+         return [
+             'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp',
+             'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp',
+             'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp',
+             'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp',
+             'csrc/aio/py_lib/deepspeed_pin_tensor.cpp'
+         ]
+
+     def include_paths(self):
+         return ['csrc/aio/py_lib', 'csrc/aio/common']
+
+     def cxx_args(self):
+         # -O0 for improved debugging, since performance is bound by I/O
+         CPU_ARCH = self.cpu_arch()
+         SIMD_WIDTH = self.simd_width()
+         import torch  # Keep this import here to avoid errors when building the DeepSpeed wheel without torch installed
+         TORCH_MAJOR, TORCH_MINOR = map(int, torch.__version__.split('.')[0:2])
+         if TORCH_MAJOR > 2 or (TORCH_MAJOR == 2 and TORCH_MINOR >= 1):
+             # c++17 is required for torch >= 2.1
+             CPP_STD = '-std=c++17'
+         else:
+             CPP_STD = '-std=c++14'
+         return [
+             '-g',
+             '-Wall',
+             '-O0',
+             CPP_STD,
+             '-shared',
+             '-fPIC',
+             '-Wno-reorder',
+             CPU_ARCH,
+             '-fopenmp',
+             SIMD_WIDTH,
+             '-laio',
+         ]
+
+     def extra_ldflags(self):
+         return ['-laio']
+
+     def check_for_libaio_pkg(self):
+         libs = dict(
+             dpkg=["-l", "libaio-dev", "apt"],
+             pacman=["-Q", "libaio", "pacman"],
+             rpm=["-q", "libaio-devel", "yum"],
+         )
+
+         found = False
+         for pkgmgr, data in libs.items():
+             flag, lib, tool = data
+             path = distutils.spawn.find_executable(pkgmgr)
+             if path is not None:
+                 cmd = f"{pkgmgr} {flag} {lib}"
+                 result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+                 if result.wait() == 0:
+                     found = True
+                 else:
+                     self.warning(f"{self.NAME}: please install the {lib} package with {tool}")
+                 break
+         return found
+
+     def is_compatible(self, verbose=True):
+         # Check for the existence of libaio by using distutils
+         # to compile and link a test program that calls io_pgetevents,
+         # which is a function provided by libaio that is used in the async_io op.
+         # If needed, one can define -I and -L entries in CFLAGS and LDFLAGS
+         # respectively to specify the directories for libaio.h and libaio.so.
+         aio_compatible = self.has_function('io_pgetevents', ('aio', ))
+         if verbose and not aio_compatible:
+             self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.")
+
+             # Check for the libaio package via known package managers
+             # to print suggestions on which package to install.
+             self.check_for_libaio_pkg()
+
+             self.warning(
+                 "If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found."
+             )
+         return super().is_compatible(verbose) and aio_compatible
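
A sketch of probing for libaio before enabling the op, assuming a Linux host with DeepSpeed installed:

    from deepspeed.ops.op_builder import AsyncIOBuilder

    aio = AsyncIOBuilder()
    # is_compatible() compiles and links a tiny C program against -laio;
    # on failure it suggests the distro package (libaio-dev / libaio-devel).
    if aio.is_compatible(verbose=True):
        aio_module = aio.load()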
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/builder.py ADDED
@@ -0,0 +1,775 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import os
+ import sys
+ import time
+ import importlib
+ from pathlib import Path
+ import subprocess
+ import shlex
+ import shutil
+ import tempfile
+ import distutils.ccompiler
+ import distutils.log
+ import distutils.sysconfig
+ from distutils.errors import CompileError, LinkError
+ from abc import ABC, abstractmethod
+ from typing import List
+
+ YELLOW = '\033[93m'
+ END = '\033[0m'
+ WARNING = f"{YELLOW} [WARNING] {END}"
+
+ DEFAULT_TORCH_EXTENSION_PATH = "/tmp/torch_extensions"
+ DEFAULT_COMPUTE_CAPABILITIES = "6.0;6.1;7.0"
+
+ try:
+     import torch
+ except ImportError:
+     print(f"{WARNING} unable to import torch, please install it if you want to pre-compile any deepspeed ops.")
+ else:
+     TORCH_MAJOR = int(torch.__version__.split('.')[0])
+     TORCH_MINOR = int(torch.__version__.split('.')[1])
+
+
+ class MissingCUDAException(Exception):
+     pass
+
+
+ class CUDAMismatchException(Exception):
+     pass
+
+
+ def installed_cuda_version(name=""):
+     import torch.utils.cpp_extension
+     cuda_home = torch.utils.cpp_extension.CUDA_HOME
+     if cuda_home is None:
+         raise MissingCUDAException("CUDA_HOME does not exist, unable to compile CUDA op(s)")
+     # Ensure there is not a cuda version mismatch between torch and the nvcc compiler
+     output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True)
+     output_split = output.split()
+     release_idx = output_split.index("release")
+     release = output_split[release_idx + 1].replace(',', '').split(".")
+     # Ignore patch versions, only look at major + minor
+     cuda_major, cuda_minor = release[:2]
+     return int(cuda_major), int(cuda_minor)
+
+
+ def get_default_compute_capabilities():
+     compute_caps = DEFAULT_COMPUTE_CAPABILITIES
+     import torch.utils.cpp_extension
+     if torch.utils.cpp_extension.CUDA_HOME is not None and installed_cuda_version()[0] >= 11:
+         if installed_cuda_version()[0] == 11 and installed_cuda_version()[1] == 0:
+             # Special treatment of CUDA 11.0 because compute_86 is not supported.
+             compute_caps += ";8.0"
+         else:
+             compute_caps += ";8.0;8.6"
+     return compute_caps
+
+
+ # List compatible minor CUDA versions, so that, for example, pytorch built with cuda-11.0 can be used
+ # to build deepspeed with a system-wide installed cuda 11.2
+ cuda_minor_mismatch_ok = {
+     10: ["10.0", "10.1", "10.2"],
+     11: ["11.0", "11.1", "11.2", "11.3", "11.4", "11.5", "11.6", "11.7", "11.8"],
+     12: ["12.0", "12.1", "12.2", "12.3"],
+ }
+
+
+ def assert_no_cuda_mismatch(name=""):
+     cuda_major, cuda_minor = installed_cuda_version(name)
+     sys_cuda_version = f'{cuda_major}.{cuda_minor}'
+     torch_cuda_version = ".".join(torch.version.cuda.split('.')[:2])
+     # This is a show-stopping error; we should probably not proceed past this
+     if sys_cuda_version != torch_cuda_version:
+         if (cuda_major in cuda_minor_mismatch_ok and sys_cuda_version in cuda_minor_mismatch_ok[cuda_major]
+                 and torch_cuda_version in cuda_minor_mismatch_ok[cuda_major]):
+             print(f"Installed CUDA version {sys_cuda_version} does not match the "
+                   f"version torch was compiled with {torch.version.cuda} "
+                   "but since the APIs are compatible, accepting this combination")
+             return True
+         elif os.getenv("DS_SKIP_CUDA_CHECK", "0") == "1":
+             print(
+                 f"{WARNING} DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the "
+                 f"version torch was compiled with {torch.version.cuda}. "
+                 "Detected `DS_SKIP_CUDA_CHECK=1`: Allowing this combination of CUDA, but it may result in unexpected behavior."
+             )
+             return True
+         raise CUDAMismatchException(
+             f">- DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the "
+             f"version torch was compiled with {torch.version.cuda}, unable to compile "
+             "cuda/cpp extensions without a matching cuda version.")
+     return True
+
+
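A sketch of how these version-check helpers behave at build time, assuming torch with CUDA support and nvcc on PATH ("my_op" is a hypothetical op name):

    from deepspeed.ops.op_builder.builder import installed_cuda_version, assert_no_cuda_mismatch

    major, minor = installed_cuda_version()  # parsed from the `nvcc -V` release line
    assert_no_cuda_mismatch("my_op")         # raises CUDAMismatchException on a hard
                                             # mismatch unless DS_SKIP_CUDA_CHECK=1 is set
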
+ class OpBuilder(ABC):
+     _rocm_version = None
+     _is_rocm_pytorch = None
+     _is_sycl_enabled = None
+     _loaded_ops = {}
+
+     def __init__(self, name):
+         self.name = name
+         self.jit_mode = False
+         self.build_for_cpu = False
+         self.enable_bf16 = False
+         self.error_log = None
+
+     @abstractmethod
+     def absolute_name(self):
+         '''
+         Returns absolute build path for cases where the op is pre-installed, e.g., deepspeed.ops.adam.cpu_adam
+         will be installed as something like: deepspeed/ops/adam/cpu_adam.so
+         '''
+         pass
+
+     @abstractmethod
+     def sources(self):
+         '''
+         Returns list of source files for your op, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed)
+         '''
+         pass
+
+     def hipify_extension(self):
+         pass
+
+     def sycl_extension(self):
+         pass
+
+     @staticmethod
+     def validate_torch_version(torch_info):
+         install_torch_version = torch_info['version']
+         current_torch_version = ".".join(torch.__version__.split('.')[:2])
+         if install_torch_version != current_torch_version:
+             raise RuntimeError("PyTorch version mismatch! DeepSpeed ops were compiled and installed "
+                                "with a different version than what is being used at runtime. "
+                                f"Please re-install DeepSpeed or switch torch versions. "
+                                f"Install torch version={install_torch_version}, "
+                                f"Runtime torch version={current_torch_version}")
+
+     @staticmethod
+     def validate_torch_op_version(torch_info):
+         if not OpBuilder.is_rocm_pytorch():
+             current_cuda_version = ".".join(torch.version.cuda.split('.')[:2])
+             install_cuda_version = torch_info['cuda_version']
+             if install_cuda_version != current_cuda_version:
+                 raise RuntimeError("CUDA version mismatch! DeepSpeed ops were compiled and installed "
+                                    "with a different version than what is being used at runtime. "
+                                    f"Please re-install DeepSpeed or switch torch versions. "
+                                    f"Install CUDA version={install_cuda_version}, "
+                                    f"Runtime CUDA version={current_cuda_version}")
+         else:
+             current_hip_version = ".".join(torch.version.hip.split('.')[:2])
+             install_hip_version = torch_info['hip_version']
+             if install_hip_version != current_hip_version:
+                 raise RuntimeError("HIP version mismatch! DeepSpeed ops were compiled and installed "
+                                    "with a different version than what is being used at runtime. "
+                                    f"Please re-install DeepSpeed or switch torch versions. "
+                                    f"Install HIP version={install_hip_version}, "
+                                    f"Runtime HIP version={current_hip_version}")
+
+     @staticmethod
+     def is_rocm_pytorch():
+         if OpBuilder._is_rocm_pytorch is not None:
+             return OpBuilder._is_rocm_pytorch
+
+         _is_rocm_pytorch = False
+         try:
+             import torch
+         except ImportError:
+             pass
+         else:
+             if TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 5):
+                 _is_rocm_pytorch = hasattr(torch.version, 'hip') and torch.version.hip is not None
+                 if _is_rocm_pytorch:
+                     from torch.utils.cpp_extension import ROCM_HOME
+                     _is_rocm_pytorch = ROCM_HOME is not None
+         OpBuilder._is_rocm_pytorch = _is_rocm_pytorch
+         return OpBuilder._is_rocm_pytorch
+
+     @staticmethod
+     def is_sycl_enabled():
+         if OpBuilder._is_sycl_enabled is not None:
+             return OpBuilder._is_sycl_enabled
+
+         _is_sycl_enabled = False
+         try:
+             result = subprocess.run(["c2s", "--version"], capture_output=True)
+         except:
+             pass
+         else:
+             _is_sycl_enabled = True
+
+         OpBuilder._is_sycl_enabled = _is_sycl_enabled
+         return OpBuilder._is_sycl_enabled
+
+     @staticmethod
+     def installed_rocm_version():
+         if OpBuilder._rocm_version:
+             return OpBuilder._rocm_version
+
+         ROCM_MAJOR = '0'
+         ROCM_MINOR = '0'
+         if OpBuilder.is_rocm_pytorch():
+             from torch.utils.cpp_extension import ROCM_HOME
+             rocm_ver_file = Path(ROCM_HOME).joinpath(".info/version-dev")
+             if rocm_ver_file.is_file():
+                 with open(rocm_ver_file, 'r') as file:
+                     ROCM_VERSION_DEV_RAW = file.read()
+             elif "rocm" in torch.__version__:
+                 ROCM_VERSION_DEV_RAW = torch.__version__.split("rocm")[1]
+             else:
+                 assert False, "Could not detect ROCm version"
+             assert ROCM_VERSION_DEV_RAW != "", "Could not detect ROCm version"
+             ROCM_MAJOR = ROCM_VERSION_DEV_RAW.split('.')[0]
+             ROCM_MINOR = ROCM_VERSION_DEV_RAW.split('.')[1]
+             OpBuilder._rocm_version = (int(ROCM_MAJOR), int(ROCM_MINOR))
+         return OpBuilder._rocm_version
+
+     def include_paths(self):
+         '''
+         Returns list of include paths, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed)
+         '''
+         return []
+
+     def nvcc_args(self):
+         '''
+         Returns optional list of compiler flags to forward to nvcc when building CUDA sources
+         '''
+         return []
+
+     def cxx_args(self):
+         '''
+         Returns optional list of compiler flags to forward to the build
+         '''
+         return []
+
+     def is_compatible(self, verbose=True):
+         '''
+         Check if all non-python dependencies are satisfied to build this op
+         '''
+         return True
+
+     def extra_ldflags(self):
+         return []
+
+     def has_function(self, funcname, libraries, verbose=False):
+         '''
+         Test for existence of a function within a tuple of libraries.
+
+         This is used as a smoke test to check whether a certain library is available.
+         As a test, this creates a simple C program that calls the specified function,
+         and then distutils is used to compile that program and link it with the specified libraries.
+         Returns True if both the compile and link are successful, False otherwise.
+         '''
+         tempdir = None  # we create a temporary directory to hold various files
+         filestderr = None  # handle to open file to which we redirect stderr
+         oldstderr = None  # file descriptor for stderr
+         try:
+             # Echo compile and link commands that are used.
+             if verbose:
+                 distutils.log.set_verbosity(1)
+
+             # Create a compiler object.
+             compiler = distutils.ccompiler.new_compiler(verbose=verbose)
+
+             # Configure compiler and linker to build according to Python install.
+             distutils.sysconfig.customize_compiler(compiler)
+
+             # Create a temporary directory to hold test files.
+             tempdir = tempfile.mkdtemp()
+
+             # Define a simple C program that calls the function in question
+             prog = "void %s(void); int main(int argc, char** argv) { %s(); return 0; }" % (funcname, funcname)
+
+             # Write the test program to a file.
+             filename = os.path.join(tempdir, 'test.c')
+             with open(filename, 'w') as f:
+                 f.write(prog)
+
+             # Redirect stderr file descriptor to a file to silence compile/link warnings.
+             if not verbose:
+                 filestderr = open(os.path.join(tempdir, 'stderr.txt'), 'w')
+                 oldstderr = os.dup(sys.stderr.fileno())
+                 os.dup2(filestderr.fileno(), sys.stderr.fileno())
+
+             # Workaround for behavior in distutils.ccompiler.CCompiler.object_filenames()
+             # Otherwise, a local directory will be used instead of tempdir
+             drive, driveless_filename = os.path.splitdrive(filename)
+             root_dir = driveless_filename[0] if os.path.isabs(driveless_filename) else ''
+             output_dir = os.path.join(drive, root_dir)
+
+             # Attempt to compile the C program into an object file.
+             cflags = shlex.split(os.environ.get('CFLAGS', ""))
+             objs = compiler.compile([filename], output_dir=output_dir, extra_preargs=self.strip_empty_entries(cflags))
+
+             # Attempt to link the object file into an executable.
+             # Be sure to tack on any libraries that have been specified.
+             ldflags = shlex.split(os.environ.get('LDFLAGS', ""))
+             compiler.link_executable(objs,
+                                      os.path.join(tempdir, 'a.out'),
+                                      extra_preargs=self.strip_empty_entries(ldflags),
+                                      libraries=libraries)
+
+             # Compile and link succeeded
+             return True
+
+         except CompileError:
+             return False
+
+         except LinkError:
+             return False
+
+         except:
+             return False
+
+         finally:
+             # Restore stderr file descriptor and close the stderr redirect file.
+             if oldstderr is not None:
+                 os.dup2(oldstderr, sys.stderr.fileno())
+             if filestderr is not None:
+                 filestderr.close()
+
+             # Delete the temporary directory holding the test program and stderr files.
+             if tempdir is not None:
+                 shutil.rmtree(tempdir)
+
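has_function doubles as a general library smoke test. A sketch, assuming a working C toolchain (AsyncIOBuilder is used here only as a concrete OpBuilder subclass; probing libc for printf is illustrative):

    from deepspeed.ops.op_builder import AsyncIOBuilder

    print(AsyncIOBuilder().has_function('printf', ('c', )))  # True if `cc ... -lc` links
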
340
+ def strip_empty_entries(self, args):
341
+ '''
342
+ Drop any empty strings from the list of compile and link flags
343
+ '''
344
+ return [x for x in args if len(x) > 0]
345
+
346
+ def cpu_arch(self):
347
+ try:
348
+ from cpuinfo import get_cpu_info
349
+ except ImportError as e:
350
+ cpu_info = self._backup_cpuinfo()
351
+ if cpu_info is None:
352
+ return "-march=native"
353
+
354
+ try:
355
+ cpu_info = get_cpu_info()
356
+ except Exception as e:
357
+ self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
358
+ "falling back to `lscpu` to get this information.")
359
+ cpu_info = self._backup_cpuinfo()
360
+ if cpu_info is None:
361
+ return "-march=native"
362
+
363
+ if cpu_info['arch'].startswith('PPC_'):
364
+ # gcc does not provide -march on PowerPC, use -mcpu instead
365
+ return '-mcpu=native'
366
+ return '-march=native'
367
+
368
+ def is_cuda_enable(self):
369
+ try:
370
+ assert_no_cuda_mismatch(self.name)
371
+ return '-D__ENABLE_CUDA__'
372
+ except MissingCUDAException:
373
+ print(f"{WARNING} {self.name} cuda is missing or is incompatible with installed torch, "
374
+ "only cpu ops can be compiled!")
375
+ return '-D__DISABLE_CUDA__'
376
+ return '-D__DISABLE_CUDA__'
377
+
378
+ def _backup_cpuinfo(self):
379
+ # Construct cpu_info dict from lscpu that is similar to what py-cpuinfo provides
380
+ if not self.command_exists('lscpu'):
381
+ self.warning(f"{self.name} attempted to query 'lscpu' after failing to use py-cpuinfo "
382
+ "to detect the CPU architecture. 'lscpu' does not appear to exist on "
383
+ "your system, will fall back to use -march=native and non-vectorized execution.")
384
+ return None
385
+ result = subprocess.check_output('lscpu', shell=True)
386
+ result = result.decode('utf-8').strip().lower()
387
+
388
+ cpu_info = {}
389
+ cpu_info['arch'] = None
390
+ cpu_info['flags'] = ""
391
+ if 'genuineintel' in result or 'authenticamd' in result:
392
+ cpu_info['arch'] = 'X86_64'
393
+ if 'avx512' in result:
394
+ cpu_info['flags'] += 'avx512,'
395
+ elif 'avx512f' in result:
396
+ cpu_info['flags'] += 'avx512f,'
397
+ if 'avx2' in result:
398
+ cpu_info['flags'] += 'avx2'
399
+ elif 'ppc64le' in result:
400
+ cpu_info['arch'] = "PPC_"
401
+
402
+ return cpu_info
403
+
404
+ def simd_width(self):
405
+ try:
406
+ from cpuinfo import get_cpu_info
407
+ except ImportError as e:
408
+ cpu_info = self._backup_cpuinfo()
409
+ if cpu_info is None:
410
+ return '-D__SCALAR__'
411
+
412
+ try:
413
+ cpu_info = get_cpu_info()
414
+ except Exception as e:
415
+ self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
416
+ "falling back to `lscpu` to get this information.")
417
+ cpu_info = self._backup_cpuinfo()
418
+ if cpu_info is None:
419
+ return '-D__SCALAR__'
420
+
421
+ if cpu_info['arch'] == 'X86_64':
422
+ if 'avx512' in cpu_info['flags'] or 'avx512f' in cpu_info['flags']:
423
+ return '-D__AVX512__'
424
+ elif 'avx2' in cpu_info['flags']:
425
+ return '-D__AVX256__'
426
+ return '-D__SCALAR__'
427
+
428
+ def command_exists(self, cmd):
429
+ if '|' in cmd:
430
+ cmds = cmd.split("|")
431
+ else:
432
+ cmds = [cmd]
433
+ valid = False
434
+ for cmd in cmds:
435
+ result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
436
+ valid = valid or result.wait() == 0
437
+
438
+ if not valid and len(cmds) > 1:
439
+ print(f"{WARNING} {self.name} requires one of the following commands '{cmds}', but it does not exist!")
440
+ elif not valid and len(cmds) == 1:
441
+ print(f"{WARNING} {self.name} requires the '{cmd}' command, but it does not exist!")
442
+ return valid
443
+
444
+ def warning(self, msg):
445
+ self.error_log = f"{msg}"
446
+ print(f"{WARNING} {msg}")
447
+
448
+ def deepspeed_src_path(self, code_path):
449
+ if os.path.isabs(code_path):
450
+ return code_path
451
+ else:
452
+ return os.path.join(Path(__file__).parent.parent.absolute(), code_path)
453
+
454
+ def builder(self):
455
+ from torch.utils.cpp_extension import CppExtension
456
+ include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
457
+ return CppExtension(name=self.absolute_name(),
458
+ sources=self.strip_empty_entries(self.sources()),
459
+ include_dirs=include_dirs,
460
+ extra_compile_args={'cxx': self.strip_empty_entries(self.cxx_args())},
461
+ extra_link_args=self.strip_empty_entries(self.extra_ldflags()))
462
+
463
+     def load(self, verbose=True):
+         if self.name in __class__._loaded_ops:
+             return __class__._loaded_ops[self.name]
+
+         from deepspeed.git_version_info import installed_ops, torch_info, accelerator_name
+         from deepspeed.accelerator import get_accelerator
+         if installed_ops.get(self.name, False) and accelerator_name == get_accelerator()._name:
+             # Ensure the op we're about to load was compiled with the same
+             # torch/cuda versions we are currently using at runtime.
+             self.validate_torch_version(torch_info)
+             if torch.cuda.is_available() and isinstance(self, CUDAOpBuilder):
+                 self.validate_torch_op_version(torch_info)
+
+             op_module = importlib.import_module(self.absolute_name())
+             __class__._loaded_ops[self.name] = op_module
+             return op_module
+         else:
+             return self.jit_load(verbose)
+
+     def jit_load(self, verbose=True):
+         if not self.is_compatible(verbose):
+             raise RuntimeError(
+                 f"Unable to JIT load the {self.name} op due to a hardware/software incompatibility. {self.error_log}"
+             )
+         try:
+             import ninja  # noqa: F401 # type: ignore
+         except ImportError:
+             raise RuntimeError(f"Unable to JIT load the {self.name} op due to ninja not being installed.")
+
+         if isinstance(self, CUDAOpBuilder) and not self.is_rocm_pytorch():
+             self.build_for_cpu = not torch.cuda.is_available()
+
+         self.jit_mode = True
+         from torch.utils.cpp_extension import load
+
+         start_build = time.time()
+         sources = [os.path.abspath(self.deepspeed_src_path(path)) for path in self.sources()]
+         extra_include_paths = [os.path.abspath(self.deepspeed_src_path(path)) for path in self.include_paths()]
+
+         # Torch will try to apply whatever CCs are in the arch list at compile time.
+         # We have already set the intended targets ourselves, since we know what will
+         # be needed at runtime. This prevents CC collisions such as multiple __half
+         # implementations. Stash the arch list to reset it after the build.
+         torch_arch_list = None
+         if "TORCH_CUDA_ARCH_LIST" in os.environ:
+             torch_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST")
+             os.environ["TORCH_CUDA_ARCH_LIST"] = ""
+
+         nvcc_args = self.strip_empty_entries(self.nvcc_args())
+         cxx_args = self.strip_empty_entries(self.cxx_args())
+
+         if isinstance(self, CUDAOpBuilder):
+             if not self.build_for_cpu and self.enable_bf16:
+                 cxx_args.append("-DBF16_AVAILABLE")
+                 nvcc_args.append("-DBF16_AVAILABLE")
+                 nvcc_args.append("-U__CUDA_NO_BFLOAT16_OPERATORS__")
+                 nvcc_args.append("-U__CUDA_NO_BFLOAT162_OPERATORS__")
+
+             if self.is_rocm_pytorch():
+                 cxx_args.append("-D__HIP_PLATFORM_AMD__=1")
+
+         op_module = load(name=self.name,
+                          sources=self.strip_empty_entries(sources),
+                          extra_include_paths=self.strip_empty_entries(extra_include_paths),
+                          extra_cflags=cxx_args,
+                          extra_cuda_cflags=nvcc_args,
+                          extra_ldflags=self.strip_empty_entries(self.extra_ldflags()),
+                          verbose=verbose)
+
+         build_duration = time.time() - start_build
+         if verbose:
+             print(f"Time to load {self.name} op: {build_duration} seconds")
+
+         # Reset the arch list so we are not silently removing it for other use cases.
+         if torch_arch_list:
+             os.environ["TORCH_CUDA_ARCH_LIST"] = torch_arch_list
+
+         __class__._loaded_ops[self.name] = op_module
+
+         return op_module
+
+
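A minimal usage sketch of the load path above (FusedAdamBuilder is just one concrete subclass; any OpBuilder follows the same flow):

    from deepspeed.ops.op_builder import FusedAdamBuilder

    # Returns the prebuilt extension module if the op was compiled at install time and
    # the accelerator matches; otherwise falls back to jit_load(), which compiles the
    # sources with ninja on first use and caches the result in _loaded_ops.
    fused_adam_op = FusedAdamBuilder().load(verbose=True)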
+ class CUDAOpBuilder(OpBuilder):
+
+     def compute_capability_args(self, cross_compile_archs=None):
+         """
+         Returns nvcc compute capability compile flags.
+
+         1. `TORCH_CUDA_ARCH_LIST` takes priority over `cross_compile_archs`.
+         2. If neither is set, default compute capabilities will be used.
+         3. Under `jit_mode`, the compute capabilities of all visible cards will be used, plus PTX.
+
+         Format:
+
+         - `TORCH_CUDA_ARCH_LIST` may use ; or whitespace separators. Examples:
+
+           TORCH_CUDA_ARCH_LIST="6.1;7.5;8.6" pip install ...
+           TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" pip install ...
+
+         - `cross_compile_archs` uses ; separator.
+         """
+         ccs = []
+         if self.jit_mode:
+             # Compile for underlying architectures since we know those at runtime
+             for i in range(torch.cuda.device_count()):
+                 CC_MAJOR, CC_MINOR = torch.cuda.get_device_capability(i)
+                 cc = f"{CC_MAJOR}.{CC_MINOR}"
+                 if cc not in ccs:
+                     ccs.append(cc)
+             ccs = sorted(ccs)
+             ccs[-1] += '+PTX'
+         else:
+             # Cross-compile mode, compile for various architectures
+             # env override takes priority
+             cross_compile_archs_env = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
+             if cross_compile_archs_env is not None:
+                 if cross_compile_archs is not None:
+                     print(
+                         f"{WARNING} env var `TORCH_CUDA_ARCH_LIST={cross_compile_archs_env}` overrides `cross_compile_archs={cross_compile_archs}`"
+                     )
+                 cross_compile_archs = cross_compile_archs_env.replace(' ', ';')
+             else:
+                 if cross_compile_archs is None:
+                     cross_compile_archs = get_default_compute_capabilities()
+             ccs = cross_compile_archs.split(';')
+
+         ccs = self.filter_ccs(ccs)
+         if len(ccs) == 0:
+             raise RuntimeError(
+                 f"Unable to load {self.name} op due to no compute capabilities remaining after filtering")
+
+         args = []
+         self.enable_bf16 = True
+         for cc in ccs:
+             num = cc[0] + cc[2]
+             args.append(f'-gencode=arch=compute_{num},code=sm_{num}')
+             if cc.endswith('+PTX'):
+                 args.append(f'-gencode=arch=compute_{num},code=compute_{num}')
+
+             if int(cc[0]) <= 7:
+                 self.enable_bf16 = False
+
+         return args
+
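A worked example of the flag construction above: with ccs = ['8.0', '8.6+PTX'], num is formed from the major and minor digits (cc[0] + cc[2]), so the method returns

    -gencode=arch=compute_80,code=sm_80
    -gencode=arch=compute_86,code=sm_86
    -gencode=arch=compute_86,code=compute_86

where the last entry is the PTX fallback produced by the '+PTX' suffix, and enable_bf16 stays True because both majors are 8.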
+     def filter_ccs(self, ccs: List[str]):
+         """
+         Prune any compute capabilities that are not compatible with the builder. Should log
+         which CCs have been pruned.
+         """
+         return ccs
+
+     def version_dependent_macros(self):
+         # Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456
+         version_ge_1_1 = []
+         if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
+             version_ge_1_1 = ['-DVERSION_GE_1_1']
+         version_ge_1_3 = []
+         if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
+             version_ge_1_3 = ['-DVERSION_GE_1_3']
+         version_ge_1_5 = []
+         if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
+             version_ge_1_5 = ['-DVERSION_GE_1_5']
+         return version_ge_1_1 + version_ge_1_3 + version_ge_1_5
+
+     def is_compatible(self, verbose=True):
+         return super().is_compatible(verbose)
+
+     def builder(self):
+         try:
+             if not self.is_rocm_pytorch():
+                 assert_no_cuda_mismatch(self.name)
+             self.build_for_cpu = False
+         except MissingCUDAException:
+             self.build_for_cpu = True
+
+         if self.build_for_cpu:
+             from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
+         else:
+             from torch.utils.cpp_extension import CUDAExtension as ExtensionBuilder
+         include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
+         compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())} if self.build_for_cpu else \
+                        {'cxx': self.strip_empty_entries(self.cxx_args()),
+                         'nvcc': self.strip_empty_entries(self.nvcc_args())}
+
+         if not self.build_for_cpu and self.enable_bf16:
+             compile_args['cxx'].append("-DBF16_AVAILABLE")
+
+         if self.is_rocm_pytorch():
+             compile_args['cxx'].append("-D__HIP_PLATFORM_AMD__=1")
+
+         cuda_ext = ExtensionBuilder(name=self.absolute_name(),
+                                     sources=self.strip_empty_entries(self.sources()),
+                                     include_dirs=include_dirs,
+                                     libraries=self.strip_empty_entries(self.libraries_args()),
+                                     extra_compile_args=compile_args,
+                                     extra_link_args=self.strip_empty_entries(self.extra_ldflags()))
+
+         if self.is_rocm_pytorch():
+             # hip converts paths to absolute, this converts back to relative
+             sources = cuda_ext.sources
+             curr_file = Path(__file__).parent.parent  # ds root
+             for i in range(len(sources)):
+                 src = Path(sources[i])
+                 if src.is_absolute():
+                     sources[i] = str(src.relative_to(curr_file))
+                 else:
+                     sources[i] = str(src)
+             cuda_ext.sources = sources
+         return cuda_ext
+
+     def hipify_extension(self):
+         if self.is_rocm_pytorch():
+             from torch.utils.hipify import hipify_python
+             hipify_python.hipify(
+                 project_directory=os.getcwd(),
+                 output_directory=os.getcwd(),
+                 header_include_dirs=self.include_paths(),
+                 includes=[os.path.join(os.getcwd(), '*')],
+                 extra_files=[os.path.abspath(s) for s in self.sources()],
+                 show_detailed=True,
+                 is_pytorch_extension=True,
+                 hipify_extra_files_only=True,
+             )
+
+     def cxx_args(self):
+         if sys.platform == "win32":
+             return ['-O2']
+         else:
+             return ['-O3', '-std=c++17', '-g', '-Wno-reorder']
+
+     def nvcc_args(self):
+         if self.build_for_cpu:
+             return []
+         args = ['-O3']
+         if self.is_rocm_pytorch():
+             ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version()
+             args += [
+                 '-std=c++17', '-U__HIP_NO_HALF_OPERATORS__', '-U__HIP_NO_HALF_CONVERSIONS__',
+                 '-U__HIP_NO_HALF2_OPERATORS__',
+                 '-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR,
+                 '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR
+             ]
+         else:
+             try:
+                 nvcc_threads = int(os.getenv("DS_NVCC_THREADS", ""))
+                 if nvcc_threads <= 0:
+                     raise ValueError("")
+             except ValueError:
+                 nvcc_threads = min(os.cpu_count(), 8)
+
+             cuda_major, _ = installed_cuda_version()
+             args += [
+                 '-allow-unsupported-compiler' if sys.platform == "win32" else '', '--use_fast_math',
+                 '-std=c++17' if cuda_major > 10 else '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__',
+                 '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__', f'--threads={nvcc_threads}'
+             ]
+             if os.environ.get('DS_DEBUG_CUDA_BUILD', '0') == '1':
+                 args.append('--ptxas-options=-v')
+         args += self.compute_capability_args()
+         return args
+
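The two environment knobs read by nvcc_args() can be set before triggering a JIT build; a small hypothetical session:

    import os
    os.environ["DS_NVCC_THREADS"] = "4"      # caps nvcc --threads; unset or <= 0 falls back to min(os.cpu_count(), 8)
    os.environ["DS_DEBUG_CUDA_BUILD"] = "1"  # appends --ptxas-options=-v for verbose ptxas output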
+     def libraries_args(self):
+         if self.build_for_cpu:
+             return []
+
+         if sys.platform == "win32":
+             return ['cublas', 'curand']
+         else:
+             return []
+
+
+ class TorchCPUOpBuilder(CUDAOpBuilder):
+
+     def extra_ldflags(self):
+         if self.build_for_cpu:
+             return ['-fopenmp']
+
+         if not self.is_rocm_pytorch():
+             return ['-lcurand']
+
+         return []
+
+     def cxx_args(self):
+         import torch
+         args = []
+         if not self.build_for_cpu:
+             if not self.is_rocm_pytorch():
+                 CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib64")
+                 if not os.path.exists(CUDA_LIB64):
+                     CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib")
+             else:
+                 CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.ROCM_HOME, "lib")
+
+             args += super().cxx_args()
+             args += [
+                 f'-L{CUDA_LIB64}',
+                 '-lcudart',
+                 '-lcublas',
+                 '-g',
+             ]
+
+         CPU_ARCH = self.cpu_arch()
+         SIMD_WIDTH = self.simd_width()
+         CUDA_ENABLE = self.is_cuda_enable()
+         args += [
+             CPU_ARCH,
+             '-fopenmp',
+             SIMD_WIDTH,
+             CUDA_ENABLE,
+         ]
+
+         return args
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__init__.py ADDED
@@ -0,0 +1,10 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ '''Copyright The Microsoft DeepSpeed Team'''
+
+ from .comm import CCLCommBuilder, ShareMemCommBuilder
+ from .fused_adam import FusedAdamBuilder
+ from .cpu_adam import CPUAdamBuilder
+ from .no_impl import NotImplementedBuilder
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (475 Bytes).
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/builder.cpython-310.pyc ADDED
Binary file (1.48 kB).
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/comm.cpython-310.pyc ADDED
Binary file (2.9 kB).
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/cpu_adam.cpython-310.pyc ADDED
Binary file (1.32 kB).
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/fused_adam.cpython-310.pyc ADDED
Binary file (1.18 kB).
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/__pycache__/no_impl.cpython-310.pyc ADDED
Binary file (1.23 kB).
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/builder.py ADDED
@@ -0,0 +1,36 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import os
+
+ try:
+     # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
+     # if successful this also means we're doing a local install and not JIT compile path
+     from op_builder import __deepspeed__  # noqa: F401 # type: ignore
+     from op_builder.builder import OpBuilder
+ except ImportError:
+     from deepspeed.ops.op_builder.builder import OpBuilder
+
+
+ class CPUOpBuilder(OpBuilder):
+
+     def builder(self):
+         from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
+         include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())]
+         compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())}
+
+         cpp_ext = ExtensionBuilder(name=self.absolute_name(),
+                                    sources=self.strip_empty_entries(self.sources()),
+                                    include_dirs=include_dirs,
+                                    libraries=self.strip_empty_entries(self.libraries_args()),
+                                    extra_compile_args=compile_args)
+
+         return cpp_ext
+
+     def cxx_args(self):
+         return ['-O3', '-g', '-Wno-reorder']
+
+     def libraries_args(self):
+         return []
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/comm.py ADDED
@@ -0,0 +1,71 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import os
+ from .builder import CPUOpBuilder
+
+
+ class CCLCommBuilder(CPUOpBuilder):
+     BUILD_VAR = "DS_BUILD_CCL_COMM"
+     NAME = "deepspeed_ccl_comm"
+
+     def __init__(self, name=None):
+         name = self.NAME if name is None else name
+         super().__init__(name=name)
+
+     def absolute_name(self):
+         return f'deepspeed.ops.comm.{self.NAME}_op'
+
+     def sources(self):
+         return ['csrc/cpu/comm/ccl.cpp', 'csrc/cpu/comm/shm.cpp']
+
+     def include_paths(self):
+         includes = ['csrc/cpu/includes']
+         return includes
+
+     def cxx_args(self):
+         return ['-O2', '-fopenmp']
+
+     def is_compatible(self, verbose=True):
+         # TODO: add soft compatibility check for private binary release.
+         # a soft check, as in we know it can be trivially changed.
+         return super().is_compatible(verbose)
+
+     def extra_ldflags(self):
+         ccl_root_path = os.environ.get("CCL_ROOT")
+         if ccl_root_path is None:
+             raise ValueError(
+                 "Didn't find CCL_ROOT, install oneCCL from https://github.com/oneapi-src/oneCCL and source its environment variable"
+             )
+         else:
+             return ['-lccl', f'-L{ccl_root_path}/lib']
+
+
+ class ShareMemCommBuilder(CPUOpBuilder):
+     BUILD_VAR = "DS_BUILD_SHM_COMM"
+     NAME = "deepspeed_shm_comm"
+
+     def __init__(self, name=None):
+         name = self.NAME if name is None else name
+         super().__init__(name=name)
+
+     def absolute_name(self):
+         return f'deepspeed.ops.comm.{self.NAME}_op'
+
+     def sources(self):
+         return ['csrc/cpu/comm/shm_interface.cpp', 'csrc/cpu/comm/shm.cpp']
+
+     def include_paths(self):
+         includes = ['csrc/cpu/includes']
+         return includes
+
+     def cxx_args(self):
+         return ['-O2', '-fopenmp']
+
+     def is_compatible(self, verbose=True):
+         # TODO: add soft compatibility check for private binary release.
+         # a soft check, as in we know it can be trivially changed.
+         return super().is_compatible(verbose)
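Building the comm ops above requires CCL_ROOT to point at a oneCCL installation; a hypothetical setup sketch (the install path is illustrative):

    import os
    # Usually exported by sourcing oneCCL's setvars.sh; shown inline here for clarity.
    os.environ.setdefault("CCL_ROOT", "/opt/intel/oneccl")
    ldflags = CCLCommBuilder().extra_ldflags()  # ['-lccl', '-L/opt/intel/oneccl/lib']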
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/cpu_adam.py ADDED
@@ -0,0 +1,27 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .builder import CPUOpBuilder
+
+
+ class CPUAdamBuilder(CPUOpBuilder):
+     BUILD_VAR = "DS_BUILD_CPU_ADAM"
+     NAME = "cpu_adam"
+
+     def __init__(self):
+         super().__init__(name=self.NAME)
+
+     def absolute_name(self):
+         return f'deepspeed.ops.adam.{self.NAME}_op'
+
+     def sources(self):
+         return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp']
+
+     def libraries_args(self):
+         args = super().libraries_args()
+         return args
+
+     def include_paths(self):
+         return ['csrc/includes']
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/fused_adam.py ADDED
@@ -0,0 +1,23 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .builder import CPUOpBuilder
+
+
+ class FusedAdamBuilder(CPUOpBuilder):
+     BUILD_VAR = "DS_BUILD_FUSED_ADAM"
+     NAME = "fused_adam"
+
+     def __init__(self):
+         super().__init__(name=self.NAME)
+
+     def absolute_name(self):
+         return f'deepspeed.ops.adam.{self.NAME}_op'
+
+     def sources(self):
+         return ['csrc/cpu/adam/fused_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp']
+
+     def include_paths(self):
+         return ['csrc/includes']
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu/no_impl.py ADDED
@@ -0,0 +1,24 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .builder import CPUOpBuilder
+
+
+ class NotImplementedBuilder(CPUOpBuilder):
+     BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED"
+     NAME = "deepspeed_not_implemented"
+
+     def __init__(self, name=None):
+         name = self.NAME if name is None else name
+         super().__init__(name=name)
+
+     def absolute_name(self):
+         return f'deepspeed.ops.comm.{self.NAME}_op'
+
+     def load(self, verbose=True):
+         raise ValueError("This op has not been implemented on the CPU backend.")
+
+     def sources(self):
+         return []
venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/cpu_lion.py ADDED
@@ -0,0 +1,48 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import os
+ from .builder import TorchCPUOpBuilder
+
+
+ class CPULionBuilder(TorchCPUOpBuilder):
+     BUILD_VAR = "DS_BUILD_CPU_LION"
+     NAME = "cpu_lion"
+
+     def __init__(self):
+         super().__init__(name=self.NAME)
+
+     def absolute_name(self):
+         return f'deepspeed.ops.lion.{self.NAME}_op'
+
+     def sources(self):
+         if self.build_for_cpu:
+             return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp']
+
+         return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp', 'csrc/common/custom_cuda_kernel.cu']
+
+     def libraries_args(self):
+         args = super().libraries_args()
+         if self.build_for_cpu:
+             return args
+
+         if not self.is_rocm_pytorch():
+             args += ['curand']
+
+         return args
+
+     def include_paths(self):
+         import torch
+         if self.build_for_cpu:
+             CUDA_INCLUDE = []
+         elif not self.is_rocm_pytorch():
+             CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")]
+         else:
+             CUDA_INCLUDE = [
+                 os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include"),
+                 os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "rocrand"),
+                 os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "hiprand"),
+             ]
+         return ['csrc/includes'] + CUDA_INCLUDE