applied-ai-018 committed
Commit dd3126b · verified · 1 parent: 9ad9e91

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. venv/lib/python3.10/site-packages/deepspeed/accelerator/__init__.py +7 -0
  2. venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/cpu_accelerator.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/hpu_accelerator.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/mps_accelerator.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/npu_accelerator.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/xpu_accelerator.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/deepspeed/accelerator/abstract_accelerator.py +297 -0
  8. venv/lib/python3.10/site-packages/deepspeed/accelerator/cpu_accelerator.py +332 -0
  9. venv/lib/python3.10/site-packages/deepspeed/accelerator/cuda_accelerator.py +369 -0
  10. venv/lib/python3.10/site-packages/deepspeed/accelerator/hpu_accelerator.py +303 -0
  11. venv/lib/python3.10/site-packages/deepspeed/accelerator/mps_accelerator.py +269 -0
  12. venv/lib/python3.10/site-packages/deepspeed/accelerator/npu_accelerator.py +287 -0
  13. venv/lib/python3.10/site-packages/deepspeed/accelerator/real_accelerator.py +257 -0
  14. venv/lib/python3.10/site-packages/deepspeed/accelerator/xpu_accelerator.py +298 -0
  15. venv/lib/python3.10/site-packages/deepspeed/autotuning/__init__.py +6 -0
  16. venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/autotuner.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/config.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/constants.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/scheduler.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/utils.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/deepspeed/autotuning/autotuner.py +1113 -0
  23. venv/lib/python3.10/site-packages/deepspeed/autotuning/config.py +98 -0
  24. venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero0.json +5 -0
  25. venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero1.json +7 -0
  26. venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero2.json +11 -0
  27. venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero3.json +17 -0
  28. venv/lib/python3.10/site-packages/deepspeed/autotuning/constants.py +185 -0
  29. venv/lib/python3.10/site-packages/deepspeed/autotuning/scheduler.py +433 -0
  30. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__init__.py +8 -0
  31. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/__init__.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/base_tuner.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/cost_model.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/index_based_tuner.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/model_based_tuner.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/utils.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/base_tuner.py +72 -0
  38. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/cost_model.py +66 -0
  39. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/index_based_tuner.py +40 -0
  40. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/model_based_tuner.py +157 -0
  41. venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/utils.py +86 -0
  42. venv/lib/python3.10/site-packages/deepspeed/autotuning/utils.py +459 -0
  43. venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/config.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/constants.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/elastic_agent.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/elasticity.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/deepspeed/ops/__init__.py +15 -0
  49. venv/lib/python3.10/site-packages/deepspeed/ops/aio/__init__.py +6 -0
  50. venv/lib/python3.10/site-packages/deepspeed/ops/aio/__pycache__/__init__.cpython-310.pyc +0 -0
venv/lib/python3.10/site-packages/deepspeed/accelerator/__init__.py ADDED
@@ -0,0 +1,7 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .abstract_accelerator import DeepSpeedAccelerator
+from .real_accelerator import get_accelerator, set_accelerator, is_current_accelerator_supported
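The names re-exported here are the package's public entry points: get_accelerator() auto-detects the runtime accelerator and set_accelerator() overrides that detection. A minimal usage sketch (illustrative only, not part of this commit; assumes DeepSpeed is importable):

    from deepspeed.accelerator import get_accelerator

    acc = get_accelerator()                  # e.g. CUDA_Accelerator on an NVIDIA box
    print(acc.device_name(0))                # 'cuda:0', 'hpu:0', 'npu:0', or 'cpu'
    print(acc.communication_backend_name())  # e.g. 'nccl' on CUDA; 'ccl' or 'gloo' on CPU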
venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/cpu_accelerator.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/hpu_accelerator.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/mps_accelerator.cpython-310.pyc ADDED
Binary file (11 kB).
 
venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/npu_accelerator.cpython-310.pyc ADDED
Binary file (11.8 kB).
 
venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/xpu_accelerator.cpython-310.pyc ADDED
Binary file (12.1 kB).
 
venv/lib/python3.10/site-packages/deepspeed/accelerator/abstract_accelerator.py ADDED
@@ -0,0 +1,297 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import abc
+from abc import ABC
+
+
+class DeepSpeedAccelerator(ABC):
+
+    def __init__(self):
+        self._name = None
+        self._communication_backend_name = None
+
+    @abc.abstractmethod
+    def is_synchronized_device(self):
+        ...
+
+    @abc.abstractmethod
+    def use_host_timers(self):
+        ...
+
+    @abc.abstractmethod
+    def resolves_data_dependency(self):
+        ...
+
+    @abc.abstractmethod
+    def handles_memory_backpressure(self):
+        ...
+
+    # Device APIs
+    @abc.abstractmethod
+    def device_name(self, device_index):
+        ...
+
+    @abc.abstractmethod
+    def device(self, device_index):
+        ...
+
+    @abc.abstractmethod
+    def set_device(self, device_index):
+        ...
+
+    @abc.abstractmethod
+    def current_device(self):
+        ...
+
+    @abc.abstractmethod
+    def current_device_name(self):
+        ...
+
+    @abc.abstractmethod
+    def device_count(self):
+        ...
+
+    @abc.abstractmethod
+    def synchronize(self, device_index=None):
+        ...
+
+    # RNG APIs
+    @abc.abstractmethod
+    def random(self):
+        ...
+
+    @abc.abstractmethod
+    def set_rng_state(self, new_state, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def get_rng_state(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def manual_seed(self, seed):
+        ...
+
+    @abc.abstractmethod
+    def manual_seed_all(self, seed):
+        ...
+
+    @abc.abstractmethod
+    def initial_seed(self, seed):
+        ...
+
+    @abc.abstractmethod
+    def default_generator(self, device_index):
+        ...
+
+    # Streams/Events
+    @property
+    @abc.abstractmethod
+    def Stream(self):
+        ...
+
+    @abc.abstractmethod
+    def stream(self, stream):
+        ...
+
+    @abc.abstractmethod
+    def current_stream(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def default_stream(self, device_index=None):
+        ...
+
+    @property
+    @abc.abstractmethod
+    def Event(self):
+        ...
+
+    # Memory management
+    @abc.abstractmethod
+    def empty_cache(self):
+        ...
+
+    @abc.abstractmethod
+    def memory_allocated(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def max_memory_allocated(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def reset_max_memory_allocated(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def memory_cached(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def max_memory_cached(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def reset_max_memory_cached(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def memory_stats(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def reset_peak_memory_stats(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def memory_reserved(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def max_memory_reserved(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def total_memory(self, device_index=None):
+        ...
+
+    @abc.abstractmethod
+    def available_memory(self, device_index=None):
+        ...
+
+    # Data types
+    @abc.abstractmethod
+    def is_bf16_supported(self):
+        ...
+
+    @abc.abstractmethod
+    def is_fp16_supported(self):
+        ...
+
+    @abc.abstractmethod
+    def supported_dtypes(self):
+        ...
+
+    # Misc
+    @abc.abstractmethod
+    def amp(self):
+        ...
+
+    @abc.abstractmethod
+    def is_available(self):
+        ...
+
+    @abc.abstractmethod
+    def range_push(self, msg):
+        ...
+
+    @abc.abstractmethod
+    def range_pop(self):
+        ...
+
+    @abc.abstractmethod
+    def lazy_call(self, callback):
+        ...
+
+    @abc.abstractmethod
+    def communication_backend_name(self):
+        ...
+
+    @abc.abstractmethod
+    def is_triton_supported(self):
+        ...
+
+    # Graph operations
+    @abc.abstractmethod
+    def create_graph(self):
+        ...
+
+    @abc.abstractmethod
+    def capture_to_graph(self, graph, pool=None, stream=None):
+        ...
+
+    @abc.abstractmethod
+    def replay_graph(self, graph):
+        ...
+
+    # Tensor operations
+    @property
+    @abc.abstractmethod
+    def BFloat16Tensor(self):
+        ...
+
+    @property
+    @abc.abstractmethod
+    def ByteTensor(self):
+        ...
+
+    @property
+    @abc.abstractmethod
+    def DoubleTensor(self):
+        ...
+
+    @property
+    @abc.abstractmethod
+    def FloatTensor(self):
+        ...
+
+    @property
+    @abc.abstractmethod
+    def HalfTensor(self):
+        ...
+
+    @property
+    @abc.abstractmethod
+    def IntTensor(self):
+        ...
+
+    @property
+    @abc.abstractmethod
+    def LongTensor(self):
+        ...
+
+    @abc.abstractmethod
+    def pin_memory(self, tensor, align_bytes=1):
+        ...
+
+    @abc.abstractmethod
+    def is_pinned(self, tensor):
+        ...
+
+    @abc.abstractmethod
+    def on_accelerator(self, tensor):
+        ...
+
+    @abc.abstractmethod
+    def op_builder_dir(self):
+        ...
+
+    # create an instance of op builder, specified by class_name
+    @abc.abstractmethod
+    def create_op_builder(self, class_name):
+        ...
+
+    # return an op builder class, specified by class_name
+    @abc.abstractmethod
+    def get_op_builder(self, class_name):
+        ...
+
+    @abc.abstractmethod
+    def build_extension(self):
+        ...
+
+    @abc.abstractmethod
+    def export_envs(self):
+        ...
+
+    @abc.abstractmethod
+    def visible_devices_envs(self):
+        ...
+
+    @abc.abstractmethod
+    def set_visible_devices_envs(self, current_env, local_accelerator_ids):
+        ...
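Every concrete backend in this commit implements this interface, so engine code can stay device-agnostic. A minimal consumer sketch (illustrative, not part of this commit):

    from deepspeed.accelerator import get_accelerator

    acc = get_accelerator()
    # On synchronized devices such as CPU, synchronize() is a no-op and
    # use_host_timers() is True; on stream-based devices it flushes pending work.
    if not acc.is_synchronized_device():
        acc.synchronize()
    print(acc.current_device_name())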
venv/lib/python3.10/site-packages/deepspeed/accelerator/cpu_accelerator.py ADDED
@@ -0,0 +1,332 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+from .abstract_accelerator import DeepSpeedAccelerator
+
+try:
+    import oneccl_bindings_for_pytorch  # noqa: F401 # type: ignore
+    oneccl_imported_p = True
+except ImportError as e:
+    oneccl_imported_p = False
+
+import os
+
+
+# accelerator for Intel CPU
+class CPU_Accelerator(DeepSpeedAccelerator):
+
+    def __init__(self):
+        self._name = 'cpu'
+        if oneccl_imported_p:
+            self._communication_backend_name = 'ccl'
+        else:
+            # fallback to gloo if oneccl_binding_for_pytorch is not installed
+            self._communication_backend_name = 'gloo'
+        try:
+            import psutil
+            mem = psutil.Process().memory_info().rss
+            self.max_mem = mem
+        except ImportError as e:
+            self.max_mem = 0
+
+    def is_synchronized_device(self):
+        return True
+
+    def use_host_timers(self):
+        return self.is_synchronized_device()
+
+    def resolves_data_dependency(self):
+        return self.is_synchronized_device()
+
+    def handles_memory_backpressure(self):
+        return self.is_synchronized_device()
+
+    # Device APIs
+    def device_name(self, device_index=None):
+        return 'cpu'
+
+    def device(self, device_index=None):
+        return None
+
+    def set_device(self, device_index):
+        return
+
+    def current_device(self):
+        return os.environ.get('LOCAL_RANK', 0)
+
+    def current_device_name(self):
+        return 'cpu'
+
+    def device_count(self):
+        device_count = int(os.environ.get('LOCAL_SIZE', 0))
+        if device_count > 0:
+            return device_count
+        else:
+            from deepspeed.utils.numa import get_numa_cores
+            # Count NUMA node for number of cpu accelerators. On machine with HBM
+            # In flat mode, HBM is in separate NUMA node with no cores on this node.
+            # Ignore these NUMA nodes with no cores.
+            numa_core_lists = get_numa_cores()
+            numa_count = 0
+            prev_core_list = []
+            for core_list in numa_core_lists:
+                if len(core_list) > 0 and core_list != prev_core_list:
+                    numa_count += 1
+                    prev_core_list = core_list
+            return numa_count
+
+    def synchronize(self, device_index=None):
+        return
+
+    # RNG APIs
+    def random(self):
+        return torch.random
+
+    def set_rng_state(self, new_state, device_index=None):
+        if device_index is None:
+            return torch.set_rng_state(new_state)
+        return torch.set_rng_state(new_state, device_index)
+
+    def get_rng_state(self, device_index=None):
+        return torch.get_rng_state()
+
+    def manual_seed(self, seed):
+        return torch.manual_seed(seed)
+
+    def manual_seed_all(self, seed):
+        return torch.manual_seed(seed)
+
+    def initial_seed(self, seed):
+        return torch.initial_seed(seed)
+
+    def default_generator(self, device_index):
+        return torch.default_generator
+
+    # Streams/Events
+    @property
+    def Stream(self):
+        return None
+
+    def stream(self, stream):
+        from deepspeed.runtime.utils import noop_context
+        return noop_context()
+
+    def current_stream(self, device_index=None):
+        return None
+
+    def default_stream(self, device_index=None):
+        return None
+
+    @property
+    def Event(self):
+        return None
+
+    # Memory management
+    def empty_cache(self):
+        return
+
+    def get_rss(self):
+        import psutil
+        mem = psutil.Process().memory_info().rss
+        if mem > self.max_mem:
+            self.max_mem = mem
+        return mem
+
+    def reset_rss(self):
+        import psutil
+        mem = psutil.Process().memory_info().rss
+        self.max_mem = mem
+        return mem
+
+    def memory_allocated(self, device_index=None):
+        return self.get_rss()
+
+    def max_memory_allocated(self, device_index=None):
+        self.get_rss()
+        return self.max_mem
+
+    def reset_max_memory_allocated(self, device_index=None):
+        self.reset_rss()
+        return
+
+    def memory_cached(self, device_index=None):
+        return self.get_rss()
+
+    def max_memory_cached(self, device_index=None):
+        self.get_rss()
+        return self.max_mem
+
+    def reset_max_memory_cached(self, device_index=None):
+        self.reset_rss()
+        return
+
+    def memory_stats(self, device_index=None):
+        mem = self.get_rss()
+        mem_stat = {}
+        mem_stat['allocated_bytes.all.current'] = mem
+        mem_stat['allocated_bytes.all.peak'] = self.max_mem
+        return mem_stat
+
+    def reset_peak_memory_stats(self, device_index=None):
+        self.reset_rss()
+        return
+
+    def memory_reserved(self, device_index=None):
+        return self.get_rss()
+
+    def max_memory_reserved(self, device_index=None):
+        self.get_rss()
+        return self.max_mem
+
+    def total_memory(self, device_index=None):
+        import psutil
+        return psutil.virtual_memory().total
+
+    def available_memory(self, device_index=None):
+        import psutil
+        return psutil.virtual_memory().available
+
+    # Misc
+    def amp(self):
+        return torch.cpu.amp
+
+    def is_available(self):
+        return True
+
+    def range_push(self, msg):
+        # TODO itt is currently not supported yet
+        # return torch.profiler.itt.range_push(msg)
+        return
+
+    def range_pop(self):
+        # TODO itt is currently not supported yet
+        # return torch.profiler.itt.range_pop()
+        return
+
+    def lazy_call(self, callback):
+        return callback()
+
+    def communication_backend_name(self):
+        return self._communication_backend_name
+
+    def is_triton_supported(self):
+        return False
+
+    # Data types
+    def is_bf16_supported(self):
+        return True
+
+    def is_fp16_supported(self):
+        return False
+
+    def supported_dtypes(self):
+        return [torch.float, torch.bfloat16]
+
+    # Graph operations
+    def create_graph(self):
+        return None
+
+    def capture_to_graph(self, graph, pool=None, stream=None):
+        from deepspeed.runtime.utils import noop_context
+        return noop_context()
+
+    def replay_graph(self, graph):
+        return
+
+    # Tensor operations
+    @property
+    def BFloat16Tensor(self):
+        return torch.BFloat16Tensor
+
+    @property
+    def ByteTensor(self):
+        return torch.ByteTensor
+
+    @property
+    def DoubleTensor(self):
+        return torch.DoubleTensor
+
+    @property
+    def FloatTensor(self):
+        return torch.FloatTensor
+
+    @property
+    def HalfTensor(self):
+        return torch.HalfTensor
+
+    @property
+    def IntTensor(self):
+        return torch.IntTensor
+
+    @property
+    def LongTensor(self):
+        return torch.LongTensor
+
+    def pin_memory(self, tensor, align_bytes=1):
+        return tensor
+
+    def is_pinned(self, tensor):
+        return tensor.is_pinned()
+
+    def op_builder_dir(self):
+        try:
+            # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
+            # if successful this also means we're doing a local install and not JIT compile path
+            from op_builder import __deepspeed__  # noqa: F401 # type: ignore
+            return "op_builder.cpu"
+        except ImportError:
+            return "deepspeed.ops.op_builder.cpu"
+
+    def on_accelerator(self, tensor):
+        device_str = str(tensor.device)
+        if device_str.startswith('cpu'):
+            return True
+        else:
+            return False
+
+    # create an instance of op builder and return, name specified by class_name
+    def create_op_builder(self, op_name):
+        builder_class = self.get_op_builder(op_name)
+        if builder_class is not None:
+            return builder_class()
+        return None
+
+    # return an op builder class, name specified by class_name
+    def get_op_builder(self, class_name):
+        try:
+            # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
+            # if successful this also means we're doing a local install and not JIT compile path
+            from op_builder import __deepspeed__  # noqa: F401 # type: ignore
+            from op_builder.cpu import CCLCommBuilder, ShareMemCommBuilder, FusedAdamBuilder, CPUAdamBuilder, NotImplementedBuilder
+        except ImportError:
+            from deepspeed.ops.op_builder.cpu import CCLCommBuilder, ShareMemCommBuilder, FusedAdamBuilder, CPUAdamBuilder, NotImplementedBuilder
+
+        if class_name == "CCLCommBuilder":
+            return CCLCommBuilder
+        elif class_name == "ShareMemCommBuilder":
+            return ShareMemCommBuilder
+        elif class_name == "FusedAdamBuilder":
+            return FusedAdamBuilder
+        elif class_name == "CPUAdamBuilder":
+            return CPUAdamBuilder
+        else:
+            # return a NotImplementedBuilder to avoid get NoneType[Name] in unit tests
+            return NotImplementedBuilder
+
+    def build_extension(self):
+        from torch.utils.cpp_extension import BuildExtension
+        return BuildExtension
+
+    def export_envs(self):
+        return []
+
+    # TODO: cpu's visible envs is confirmed, keep as CUDA_VISIBLE_DEVICES
+    def visible_devices_envs(self):
+        return ['CUDA_VISIBLE_DEVICES']
+
+    def set_visible_devices_envs(self, current_env, local_accelerator_ids):
+        for env in self.visible_devices_envs():
+            current_env[env] = ",".join(map(str, local_accelerator_ids))
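The CPU backend has no device memory, so memory_allocated()/max_memory_allocated() above are emulated by sampling the process resident set size. A standalone sketch of that get_rss()/reset_rss() pattern (requires psutil; the class name is hypothetical):

    import psutil

    class PeakRssTracker:
        def __init__(self):
            # Start the peak at the current resident set size, as __init__ does.
            self.max_mem = psutil.Process().memory_info().rss

        def sample(self):
            # Mirrors CPU_Accelerator.get_rss(): update the running peak.
            mem = psutil.Process().memory_info().rss
            self.max_mem = max(self.max_mem, mem)
            return mem

        def reset(self):
            # Mirrors reset_rss(): restart the peak from the current RSS.
            self.max_mem = psutil.Process().memory_info().rss
            return self.max_mem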
venv/lib/python3.10/site-packages/deepspeed/accelerator/cuda_accelerator.py ADDED
@@ -0,0 +1,369 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import functools
+import os
+import pkgutil
+import importlib
+
+from .abstract_accelerator import DeepSpeedAccelerator
+# During setup stage torch may not be installed, pass on no torch will
+# allow op builder related API to be executed.
+try:
+    import torch.cuda
+except ImportError:
+    pass
+
+# Delay import pynvml to avoid import error when CUDA is not available
+pynvml = None
+
+
+class CUDA_Accelerator(DeepSpeedAccelerator):
+
+    def __init__(self):
+        self._name = 'cuda'
+        self._communication_backend_name = 'nccl'
+        if pynvml is None:
+            self._init_pynvml()
+
+    def _init_pynvml(self):
+        global pynvml
+        try:
+            import pynvml
+        except ImportError:
+            return
+        try:
+            pynvml.nvmlInit()
+        except pynvml.NVMLError:
+            pynvml = None
+            return
+
+    def is_synchronized_device(self):
+        return False
+
+    def use_host_timers(self):
+        return self.is_synchronized_device()
+
+    def resolves_data_dependency(self):
+        return self.is_synchronized_device()
+
+    def handles_memory_backpressure(self):
+        return self.is_synchronized_device()
+
+    # Device APIs
+    def device_name(self, device_index=None):
+        if device_index is None:
+            return 'cuda'
+        return 'cuda:{}'.format(device_index)
+
+    def device(self, device_index=None):
+        return torch.cuda.device(device_index)
+
+    def set_device(self, device_index):
+        torch.cuda.set_device(device_index)
+
+    def current_device(self):
+        return torch.cuda.current_device()
+
+    def current_device_name(self):
+        return 'cuda:{}'.format(torch.cuda.current_device())
+
+    def device_count(self):
+        return torch.cuda.device_count()
+
+    def synchronize(self, device_index=None):
+        return torch.cuda.synchronize(device_index)
+
+    # RNG APIs
+    def random(self):
+        return torch.random
+
+    def set_rng_state(self, new_state, device_index=None):
+        if device_index is None:
+            return torch.cuda.set_rng_state(new_state)
+
+        return torch.cuda.set_rng_state(new_state, device_index)
+
+    def get_rng_state(self, device_index=None):
+        if device_index is None:
+            return torch.cuda.get_rng_state()
+
+        return torch.cuda.get_rng_state(device_index)
+
+    def manual_seed(self, seed):
+        return torch.cuda.manual_seed(seed)
+
+    def manual_seed_all(self, seed):
+        return torch.cuda.manual_seed_all(seed)
+
+    def initial_seed(self, seed):
+        return torch.cuda.initial_seed(seed)
+
+    def default_generator(self, device_index):
+        return torch.cuda.default_generators[device_index]
+
+    # Streams/Events
+    @property
+    def Stream(self):
+        return torch.cuda.Stream
+
+    def stream(self, stream):
+        return torch.cuda.stream(stream)
+
+    def current_stream(self, device_index=None):
+        return torch.cuda.current_stream(device_index)
+
+    def default_stream(self, device_index=None):
+        return torch.cuda.default_stream(device_index)
+
+    @property
+    def Event(self):
+        return torch.cuda.Event
+
+    # Memory management
+    def empty_cache(self):
+        return torch.cuda.empty_cache()
+
+    def memory_allocated(self, device_index=None):
+        return torch.cuda.memory_allocated(device_index)
+
+    def max_memory_allocated(self, device_index=None):
+        return torch.cuda.max_memory_allocated(device_index)
+
+    def reset_max_memory_allocated(self, device_index=None):
+        return torch.cuda.reset_max_memory_allocated(device_index)
+
+    def memory_cached(self, device_index=None):
+        return torch.cuda.memory_cached(device_index)
+
+    def max_memory_cached(self, device_index=None):
+        return torch.cuda.max_memory_cached(device_index)
+
+    def reset_max_memory_cached(self, device_index=None):
+        return torch.cuda.reset_max_memory_cached(device_index)
+
+    def memory_stats(self, device_index=None):
+        if hasattr(torch.cuda, 'memory_stats'):
+            return torch.cuda.memory_stats(device_index)
+
+    def reset_peak_memory_stats(self, device_index=None):
+        if hasattr(torch.cuda, 'reset_peak_memory_stats'):
+            return torch.cuda.reset_peak_memory_stats(device_index)
+
+    def memory_reserved(self, device_index=None):
+        if hasattr(torch.cuda, 'memory_reserved'):
+            return torch.cuda.memory_reserved(device_index)
+
+    def max_memory_reserved(self, device_index=None):
+        if hasattr(torch.cuda, 'max_memory_reserved'):
+            return torch.cuda.max_memory_reserved(device_index)
+
+    def total_memory(self, device_index=None):
+        return torch.cuda.get_device_properties(device_index).total_memory
+
+    def _get_nvml_gpu_id(self, torch_gpu_id):
+        """
+        credit: https://discuss.pytorch.org/t/making-pynvml-match-torch-device-ids-cuda-visible-devices/103020
+
+        Remap torch device id to nvml device id, respecting CUDA_VISIBLE_DEVICES.
+
+        If the latter isn't set return the same id
+        """
+        # if CUDA_VISIBLE_DEVICES is used automagically remap the id since pynvml ignores this env var
+        if "CUDA_VISIBLE_DEVICES" in os.environ:
+            ids = list(map(int, os.environ.get("CUDA_VISIBLE_DEVICES", "").split(",")))
+            return ids[torch_gpu_id]  # remap
+        else:
+            return torch_gpu_id
+
+    def available_memory(self, device_index=None):
+        if pynvml:
+            if device_index is None:
+                device_index = self.current_device()
+            handle = pynvml.nvmlDeviceGetHandleByIndex(self._get_nvml_gpu_id(device_index))
+            info = pynvml.nvmlDeviceGetMemoryInfo(handle)
+            return info.free
+        else:
+            return self.total_memory(device_index) - self.memory_allocated(device_index)
+
+    # Data types
+    def is_bf16_supported(self):
+        if not torch.cuda.is_available():
+            return True
+        return torch.cuda.is_bf16_supported()
+
+    def is_fp16_supported(self):
+        if not torch.cuda.is_available():
+            return True
+        # See https://docs.nvidia.com/deeplearning/tensorrt/support-matrix/index.html#hardware-precision-matrix
+        # FP16 on compute capability 6.x is deprecated
+        allow_deprecated_fp16 = os.environ.get('DS_ALLOW_DEPRECATED_FP16', '0') == '1'
+        major, _ = torch.cuda.get_device_capability()
+        if major >= 7:
+            return True
+        elif major == 6 and allow_deprecated_fp16:
+            return True
+        else:
+            return False
+
+    def supported_dtypes(self):
+        supported_dtypes = [torch.float]
+        if self.is_fp16_supported():
+            supported_dtypes.append(torch.half)
+        if self.is_bf16_supported():
+            supported_dtypes.append(torch.bfloat16)
+        return supported_dtypes
+
+    # Misc
+    def amp(self):
+        if hasattr(torch.cuda, 'amp'):
+            return torch.cuda.amp
+        return None
+
+    def is_available(self):
+        return torch.cuda.is_available()
+
+    def range_push(self, msg):
+        if hasattr(torch.cuda.nvtx, 'range_push'):
+            return torch.cuda.nvtx.range_push(msg)
+
+    def range_pop(self):
+        if hasattr(torch.cuda.nvtx, 'range_pop'):
+            return torch.cuda.nvtx.range_pop()
+
+    def lazy_call(self, callback):
+        return torch.cuda._lazy_call(callback)
+
+    def communication_backend_name(self):
+        return self._communication_backend_name
+
+    def is_triton_supported(self):
+        major, _ = torch.cuda.get_device_capability()
+        if major >= 8:
+            return True
+        else:
+            return False
+
+    # Graph operations
+    def create_graph(self):
+        return torch.cuda.CUDAGraph()
+
+    def capture_to_graph(self, graph, pool=None, stream=None):
+        return torch.cuda.graph(graph, pool, stream)
+
+    def replay_graph(self, graph):
+        graph.replay()
+        return
+
+    # Tensor operations
+
+    @property
+    def BFloat16Tensor(self):
+        return functools.partial(torch.tensor, dtype=torch.bfloat16, device='cuda')
+
+    @property
+    def ByteTensor(self):
+        return functools.partial(torch.tensor, dtype=torch.uint8, device='cuda')
+
+    @property
+    def DoubleTensor(self):
+        return functools.partial(torch.tensor, dtype=torch.double, device='cuda')
+
+    @property
+    def FloatTensor(self):
+        return functools.partial(torch.tensor, dtype=torch.float, device='cuda')
+
+    @property
+    def HalfTensor(self):
+        return functools.partial(torch.tensor, dtype=torch.half, device='cuda')
+
+    @property
+    def IntTensor(self):
+        return functools.partial(torch.tensor, dtype=torch.int, device='cuda')
+
+    @property
+    def LongTensor(self):
+        return functools.partial(torch.tensor, dtype=torch.long, device='cuda')
+
+    def pin_memory(self, tensor, align_bytes=1):
+        return tensor.pin_memory()
+
+    def is_pinned(self, tensor):
+        return tensor.is_pinned()
+
+    def on_accelerator(self, tensor):
+        device_str = str(tensor.device)
+        if device_str.startswith('cuda:'):
+            return True
+        else:
+            return False
+
+    def op_builder_dir(self):
+        try:
+            # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
+            # if successful this also means we're doing a local install and not JIT compile path
+            from op_builder import __deepspeed__  # noqa: F401 # type: ignore
+            return "op_builder"
+        except ImportError:
+            return "deepspeed.ops.op_builder"
+
+    # dict that holds class name <--> class type mapping i.e.
+    # 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>
+    # this dict will be filled at init stage
+    class_dict = None
+
+    def _lazy_init_class_dict(self):
+        if self.class_dict is not None:
+            return
+        else:
+            self.class_dict = {}
+            # begin initialize for create_op_builder()
+            # put all valid class name <--> class type mapping into class_dict
+            op_builder_dir = self.op_builder_dir()
+            op_builder_module = importlib.import_module(op_builder_dir)
+            op_builder_absolute_path = os.path.dirname(op_builder_module.__file__)
+            for _, module_name, _ in pkgutil.iter_modules([op_builder_absolute_path]):
+                # avoid self references,
+                # skip sub_directories which contains ops for other backend(cpu, npu, etc.).
+                if module_name != 'all_ops' and module_name != 'builder' and not os.path.isdir(
+                        os.path.join(op_builder_absolute_path, module_name)):
+                    module = importlib.import_module("{}.{}".format(op_builder_dir, module_name))
+                    for member_name in module.__dir__():
+                        if member_name.endswith(
+                                'Builder'
+                        ) and member_name != "OpBuilder" and member_name != "CUDAOpBuilder" and member_name != "TorchCPUOpBuilder":  # avoid abstract classes
+                            if not member_name in self.class_dict:
+                                self.class_dict[member_name] = getattr(module, member_name)
+            # end initialize for create_op_builder()
+
+    # create an instance of op builder and return, name specified by class_name
+    def create_op_builder(self, class_name):
+        self._lazy_init_class_dict()
+        if class_name in self.class_dict:
+            return self.class_dict[class_name]()
+        else:
+            return None
+
+    # return an op builder class, name specified by class_name
+    def get_op_builder(self, class_name):
+        self._lazy_init_class_dict()
+        if class_name in self.class_dict:
+            return self.class_dict[class_name]
+        else:
+            return None
+
+    def build_extension(self):
+        from torch.utils.cpp_extension import BuildExtension
+        return BuildExtension
+
+    def export_envs(self):
+        return ['NCCL']
+
+    def visible_devices_envs(self):
+        return ['CUDA_VISIBLE_DEVICES']
+
+    def set_visible_devices_envs(self, current_env, local_accelerator_ids):
+        for env in self.visible_devices_envs():
+            current_env[env] = ",".join(map(str, local_accelerator_ids))
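One subtlety above: pynvml ignores CUDA_VISIBLE_DEVICES, so _get_nvml_gpu_id() translates torch device ids back to physical NVML indices before available_memory() queries free memory. The remap in isolation (illustrative sketch; the function name is hypothetical):

    import os

    def nvml_gpu_id(torch_gpu_id: int) -> int:
        # With CUDA_VISIBLE_DEVICES="2,3", torch id 0 is physical GPU 2.
        visible = os.environ.get("CUDA_VISIBLE_DEVICES")
        if visible is not None:
            return [int(i) for i in visible.split(",")][torch_gpu_id]
        return torch_gpu_id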
venv/lib/python3.10/site-packages/deepspeed/accelerator/hpu_accelerator.py ADDED
@@ -0,0 +1,303 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import os
+import pkgutil
+import importlib
+import torch
+
+from .abstract_accelerator import DeepSpeedAccelerator
+
+
+class HPU_Accelerator(DeepSpeedAccelerator):
+
+    def __init__(self):
+        self._name = 'hpu'
+        self._communication_backend_name = 'hccl'
+        try:
+            import habana_frameworks.torch.hpu as hpu
+            hpu.setDeterministic(True)
+            self.hpu = hpu
+        except ImportError as e:
+            raise ValueError(
+                f"HPU_Accelerator requires habana_frameworks.torch.hpu, which is not installed on this system.")
+
+        self.fp16_supported = None
+
+    # Device APIs
+    def is_synchronized_device(self):
+        return False
+
+    def use_host_timers(self):
+        return False
+
+    def resolves_data_dependency(self):
+        return True
+
+    def handles_memory_backpressure(self):
+        return True
+
+    def device_name(self, device_index=None):
+        if device_index is None:
+            return 'hpu'
+        return 'hpu:{}'.format(device_index)
+
+    def device(self, device_index=None):
+        return torch.device(self.device_name(device_index))
+
+    def set_device(self, device_index):
+        self.hpu.set_device(device_index)
+
+    def current_device(self):
+        return (self.hpu.current_device())
+
+    def current_device_name(self):
+        return 'hpu:{}'.format(self.current_device())
+
+    def device_count(self):
+        return self.hpu.device_count()
+
+    def synchronize(self, device_index=None):
+        return self.hpu.synchronize()
+
+    # RNG APIs
+    def random(self):
+        return torch.random
+
+    def set_rng_state(self, new_state, device_index=None):
+        self.hpu.random.set_rng_state(new_state)
+
+    def get_rng_state(self, device_index=None):
+        return self.hpu.random.get_rng_state()
+
+    def manual_seed(self, seed):
+        self.hpu.random.manual_seed(seed)
+
+    def manual_seed_all(self, seed):
+        self.hpu.random.manual_seed_all(seed)
+
+    def initial_seed(self, seed):
+        self.hpu.random.initial_seed(seed)
+
+    def default_generator(self, device_index):
+        return self.hpu.random.default_generators[device_index]
+
+    # Streams/Events
+    @property
+    def Stream(self):
+        return self.hpu.Stream
+
+    def stream(self, stream):
+        return self.hpu.stream(stream)
+
+    def current_stream(self, device_index=None):
+        return self.hpu.current_stream()
+
+    def default_stream(self, device_index=None):
+        return self.hpu.default_stream()
+
+    @property
+    def Event(self):
+        import habana_frameworks.torch.core as htcore
+        return htcore.hpu.Event
+
+    # Memory management
+    def empty_cache(self):
+        return
+
+    def memory_allocated(self, device_index=None):
+        return self.hpu.memory_allocated()
+
+    def max_memory_allocated(self, device_index=None):
+        return self.hpu.max_memory_allocated()
+
+    def reset_max_memory_allocated(self, device_index=None):
+        return self.hpu.reset_max_memory_allocated()
+
+    def memory_cached(self, device_index=None):
+        return self.hpu.memory_cached(device_index)
+
+    def max_memory_cached(self, device_index=None):
+        return self.hpu.max_memory_cached(device_index)
+
+    def reset_max_memory_cached(self, device_index=None):
+        return None
+
+    def memory_stats(self, device_index=None):
+        return self.hpu.memory_stats(device_index)
+
+    def reset_peak_memory_stats(self, device_index=None):
+        self.hpu.reset_peak_memory_stats(device_index)
+
+    def memory_reserved(self, device_index=None):
+        return self.hpu.memory_reserved(device_index)
+
+    def max_memory_reserved(self, device_index=None):
+        return self.hpu.max_memory_reserved(device_index)
+
+    def total_memory(self, device_index=None):
+        return self.memory_stats(device_index)['Limit']
+
+    def available_memory(self, device_index=None):
+        return self.total_memory(device_index) - self.memory_allocated(device_index)
+
+    # Data types
+    def is_bf16_supported(self):
+        return True
+
+    def is_fp16_supported(self):
+        if self.fp16_supported is None:
+            import habana_frameworks.torch.utils.experimental as htexp
+            self.fp16_supported = htexp._is_fp16_supported()
+        return self.fp16_supported
+
+    def supported_dtypes(self):
+        supported_dtypes = [torch.float, torch.bfloat16]
+        if self.is_fp16_supported():
+            supported_dtypes.append(torch.half)
+        return supported_dtypes
+
+    # Misc
+    def amp(self):
+        return None
+
+    def is_available(self):
+        return self.hpu.is_available()
+
+    def range_push(self, msg):
+        return
+
+    def range_pop(self):
+        return
+
+    def lazy_call(self, callback):
+        callback()
+
+    def communication_backend_name(self):
+        return self._communication_backend_name
+
+    def is_triton_supported(self):
+        return False
+
+    # Graph operations
+    def create_graph(self):
+        return self.hpu.HPUGraph()
+
+    def capture_to_graph(self, graph, pool=None, stream=None):
+        return self.hpu.graph(graph, stream=stream)
+
+    def replay_graph(self, graph):
+        graph.replay()
+        return
+
+    # Tensor operations
+    @property
+    def BFloat16Tensor(self):
+        return self.hpu.BFloat16Tensor
+
+    @property
+    def ByteTensor(self):
+        return self.hpu.ByteTensor
+
+    @property
+    def DoubleTensor(self):
+        return self.hpu.DoubleTensor
+
+    @property
+    def FloatTensor(self):
+        return self.hpu.FloatTensor
+
+    @property
+    def HalfTensor(self):
+        return self.hpu.HalfTensor
+
+    @property
+    def IntTensor(self):
+        return self.hpu.IntTensor
+
+    @property
+    def LongTensor(self):
+        return self.hpu.LongTensor
+
+    def pin_memory(self, tensor, align_bytes=1):
+        return tensor.pin_memory(self.device())
+
+    def is_pinned(self, tensor):
+        return tensor.is_pinned()
+
+    def on_accelerator(self, tensor):
+        device_str = str(tensor.device)
+        if device_str.startswith('hpu:'):
+            return True
+        else:
+            return False
+
+    def op_builder_dir(self):
+        try:
+            # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
+            # if successful this also means we're doing a local install and not JIT compile path
+            from op_builder import __deepspeed__  # noqa: F401 # type: ignore
+            return "op_builder.hpu"
+        except ImportError:
+            return "deepspeed.ops.op_builder.hpu"
+
+    # dict that holds class name <--> class type mapping i.e.
+    # 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>
+    # this dict will be filled at init stage
+    class_dict = None
+
+    def _lazy_init_class_dict(self):
+        if self.class_dict is not None:
+            return
+        else:
+            self.class_dict = {}
+            # begin initialize for create_op_builder()
+            # put all valid class name <--> class type mapping into class_dict
+            op_builder_dir = self.op_builder_dir()
+            op_builder_module = importlib.import_module(op_builder_dir)
+            op_builder_absolute_path = os.path.dirname(op_builder_module.__file__)
+            for _, module_name, _ in pkgutil.iter_modules([op_builder_absolute_path]):
+                # avoid self references,
+                # skip sub_directories which contains ops for other backend(cpu, npu, etc.).
+                if module_name != 'all_ops' and module_name != 'builder' and not os.path.isdir(
+                        os.path.join(op_builder_absolute_path, module_name)):
+                    module = importlib.import_module("{}.{}".format(op_builder_dir, module_name))
+                    for member_name in module.__dir__():
+                        if member_name.endswith(
+                                'Builder'
+                        ) and member_name != "OpBuilder" and member_name != "CPUOpBuilder" and member_name != "TorchCPUOpBuilder":  # avoid abstract classes
+                            if not member_name in self.class_dict:
+                                self.class_dict[member_name] = getattr(module, member_name)
+            # end initialize for create_op_builder()
+
+    # create an instance of op builder and return, name specified by class_name
+    def create_op_builder(self, class_name):
+        self._lazy_init_class_dict()
+        if class_name in self.class_dict:
+            return self.class_dict[class_name]()
+        else:
+            return None
+
+    # return an op builder class, name specified by class_name
+    def get_op_builder(self, class_name):
+        self._lazy_init_class_dict()
+        if class_name in self.class_dict:
+            return self.class_dict[class_name]
+        else:
+            return self.class_dict['NotImplementedBuilder'] if 'NotImplementedBuilder' in self.class_dict else None
+
+    def build_extension(self):
+        from torch.utils.cpp_extension import BuildExtension
+        return BuildExtension
+
+    def export_envs(self):
+        return []
+
+    def visible_devices_envs(self):
+        return ['HABANA_VISIBLE_MODULES']
+
+    def set_visible_devices_envs(self, current_env, local_accelerator_ids):
+        for env in self.visible_devices_envs():
+            current_env[env] = ",".join(map(str, local_accelerator_ids))
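Note that total_memory() above is derived from memory_stats()['Limit'] rather than a device-properties call. Illustrative use of that accounting (assumes a working Habana install, where get_accelerator() resolves to HPU_Accelerator):

    from deepspeed.accelerator import get_accelerator

    acc = get_accelerator()
    total = acc.total_memory()     # memory_stats()['Limit']
    free = acc.available_memory()  # total minus memory_allocated()
    print(f"{free / 2**30:.1f} GiB of {total / 2**30:.1f} GiB free")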
venv/lib/python3.10/site-packages/deepspeed/accelerator/mps_accelerator.py ADDED
@@ -0,0 +1,269 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+
+from .abstract_accelerator import DeepSpeedAccelerator
+
+# During setup stage torch may not be installed, pass on no torch will
+# allow op builder related API to be executed.
+try:
+    import torch.mps
+except ImportError:
+    pass
+
+
+class MPS_Accelerator(DeepSpeedAccelerator):
+
+    def __init__(self):
+        self._name = "mps"
+        self._communication_backend_name = None
+
+    def is_synchronized_device(self):
+        return False
+
+    def use_host_timers(self):
+        return self.is_synchronized_device()
+
+    def resolves_data_dependency(self):
+        return self.is_synchronized_device()
+
+    def handles_memory_backpressure(self):
+        return self.is_synchronized_device()
+
+    # Device APIs
+    def device_name(self, device_index=None):
+        if device_index is None:
+            return "mps"
+        return "mps:{}".format(device_index)
+
+    def device(self, device_index):
+        return torch.device("mps", index=0)
+
+    def set_device(self, device_index):
+        return
+
+    def current_device(self):
+        return torch.device("mps", index=0)
+
+    def current_device_name(self):
+        return "mps:0"
+
+    def device_count(self):
+        return 1
+
+    def synchronize(self, device_index=None):
+        return torch.mps.synchronize()
+
+    # RNG APIs
+    def random(self):
+        return torch.random
+
+    def set_rng_state(self, new_state, device_index=None):
+        return torch.mps.set_rng_state(new_state)
+
+    def get_rng_state(self, device_index=None):
+        return torch.mps.get_rng_state()
+
+    def manual_seed(self, seed):
+        return torch.mps.manual_seed(seed)
+
+    def manual_seed_all(self, seed):
+        return torch.mps.manual_seed(seed)
+
+    def seed(self):
+        return torch.mps.seed()
+
+    def initial_seed(self, seed):
+        return
+
+    def default_generator(self, device_index):
+        return
+
+    # Streams/Events
+    @property
+    def Stream(self):
+        return None
+
+    def stream(self, stream):
+        return None
+
+    def current_stream(self, device_index=None):
+        return None
+
+    def default_stream(self, device_index=None):
+        return None
+
+    @property
+    def Event(self):
+        return None
+
+    # Memory management
+    def empty_cache(self):
+        return torch.mps.empty_cache()
+
+    def memory_allocated(self, device_index=None):
+        return torch.mps.current_allocated_memory()
+
+    def max_memory_allocated(self, device_index=None):
+        return torch.mps.driver_allocated_memory()
+
+    def set_per_process_memory_fraction(self, fraction):
+        return torch.mps.set_per_process_memory_fraction(fraction)
+
+    def reset_max_memory_allocated(self, device_index=None):
+        return
+
+    def memory_cached(self, device_index=None):
+        return
+
+    def max_memory_cached(self, device_index=None):
+        return
+
+    def reset_max_memory_cached(self, device_index=None):
+        return
+
+    def memory_stats(self, device_index=None):
+        return
+
+    def reset_peak_memory_stats(self, device_index=None):
+        return
+
+    def memory_reserved(self, device_index=None):
+        return
+
+    def max_memory_reserved(self, device_index=None):
+        return
+
+    def total_memory(self, device_index=None):
+        return
+
+    def available_memory(self, device_index=None):
+        return
+
+    # Data types
+    def is_bf16_supported(self):
+        return False
+
+    def is_fp16_supported(self):
+        return False
+
+    def supported_dtypes(self):
+        return [torch.float]
+
+    # Misc
+    def amp(self):
+        return
+
+    def is_available(self):
+        return hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
+
+    def range_push(self, msg):
+        return
+
+    def range_pop(self):
+        return
+
+    def lazy_call(self, callback):
+        return
+
+    def communication_backend_name(self):
+        return self._communication_backend_name
+
+    def is_triton_supported(self):
+        return False
+
+    # Graph operations
+    def create_graph(self):
+        return None
+
+    def capture_to_graph(self, graph, pool=None, stream=None):
+        from deepspeed.runtime.utils import noop_context
+        return noop_context()
+
+    def replay_graph(self, graph):
+        return
+
+    # Tensor operations
+    @property
+    def BFloat16Tensor(self):
+        return
+
+    @property
+    def ByteTensor(self):
+        return
+
+    @property
+    def DoubleTensor(self):
+        return
+
+    @property
+    def FloatTensor(self):
+        return
+
+    @property
+    def HalfTensor(self):
+        return
+
+    @property
+    def IntTensor(self):
+        return
+
+    @property
+    def LongTensor(self):
+        return
+
+    def pin_memory(self, tensor, align_bytes=1):
+        return tensor.pin_memory()
+
+    def is_pinned(self, tensor):
+        return tensor.is_pinned()
+
+    def on_accelerator(self, tensor):
+        device_str = str(tensor.device)
+        if device_str.startswith("mps"):
+            return True
+        else:
+            return False
+
+    def op_builder_dir(self):
+        try:
+            # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
+            # if successful this also means we're doing a local install and not JIT compile path
+            from op_builder import __deepspeed__  # noqa: F401 # type: ignore
+
+            return "op_builder"
+        except ImportError:
+            return "deepspeed.ops.op_builder"
+
+    # create an instance of op builder, specified by class_name
+    def create_op_builder(self, op_name):
+        builder_class = self.get_op_builder(op_name)
+        if builder_class is not None:
+            return builder_class()
+        return None
+
+    # return an op builder class, specified by class_name
+    def get_op_builder(self, class_name):
+        from deepspeed.ops.op_builder.cpu import NotImplementedBuilder
+
+        return NotImplementedBuilder
+
+    def build_extension(self):
+        from torch.utils.cpp_extension import BuildExtension
+
+        return BuildExtension
+
+    def export_envs(self):
+        return []
+
+    # TODO: mps's visible envs is not confirmed, keep as CUDA_VISIBLE_DEVICES
+    def visible_devices_envs(self):
+        # TODO: could not find visible devices env for mps
+        return ['CUDA_VISIBLE_DEVICES']
+
+    def set_visible_devices_envs(self, current_env, local_accelerator_ids):
+        for env in self.visible_devices_envs():
+            current_env[env] = ",".join(map(str, local_accelerator_ids))
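The MPS backend is single-device and reports no communication backend; its is_available() is the standard torch probe. The same probe standalone (illustrative; the function name is hypothetical):

    import torch

    def mps_available() -> bool:
        # The hasattr guard keeps this safe on torch builds without MPS support.
        return hasattr(torch.backends, "mps") and torch.backends.mps.is_available()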
venv/lib/python3.10/site-packages/deepspeed/accelerator/npu_accelerator.py ADDED
@@ -0,0 +1,287 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ import importlib
+ import inspect
+
+ from .abstract_accelerator import DeepSpeedAccelerator
+ # During the setup stage torch may not be installed; passing on a missing
+ # torch still allows the op-builder-related APIs to be executed.
+ try:
+     import torch.npu
+ except ImportError:
+     pass
+
+
+ class NPU_Accelerator(DeepSpeedAccelerator):
+
+     def __init__(self):
+         super().__init__()
+         self._name = 'npu'
+         self._communication_backend_name = 'hccl'
+         # dict that holds class name <--> class type mapping, i.e.
+         # 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>
+         # this dict will be filled at init stage
+         self.class_dict = None
+
+     def is_synchronized_device(self):
+         return False
+
+     def use_host_timers(self):
+         return self.is_synchronized_device()
+
+     def resolves_data_dependency(self):
+         return self.is_synchronized_device()
+
+     def handles_memory_backpressure(self):
+         return self.is_synchronized_device()
+
+     # Device APIs
+     def device_name(self, device_index=None):
+         if device_index is None:
+             return 'npu'
+         return 'npu:{}'.format(device_index)
+
+     def device(self, device_index=None):
+         return torch.npu.device(device_index)
+
+     def set_device(self, device_index):
+         torch.npu.set_device(device_index)
+
+     def current_device(self):
+         return torch.npu.current_device()
+
+     def current_device_name(self):
+         return 'npu:{}'.format(torch.npu.current_device())
+
+     def device_count(self):
+         return torch.npu.device_count()
+
+     def synchronize(self, device_index=None):
+         return torch.npu.synchronize(device_index)
+
+     # RNG APIs
+     def random(self):
+         return torch.random
+
+     def set_rng_state(self, new_state, device_index=None):
+         if device_index is None:
+             return torch.npu.set_rng_state(new_state)
+
+         return torch.npu.set_rng_state(new_state, device_index)
+
+     def get_rng_state(self, device_index=None):
+         if device_index is None:
+             return torch.npu.get_rng_state()
+
+         return torch.npu.get_rng_state(device_index)
+
+     def manual_seed(self, seed):
+         return torch.npu.manual_seed(seed)
+
+     def manual_seed_all(self, seed):
+         return torch.npu.manual_seed_all(seed)
+
+     def initial_seed(self, seed):
+         return torch.npu.initial_seed(seed)
+
+     def default_generator(self, device_index):
+         return torch.npu.default_generators[device_index]
+
+     # Streams/Events
+     @property
+     def Stream(self):
+         return torch.npu.Stream
+
+     def stream(self, stream):
+         return torch.npu.stream(stream)
+
+     def current_stream(self, device_index=None):
+         return torch.npu.current_stream(device_index)
+
+     def default_stream(self, device_index=None):
+         return torch.npu.default_stream(device_index)
+
+     @property
+     def Event(self):
+         return torch.npu.Event
+
+     # Memory management
+     def empty_cache(self):
+         return torch.npu.empty_cache()
+
+     def memory_allocated(self, device_index=None):
+         return torch.npu.memory_allocated(device_index)
+
+     def max_memory_allocated(self, device_index=None):
+         return torch.npu.max_memory_allocated(device_index)
+
+     def reset_max_memory_allocated(self, device_index=None):
+         return torch.npu.reset_max_memory_allocated(device_index)
+
+     def memory_cached(self, device_index=None):
+         return torch.npu.memory_cached(device_index)
+
+     def max_memory_cached(self, device_index=None):
+         return torch.npu.max_memory_cached(device_index)
+
+     def reset_max_memory_cached(self, device_index=None):
+         return torch.npu.reset_max_memory_cached(device_index)
+
+     def memory_stats(self, device_index=None):
+         if hasattr(torch.npu, 'memory_stats'):
+             return torch.npu.memory_stats(device_index)
+
+     def reset_peak_memory_stats(self, device_index=None):
+         if hasattr(torch.npu, 'reset_peak_memory_stats'):
+             return torch.npu.reset_peak_memory_stats(device_index)
+
+     def memory_reserved(self, device_index=None):
+         if hasattr(torch.npu, 'memory_reserved'):
+             return torch.npu.memory_reserved(device_index)
+
+     def max_memory_reserved(self, device_index=None):
+         if hasattr(torch.npu, 'max_memory_reserved'):
+             return torch.npu.max_memory_reserved(device_index)
+
+     def total_memory(self, device_index=None):
+         return torch.npu.get_device_properties(device_index).total_memory
+
+     def available_memory(self, device_index=None):
+         return self.total_memory(device_index) - self.memory_allocated(device_index)
+
+     # Data types
+     def is_bf16_supported(self):
+         return torch.npu.is_bf16_supported()
+
+     def is_fp16_supported(self):
+         return True
+
+     def supported_dtypes(self):
+         return [torch.float, torch.half, torch.bfloat16]
+
+     # Misc
+     def amp(self):
+         if hasattr(torch.npu, 'amp'):
+             return torch.npu.amp
+         return None
+
+     def is_available(self):
+         return torch.npu.is_available()
+
+     def range_push(self, msg):
+         return
+
+     def range_pop(self):
+         return
+
+     def lazy_call(self, callback):
+         return torch.npu._lazy_call(callback)
+
+     def communication_backend_name(self):
+         return self._communication_backend_name
+
+     def is_triton_supported(self):
+         return False
+
+     # Graph operations
+     def create_graph(self):
+         return None
+
+     def capture_to_graph(self, graph, pool=None, stream=None):
+         from deepspeed.runtime.utils import noop_context
+         return noop_context()
+
+     def replay_graph(self, graph):
+         return
+
+     # Tensor operations
+
+     @property
+     def BFloat16Tensor(self):
+         return torch.npu.BFloat16Tensor
+
+     @property
+     def ByteTensor(self):
+         return torch.npu.ByteTensor
+
+     @property
+     def DoubleTensor(self):
+         return torch.npu.DoubleTensor
+
+     @property
+     def FloatTensor(self):
+         return torch.npu.FloatTensor
+
+     @property
+     def HalfTensor(self):
+         return torch.npu.HalfTensor
+
+     @property
+     def IntTensor(self):
+         return torch.npu.IntTensor
+
+     @property
+     def LongTensor(self):
+         return torch.npu.LongTensor
+
+     def pin_memory(self, tensor, align_bytes=1):
+         return tensor.pin_memory()
+
+     def is_pinned(self, tensor):
+         return tensor.is_pinned()
+
+     def on_accelerator(self, tensor):
+         device_str = str(tensor.device)
+         if device_str.startswith('npu:'):
+             return True
+         else:
+             return False
+
+     def op_builder_dir(self):
+         try:
+             # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
+             # if successful this also means we're doing a local install and not the JIT compile path
+             from op_builder import __deepspeed__  # noqa: F401 # type: ignore
+             return "op_builder.npu"
+         except ImportError:
+             return "deepspeed.ops.op_builder.npu"
+
+     def _lazy_init_class_dict(self):
+         if self.class_dict:
+             return
+
+         op_builder_module = importlib.import_module(self.op_builder_dir())
+
+         # get op builder class from op_builder/npu/__init__.py
+         self.class_dict = {}
+         for class_name, class_obj in inspect.getmembers(op_builder_module, inspect.isclass):
+             self.class_dict[class_name] = class_obj
+
+     # create an instance of op builder and return it, name specified by class_name
+     def create_op_builder(self, class_name):
+         builder_class = self.get_op_builder(class_name)
+         return None if builder_class is None else builder_class()
+
+     # return an op builder class, name specified by class_name
+     def get_op_builder(self, class_name):
+         self._lazy_init_class_dict()
+         if class_name in self.class_dict:
+             return self.class_dict[class_name]
+         else:
+             return self.class_dict['NotImplementedBuilder'] if 'NotImplementedBuilder' in self.class_dict else None
+
+     def build_extension(self):
+         from torch.utils.cpp_extension import BuildExtension
+         return BuildExtension
+
+     def export_envs(self):
+         return ['ASCEND', 'HCCL', 'LD_LIBRARY', 'PATH']
+
+     def visible_devices_envs(self):
+         return ['ASCEND_RT_VISIBLE_DEVICES']
+
+     def set_visible_devices_envs(self, current_env, local_accelerator_ids):
+         for env in self.visible_devices_envs():
+             current_env[env] = ",".join(map(str, local_accelerator_ids))
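Callers are expected to go through this abstraction rather than torch.npu directly; a minimal sketch of device-agnostic tensor placement, assuming an Ascend machine with torch_npu installed (the tensor shape is illustrative):

import torch
from deepspeed.accelerator import get_accelerator

accel = get_accelerator()
x = torch.ones(4, device=accel.device_name(0))  # resolves to 'npu:0' on Ascend
print(accel.on_accelerator(x))  # True: the device string starts with 'npu:'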
venv/lib/python3.10/site-packages/deepspeed/accelerator/real_accelerator.py ADDED
@@ -0,0 +1,257 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ import os
+
+ try:
+     # Importing logger currently requires that torch is installed, hence the try...except
+     # TODO: Remove logger dependency on torch.
+     from deepspeed.utils import logger as accel_logger
+ except ImportError as e:
+     accel_logger = None
+
+ try:
+     from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1
+ except ImportError as e:
+     dsa1 = None
+ try:
+     from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2
+ except ImportError as e:
+     dsa2 = None
+
+ SUPPORTED_ACCELERATOR_LIST = ['cuda', 'cpu', 'xpu', 'xpu.external', 'npu', 'mps', 'hpu']
+
+ ds_accelerator = None
+
+
+ def _validate_accelerator(accel_obj):
+     # because abstract_accelerator has a different path at
+     # build time (accelerator.abstract_accelerator)
+     # and run time (deepspeed.accelerator.abstract_accelerator),
+     # and an extension would import the run time
+     # abstract_accelerator/DeepSpeedAccelerator as its base
+     # class, we need to compare accel_obj against both base classes.
+     # if accel_obj is an instance of DeepSpeedAccelerator in either
+     # accelerator.abstract_accelerator
+     # or deepspeed.accelerator.abstract_accelerator, consider accel_obj
+     # a conforming object
+     if not ((dsa1 is not None and isinstance(accel_obj, dsa1)) or (dsa2 is not None and isinstance(accel_obj, dsa2))):
+         raise AssertionError(f"{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator")
+
+     # TODO: turn off is_available test since this breaks tests
+     # assert accel_obj.is_available(), \
+     #     f'{accel_obj.__class__.__name__} accelerator fails is_available() test'
+
+
+ def is_current_accelerator_supported():
+     return get_accelerator().device_name() in SUPPORTED_ACCELERATOR_LIST
+
+
+ def get_accelerator():
+     global ds_accelerator
+     if ds_accelerator is not None:
+         return ds_accelerator
+
+     accelerator_name = None
+     ds_set_method = None
+     # 1. Detect whether there is an override of the DeepSpeed accelerator from an environment variable.
+     if "DS_ACCELERATOR" in os.environ.keys():
+         accelerator_name = os.environ["DS_ACCELERATOR"]
+         if accelerator_name == "xpu":
+             try:
+                 import intel_extension_for_pytorch as ipex
+                 assert ipex._C._has_xpu(), "XPU_Accelerator requires an intel_extension_for_pytorch that supports XPU."
+             except ImportError as e:
+                 raise ValueError(
+                     "XPU_Accelerator requires intel_extension_for_pytorch, which is not installed on this system.")
+         elif accelerator_name == "xpu.external":
+             try:
+                 import intel_extension_for_deepspeed  # noqa: F401 # type: ignore
+             except ImportError as e:
+                 raise ValueError(
+                     "XPU_Accelerator external requires intel_extension_for_deepspeed, which is not installed on this system."
+                 )
+         elif accelerator_name == "cpu":
+             pass
+         elif accelerator_name == "npu":
+             try:
+                 import torch_npu  # noqa: F401 # type: ignore
+             except ImportError as e:
+                 raise ValueError("NPU_Accelerator requires torch_npu, which is not installed on this system.")
+         elif accelerator_name == "mps":
+             try:
+                 import torch.mps
+
+                 # should use torch.mps.is_available() if it exists someday but this is used as a proxy
+                 torch.mps.current_allocated_memory()
+             except (RuntimeError, ImportError) as e:
+                 raise ValueError("MPS_Accelerator requires torch.mps, which is not installed on this system.")
+         elif accelerator_name == "hpu":
+             try:
+                 import habana_frameworks.torch.hpu  # noqa: F401
+             except ImportError as e:
+                 raise ValueError(
+                     "HPU_Accelerator requires habana_frameworks.torch.hpu, which is not installed on this system.")
+         elif accelerator_name not in SUPPORTED_ACCELERATOR_LIST:
+             raise ValueError(f'DS_ACCELERATOR must be one of {SUPPORTED_ACCELERATOR_LIST}. '
+                              f'Value "{accelerator_name}" is not supported')
+         ds_set_method = "override"
+
+     # 2. If no override, detect which accelerator to use automatically
+     if accelerator_name is None:
+         # We need a way to choose among different accelerator types.
+         # Currently we detect which accelerator extension is installed
+         # in the environment and use it if one is detected.
+         # An alternative might be to detect whether a CUDA device is installed on
+         # the system, but this comes with two pitfalls:
+         # 1. the system may not have torch pre-installed, so
+         #    get_accelerator().is_available() may not work.
+         # 2. Some scenarios, like installing on a login node (without CUDA device)
+         #    and running on a compute node (with CUDA device), may cause a mismatch
+         #    between installation time and runtime.
+
+         try:
+             from intel_extension_for_deepspeed import XPU_Accelerator  # noqa: F401,F811 # type: ignore
+             accelerator_name = "xpu.external"
+         except ImportError as e:
+             pass
+         if accelerator_name is None:
+             try:
+                 import intel_extension_for_pytorch as ipex
+                 if ipex._C._has_xpu():
+                     accelerator_name = "xpu"
+                 else:
+                     accelerator_name = "cpu"
+             except ImportError as e:
+                 pass
+         if accelerator_name is None:
+             try:
+                 import torch_npu  # noqa: F401,F811 # type: ignore
+
+                 accelerator_name = "npu"
+             except ImportError as e:
+                 pass
+         if accelerator_name is None:
+             try:
+                 import torch.mps
+
+                 # should use torch.mps.is_available() if it exists someday but this is used as a proxy
+                 torch.mps.current_allocated_memory()
+                 accelerator_name = "mps"
+             except (RuntimeError, ImportError) as e:
+                 pass
+         if accelerator_name is None:
+             try:
+                 import habana_frameworks.torch.hpu  # noqa: F401,F811
+
+                 accelerator_name = "hpu"
+             except ImportError as e:
+                 pass
+         if accelerator_name is None:
+             # borrow this log from PR#5084
+             try:
+                 import torch
+
+                 # Determine if we are on a GPU or an x86 CPU with torch.
+                 if torch.cuda.is_available():  #ignore-cuda
+                     accelerator_name = "cuda"
+                 else:
+                     if accel_logger is not None:
+                         accel_logger.warning(
+                             "Setting accelerator to CPU. If you have a GPU or other accelerator, we were unable to detect it."
+                         )
+                     accelerator_name = "cpu"
+             except (RuntimeError, ImportError) as e:
+                 # TODO need a more decent way to detect which accelerator to use, consider using nvidia-smi command for detection
+                 accelerator_name = "cuda"
+
+         ds_set_method = "auto detect"
+
+     # 3. Set ds_accelerator accordingly
+     if accelerator_name == "cuda":
+         from .cuda_accelerator import CUDA_Accelerator
+
+         ds_accelerator = CUDA_Accelerator()
+     elif accelerator_name == "cpu":
+         from .cpu_accelerator import CPU_Accelerator
+
+         ds_accelerator = CPU_Accelerator()
+     elif accelerator_name == "xpu.external":
+         # XPU_Accelerator is already imported in detection stage
+         ds_accelerator = XPU_Accelerator()
+     elif accelerator_name == "xpu":
+         from .xpu_accelerator import XPU_Accelerator
+
+         ds_accelerator = XPU_Accelerator()
+     elif accelerator_name == "npu":
+         from .npu_accelerator import NPU_Accelerator
+
+         ds_accelerator = NPU_Accelerator()
+     elif accelerator_name == "mps":
+         from .mps_accelerator import MPS_Accelerator
+
+         ds_accelerator = MPS_Accelerator()
+     elif accelerator_name == 'hpu':
+         from .hpu_accelerator import HPU_Accelerator
+
+         ds_accelerator = HPU_Accelerator()
+     _validate_accelerator(ds_accelerator)
+     if accel_logger is not None:
+         accel_logger.info(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
+     return ds_accelerator
+
+
+ def set_accelerator(accel_obj):
+     global ds_accelerator
+     _validate_accelerator(accel_obj)
+     if accel_logger is not None:
+         accel_logger.info(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
+     ds_accelerator = accel_obj
+
+
+ """
+ -----------[code] test_get.py -----------
+ from deepspeed.accelerator import get_accelerator
+ from deepspeed.utils import logger
+ my_accelerator = get_accelerator()
+ logger.info(f'{my_accelerator._name=}')
+ logger.info(f'{my_accelerator._communication_backend_name=}')
+ logger.info(f'{my_accelerator.HalfTensor().device=}')
+ logger.info(f'{my_accelerator.total_memory()=}')
+ -----------[code] test_get.py -----------
+
+ ---[output] python test_get.py---------
+ my_accelerator._name='cuda'
+ my_accelerator._communication_backend_name='nccl'
+ my_accelerator.HalfTensor().device=device(type='cuda', index=0)
+ my_accelerator.total_memory()=34089730048
+ ---[output] python test_get.py---------
+
+ **************************************************************************
+ -----------[code] test_set.py -----------
+ from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator
+ from deepspeed.utils import logger
+ cu_accel = CUDA_Accelerator()
+ logger.info(f'{id(cu_accel)=}')
+ from deepspeed.accelerator import set_accelerator, get_accelerator
+ set_accelerator(cu_accel)
+
+ my_accelerator = get_accelerator()
+ logger.info(f'{id(my_accelerator)=}')
+ logger.info(f'{my_accelerator._name=}')
+ logger.info(f'{my_accelerator._communication_backend_name=}')
+ logger.info(f'{my_accelerator.HalfTensor().device=}')
+ logger.info(f'{my_accelerator.total_memory()=}')
+ -----------[code] test_set.py -----------
+
+
+ ---[output] python test_set.py---------
+ id(cu_accel)=139648165478304
+ id(my_accelerator)=139648165478304
+ my_accelerator._name='cuda'
+ my_accelerator._communication_backend_name='nccl'
+ my_accelerator.HalfTensor().device=device(type='cuda', index=0)
+ my_accelerator.total_memory()=34089730048
+ ---[output] python test_set.py---------
+ """
venv/lib/python3.10/site-packages/deepspeed/accelerator/xpu_accelerator.py ADDED
@@ -0,0 +1,298 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+ from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator
+ import intel_extension_for_pytorch as ipex  # noqa: F401 # type: ignore
+ import oneccl_bindings_for_pytorch  # noqa: F401 # type: ignore
+
+
+ class XPU_Accelerator(DeepSpeedAccelerator):
+
+     def __init__(self):
+         self._name = 'xpu'
+         self._communication_backend_name = 'ccl'
+         self.aligned_tensors = []
+
+     def is_synchronized_device(self):
+         return False
+
+     def use_host_timers(self):
+         return self.is_synchronized_device()
+
+     def resolves_data_dependency(self):
+         return self.is_synchronized_device()
+
+     def handles_memory_backpressure(self):
+         return self.is_synchronized_device()
+
+     # Device APIs
+     def device_name(self, device_index=None):
+         if device_index is None:
+             return 'xpu'
+         return 'xpu:{}'.format(device_index)
+
+     def device(self, device_index=None):
+         return torch.xpu.device(device_index)
+
+     def set_device(self, device_index):
+         torch.xpu.set_device(device_index)
+
+     def current_device(self):
+         return torch.xpu.current_device()
+
+     def current_device_name(self):
+         return 'xpu:{}'.format(torch.xpu.current_device())
+
+     def device_count(self):
+         return torch.xpu.device_count()
+
+     def synchronize(self, device_index=None):
+         return torch.xpu.synchronize(device_index)
+
+     # RNG APIs
+     def random(self):
+         return torch.xpu.random
+
+     def set_rng_state(self, new_state, device_index=None):
+         if device_index is None:
+             return torch.xpu.set_rng_state(new_state)
+         return torch.xpu.set_rng_state(new_state, device_index)
+
+     def get_rng_state(self, device_index=None):
+         if device_index is None:
+             return torch.xpu.get_rng_state()
+         return torch.xpu.get_rng_state(device_index)
+
+     def manual_seed(self, seed):
+         return torch.xpu.manual_seed(seed)
+
+     def manual_seed_all(self, seed):
+         return torch.xpu.manual_seed_all(seed)
+
+     def initial_seed(self, seed):
+         return torch.xpu.initial_seed(seed)
+
+     def default_generator(self, device_index):
+         return torch.xpu.default_generators[device_index]
+
+     # Streams/Events
+     @property
+     def Stream(self):
+         return torch.xpu.Stream
+
+     def stream(self, stream):
+         return torch.xpu.stream(stream)
+
+     def current_stream(self, device_index=None):
+         return torch.xpu.current_stream(device_index)
+
+     def default_stream(self, device_index=None):
+         # torch.xpu does not support the sync behavior of the cuda default stream;
+         # use current_stream as a workaround
+         # see https://pytorch.org/docs/stable/notes/cuda.html#cuda-streams
+         return torch.xpu.current_stream(device_index)
+
+     @property
+     def Event(self):
+         return torch.xpu.Event
+
+     # Memory management
+     def empty_cache(self):
+         return torch.xpu.empty_cache()
+
+     def memory_allocated(self, device_index=None):
+         return torch.xpu.memory_allocated(device_index)
+
+     def max_memory_allocated(self, device_index=None):
+         return torch.xpu.max_memory_allocated(device_index)
+
+     def reset_max_memory_allocated(self, device_index=None):
+         return torch.xpu.reset_max_memory_allocated(device_index)
+
+     def memory_cached(self, device_index=None):
+         return torch.xpu.memory_reserved(device_index)
+
+     def max_memory_cached(self, device_index=None):
+         return torch.xpu.max_memory_reserved(device_index)
+
+     def reset_max_memory_cached(self, device_index=None):
+         return torch.xpu.reset_max_memory_reserved(device_index)
+
+     def memory_stats(self, device_index=None):
+         return torch.xpu.memory_stats(device_index)
+
+     def reset_peak_memory_stats(self, device_index=None):
+         return torch.xpu.reset_peak_memory_stats(device_index)
+
+     def memory_reserved(self, device_index=None):
+         return torch.xpu.memory_reserved(device_index)
+
+     def max_memory_reserved(self, device_index=None):
+         return torch.xpu.max_memory_reserved(device_index)
+
+     def total_memory(self, device_index=None):
+         return torch.xpu.get_device_properties(device_index).total_memory
+
+     def available_memory(self, device_index=None):
+         return self.total_memory(device_index) - self.memory_allocated(device_index)
+
+     # Misc
+     def amp(self):
+         return torch.xpu.amp
+
+     def is_available(self):
+         return torch.xpu.is_available()
+
+     def range_push(self, msg):
+         # TODO itt is currently not supported yet
+         # return torch.profiler.itt.range_push(msg)
+         return
+
+     def range_pop(self):
+         # TODO itt is currently not supported yet
+         # return torch.profiler.itt.range_pop()
+         return
+
+     def lazy_call(self, callback):
+         return torch.xpu.lazy_init._lazy_call(callback)
+
+     def communication_backend_name(self):
+         return self._communication_backend_name
+
+     def is_triton_supported(self):
+         return False
+
+     # Graph operations
+     def create_graph(self):
+         return None
+
+     def capture_to_graph(self, graph, pool=None, stream=None):
+         from deepspeed.runtime.utils import noop_context
+         return noop_context()
+
+     def replay_graph(self, graph):
+         return
+
+     # Data types
+     def is_bf16_supported(self):
+         return True
+
+     def is_fp16_supported(self):
+         return True
+
+     def supported_dtypes(self):
+         return [torch.float, torch.half, torch.bfloat16]
+
+     # Tensor operations
+
+     @property
+     def BFloat16Tensor(self):
+         return torch.xpu.BFloat16Tensor
+
+     @property
+     def ByteTensor(self):
+         return torch.xpu.ByteTensor
+
+     @property
+     def DoubleTensor(self):
+         return torch.xpu.DoubleTensor
+
+     @property
+     def FloatTensor(self):
+         return torch.xpu.FloatTensor
+
+     @property
+     def HalfTensor(self):
+         return torch.xpu.HalfTensor
+
+     @property
+     def IntTensor(self):
+         return torch.xpu.IntTensor
+
+     @property
+     def LongTensor(self):
+         return torch.xpu.LongTensor
+
+     def pin_memory(self, tensor, align_bytes=1):
+         if align_bytes == 1:
+             return tensor.pin_memory(device=self.current_device_name())
+         elif align_bytes == 0:
+             from intel_extension_for_deepspeed.op_builder.async_io import AsyncIOBuilder
+             self.aio_handle = AsyncIOBuilder().load().aio_handle(128 * 1024, 8, False, False, False)
+             aligned_t = self.aio_handle.new_cpu_locked_tensor(tensor.numel(), tensor)
+             aligned_t = aligned_t[:tensor.numel()].copy_(tensor)
+             self.aligned_tensors.append([aligned_t.data_ptr(), aligned_t[-1].data_ptr()])
+             return aligned_t
+
+     def is_pinned(self, tensor):
+         if tensor.is_pinned(device=self.current_device_name()):
+             return True
+         else:
+             for begin, end in self.aligned_tensors:
+                 if begin <= tensor.data_ptr() and tensor.data_ptr() <= end:
+                     return True
+         return False
+
+     def op_builder_dir(self):
+         try:
+             # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
+             # if successful this also means we're doing a local install and not the JIT compile path
+             from op_builder import __deepspeed__  # noqa: F401 # type: ignore
+             return "op_builder.xpu"
+         except ImportError:
+             return "deepspeed.ops.op_builder.xpu"
+
+     def on_accelerator(self, tensor):
+         device_str = str(tensor.device)
+         if device_str.startswith('xpu:'):
+             return True
+         else:
+             return False
+
+     # create an instance of op builder and return it, name specified by class_name
+     def create_op_builder(self, op_name):
+         builder_class = self.get_op_builder(op_name)
+         if builder_class is not None:
+             return builder_class()
+         return None
+
+     # return an op builder class, name specified by class_name
+     def get_op_builder(self, class_name):
+         try:
+             # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
+             # if successful this also means we're doing a local install and not the JIT compile path
+             from op_builder import __deepspeed__  # noqa: F401 # type: ignore
+             from op_builder.xpu import CPUAdagradBuilder, CPUAdamBuilder, FusedAdamBuilder, AsyncIOBuilder
+         except ImportError:
+             from deepspeed.ops.op_builder.xpu import CPUAdagradBuilder, CPUAdamBuilder, FusedAdamBuilder, AsyncIOBuilder
+
+         if class_name == "AsyncIOBuilder":
+             return AsyncIOBuilder
+         elif class_name == "CPUAdagradBuilder":
+             return CPUAdagradBuilder
+         elif class_name == "CPUAdamBuilder":
+             return CPUAdamBuilder
+         elif class_name == "FusedAdamBuilder":
+             return FusedAdamBuilder
+         else:
+             return None
+
+     def build_extension(self):
+         try:
+             from intel_extension_for_pytorch.xpu.cpp_extension import DpcppBuildExtension
+         except ImportError:
+             from intel_extension_for_pytorch.xpu.utils import DpcppBuildExtension
+         return DpcppBuildExtension
+
+     def export_envs(self):
+         return []
+
+     def visible_devices_envs(self):
+         return ['ZE_AFFINITY_MASK']
+
+     def set_visible_devices_envs(self, current_env, local_accelerator_ids):
+         for env in self.visible_devices_envs():
+             current_env[env] = ",".join(map(str, local_accelerator_ids))
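The aligned_tensors bookkeeping exists because tensors pinned through the align_bytes == 0 path are not recognized by torch's own is_pinned; a toy sketch of the pointer-range test, with made-up integers standing in for data_ptr() values:

aligned_ranges = [(0x1000, 0x1FFF), (0x8000, 0x8FFF)]  # (begin, end) address pairs

def in_aligned_range(ptr):
    return any(begin <= ptr <= end for begin, end in aligned_ranges)

assert in_aligned_range(0x1800)
assert not in_aligned_range(0x4000)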
venv/lib/python3.10/site-packages/deepspeed/autotuning/__init__.py ADDED
@@ -0,0 +1,6 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .autotuner import Autotuner
venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (231 Bytes).
venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/autotuner.cpython-310.pyc ADDED
Binary file (30.9 kB).
venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/config.cpython-310.pyc ADDED
Binary file (3.66 kB).
venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/constants.cpython-310.pyc ADDED
Binary file (4.54 kB).
venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/scheduler.cpython-310.pyc ADDED
Binary file (12.2 kB).
venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/utils.cpython-310.pyc ADDED
Binary file (13 kB).
venv/lib/python3.10/site-packages/deepspeed/autotuning/autotuner.py ADDED
@@ -0,0 +1,1113 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import copy
+ import json
+ import os
+ import shutil
+ import subprocess
+ import time
+ import datetime
+ import math
+ import hjson
+
+ from ..runtime.config_utils import dict_raise_error_on_duplicate_keys
+ from ..runtime.constants import *
+
+ from ..runtime.zero.config import ZERO_OPTIMIZATION, ZeroStageEnum
+ from ..utils import logger
+ from .config import DeepSpeedAutotuningConfig
+ from .constants import *
+ from .scheduler import ResourceManager
+ from .tuner import GridSearchTuner, RandomTuner, ModelBasedTuner
+ from .utils import *
+ from deepspeed.accelerator import get_accelerator
+
+ try:
+     from tabulate import tabulate
+ except ImportError:
+     tabulate = None
+
+ try:
+     import mlflow
+     has_mlflow = True
+ except Exception as e:
+     has_mlflow = False
+
+ ZERO_OPTIMIZATION_STAGE = "stage"
+ OFFLOAD_OPTIMIZER = "offload_optimizer"
+ OFFLOAD_PARAM = "offload_param"
+ ZERO_OPTIMIZATION_STAGE_DEFAULT = ZeroStageEnum.disabled
+
+
+ class Autotuner:
+     """The DeepSpeed Autotuner automatically discovers the optimal DeepSpeed configuration that delivers good training speed. The Autotuner uses model information, system information, and heuristics to efficiently tune system knobs that affect compute and memory efficiency, such as ZeRO optimization stages, micro-batch sizes, and many other ZeRO optimization configurations. It not only reduces the time and resources users spend on tuning, but also can discover configurations better than hand-tuned methods.
+     Autotuning with DeepSpeed requires no code change from DeepSpeed users. Please refer to the README for usage details.
+     """
+
+     def __init__(self, args, active_resources):
+         self.args = args
+         self.selected_exp_dir = None
+
+         assert tabulate is not None, "Missing required package `tabulate`, please install with `pip install deepspeed[autotuning]`."
+
+         logger.debug(f"autotuning args={args}")
+
+         self.user_config = self._get_user_config(args.user_args)
+         assert self.user_config is not None, "DeepSpeed configuration is not provided"
+
+         self.autotuning_config = DeepSpeedAutotuningConfig(self.user_config)
+         if self.user_config[AUTOTUNING]:
+             if AUTOTUNING_EXPS_DIR in self.user_config[AUTOTUNING].keys():
+                 del self.user_config[AUTOTUNING][AUTOTUNING_EXPS_DIR]
+             if AUTOTUNING_RESULTS_DIR in self.user_config[AUTOTUNING].keys():
+                 del self.user_config[AUTOTUNING][AUTOTUNING_RESULTS_DIR]
+
+         self.exps_dir = self.autotuning_config.exps_dir
+         if self.autotuning_config.overwrite and os.path.exists(self.exps_dir):
+             shutil.rmtree(self.exps_dir, ignore_errors=True)
+         if not os.path.exists(self.exps_dir):
+             try:
+                 os.makedirs(self.exps_dir, exist_ok=True)
+                 logger.info(f"Created autotuning experiments directory: {self.exps_dir}")
+             except Exception:
+                 logger.error(
+                     f"Failed to create {self.exps_dir}, please check that `exps_dir` in the autotuning config file is accessible by all the nodes in the job."
+                 )
+                 exit(-1)
+
+         self.results_dir = self.autotuning_config.results_dir
+         if self.autotuning_config.overwrite and os.path.exists(self.results_dir):
+             shutil.rmtree(self.results_dir, ignore_errors=True)
+         if not os.path.exists(self.results_dir):
+             try:
+                 os.makedirs(self.results_dir, exist_ok=True)
+                 logger.info(f"Created autotuning results directory: {self.results_dir}")
+             except Exception:
+                 logger.error(
+                     f"Failed to create {self.results_dir}, please check that `results_dir` in the autotuning config file is accessible by all the nodes in the job."
+                 )
+                 exit(-1)
+
+         # set the active resource for the autotuner resource manager
+         self.rm = self._get_resource_manager(active_resources)
+
+         # get the resource requirement for each autotuning experiment
+         self.exp_num_nodes, self.exp_num_gpus = self._get_exp_resources(args)
+
+         assert self.exp_num_gpus <= self.rm.num_gpus_per_node, "num_gpus in the autotuning configuration must not exceed the number of GPUs available per node"
+         assert self.exp_num_nodes <= len(
+             self.rm.nodes), "num_nodes in the autotuning configuration must not exceed the number of nodes available"
+
+         self.records = {}
+         self.optimal_cmd = None
+         self.optimal_ds_config = None
+
+         self.mlflow_parent_id = None
+
+     def print_tuning_results(self):
+         """Print the autotuning results in tabular format.
+         """
+         best_space_records = self.get_best_space_records()
+         tab = []
+         if best_space_records:
+             for key, val in best_space_records.items():
+                 if not val:
+                     continue
+                 row = []
+                 row.append(key)
+                 num_exps = 0
+                 if key == GLOBAL_TUNING_SPACE:
+                     cnt = 0
+                     for k, v in best_space_records.items():
+                         if k != GLOBAL_TUNING_SPACE:
+                             cnt += v[2]
+                     num_exps = cnt
+                 else:
+                     num_exps = val[2]
+                 row.append(num_exps)
+                 row.append(val[1])
+                 row.append(val[0]['name'])
+                 tab.append(row)
+         summary = tabulate(tab,
+                            headers=["tuning_space", "num_experiments", "best_metric_val", "best_exp_name"],
+                            tablefmt="pipe")
+         print(summary)
+         with open(os.path.join(self.results_dir, 'summary.txt'), 'w', buffering=BUFSIZE) as fd:
+             fd.write(summary)
+             fd.flush()
+             os.fsync(fd)
+
+         if GLOBAL_TUNING_SPACE in best_space_records:
+             best_exp, best_metric_val, total_num_exps = best_space_records[GLOBAL_TUNING_SPACE]
+             if best_exp:
+                 logger.info(
+                     f"{best_exp['name']} is the optimal setup after tuning. The exp result is at {best_exp['result_dir']}."
+                 )
+             else:
+                 logger.info("No optimal setup was found. Please check that the experiments were run successfully.")
+             tuning_duration = datetime.timedelta(seconds=(time.time() - self.start_time))
+
+             logger.info(f"Tuning completed in {tuning_duration}")
+             with open(os.path.join(self.results_dir, 'summary.txt'), 'a') as f:
+                 f.write(
+                     f"\n\nTuning completed in {tuning_duration}. Total number of experiments: {self.rm.experiment_count - 1}."
+                 )
+                 f.flush()
+
+     def _get_user_config(self, user_args):
+         """Get the DeepSpeed configuration from the user arguments passed to the launcher.
+
+         Args:
+             user_args ([list]): user arguments passed to the DeepSpeed launcher
+
+         Returns:
+             [dict]: DeepSpeed configuration dictionary
+         """
+         user_config_file = None
+         if "--deepspeed_config" in user_args:
+             idx = user_args.index("--deepspeed_config")
+             assert ".json" in user_args[
+                 idx + 1], "DeepSpeed --deepspeed_config requires a json file to specify the configuration"
+
+             user_config_file = user_args[idx + 1]
+         elif "--deepspeed" in user_args:
+             idx = user_args.index("--deepspeed")
+             if ".json" in user_args[idx + 1]:
+                 user_config_file = user_args[idx + 1]
+
+         logger.debug(f"user_config_file = {user_config_file}")
+         if user_config_file is not None:
+             assert os.path.isfile(user_config_file), "DeepSpeed configuration file: {} is not an existing file".format(
+                 user_config_file)
+             if os.path.exists(user_config_file):
+                 return json.load(open(user_config_file, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
+
+         return None
+
+     def _get_resource_manager(self, active_resources):
+         """Initialize and return a resource manager
+
+         Args:
+             active_resources ([dict]): A dictionary of hostnames and their slots (GPUs), e.g. {"worker-0": "0,1,2,3,4,5,6,7,8"}
+
+         Raises:
+             RuntimeError: raises the error if no GPU is available
+
+         Returns:
+             [ResourceManager]: A resource manager that schedules and runs autotuning experiments.
+         """
+         logger.info(f"active_resources = {active_resources}")
+
+         hosts = []
+         ngpus_per_node = 100
+         for hostname, slots in active_resources.items():
+             hosts.append(hostname)
+             ngpus_per_node = min(len(slots), ngpus_per_node)
+
+         assert ngpus_per_node > 0, "no gpu is available"
+
+         return ResourceManager(args=self.args,
+                                hosts=hosts,
+                                num_gpus_per_node=ngpus_per_node,
+                                results_dir=self.results_dir,
+                                exps_dir=self.exps_dir,
+                                arg_mappings=self.autotuning_config.arg_mappings)
+
+     def _get_exp_resources(self, args):
+         """Get the resource requirement for each autotuning experiment
+
+         Args:
+             args (dict): user args
+
+         Returns:
+             num_nodes, num_gpus: the number of nodes and the number of gpus used in the autotuning experiments
+         """
+         if args.num_nodes > 0:
+             num_nodes = args.num_nodes
+         else:
+             num_nodes = len(self.rm.nodes)
+
+         if args.num_gpus > 0:
+             num_gpus = args.num_gpus
+         else:
+             num_gpus = self.rm.num_gpus_per_node
+
+         return num_nodes, num_gpus
+
+     def metric(self):
+         return self.autotuning_config.metric
+
+     def fast_enabled(self):
+         return self.autotuning_config.fast
+
+     def max_train_batch_size(self):
+         return self.autotuning_config.max_train_batch_size
+
+     def mp_size(self):
+         return self.autotuning_config.mp_size
+
+     def max_train_micro_batch_size_per_gpu(self):
+         if self.max_train_batch_size(
+         ) and self.max_train_batch_size() > 0:  # if the user specifies a max_train_batch_size
+             max_train_micro_batch_size = self.max_train_batch_size() * self.mp_size() // (
+                 self.exp_num_gpus * self.exp_num_nodes)  # gradient accumulation steps >= 1
+             return min(self.autotuning_config.max_train_micro_batch_size_per_gpu, max_train_micro_batch_size)
+         else:
+             return self.autotuning_config.max_train_micro_batch_size_per_gpu
+
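A worked example of the cap arithmetic above, with made-up numbers:

max_train_batch_size, mp_size = 1024, 2  # global batch cap, model-parallel size
num_gpus, num_nodes = 8, 1               # resources per experiment
cap = max_train_batch_size * mp_size // (num_gpus * num_nodes)
print(cap)  # 256; gradient accumulation then keeps the global batch under the cap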
+     def min_train_micro_batch_size_per_gpu(self):
+         return self.autotuning_config.min_train_micro_batch_size_per_gpu
+
+     def num_tuning_micro_batch_sizes(self):
+         return self.autotuning_config.num_tuning_micro_batch_sizes
+
+     def fp16_enabled(self):
+         if FP16 in self.user_config.keys():
+             return self.user_config[FP16].get(FP16_ENABLED, FP16_ENABLED_DEFAULT)
+         else:
+             return False
+
+     def get_gpu_memory_info(self):
+         return get_accelerator().total_memory()
+
+     def get_activation_memory_per_gpu(self):
+         if self.model_info and "activation_mem_per_gpu" in self.model_info:
+             return self.model_info["activation_mem_per_gpu"]
+
+     def get_instantiation_memory_required_per_gpu(self, zero_stage):
+         num_params = self.get_model_num_params()
+         total_gpus = self.exp_num_nodes * self.exp_num_gpus
+         fp16_enabled = self.fp16_enabled()
+
+         if not num_params:
+             return 0
+         # assume the model uses Adam optimizer
+         # ZeroStageEnum.disabled:
+         params_mem = num_params * (2 if fp16_enabled else 4)
+         gradients_mem = num_params * (2 if fp16_enabled else 4)
+         optimizer_mem = num_params * (16 if fp16_enabled else 8)
+
+         if zero_stage >= ZeroStageEnum.optimizer_states:
+             optimizer_mem = optimizer_mem / total_gpus
+
+         if zero_stage >= ZeroStageEnum.gradients:
+             gradients_mem = gradients_mem / total_gpus
+
+         if zero_stage >= ZeroStageEnum.weights:
+             params_mem = params_mem / total_gpus
+
+         mem_per_gpu = (params_mem + gradients_mem + optimizer_mem) / self.mp_size()
+
+         return mem_per_gpu
+
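A worked instance of the formula above, assuming 1B fp16 parameters on 8 GPUs with ZeRO stage 1 (only optimizer states partitioned) and mp_size = 1:

num_params, total_gpus = 1_000_000_000, 8
params_mem = num_params * 2                   # fp16 weights
gradients_mem = num_params * 2                # fp16 gradients
optimizer_mem = num_params * 16 / total_gpus  # Adam states, sharded by ZeRO-1
print(params_mem + gradients_mem + optimizer_mem)  # 6e9 bytes per GPU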
+     def _generate_experiments(self, tuning_space, max_train_batch_size_per_gpu):
+         """Generates a list of autotuning experiments given a tuning_space.
+             The corresponding parameter values are replaced by user-defined values in the DeepSpeed configuration file.
+         Args:
+             tuning_space ([dict]): A DeepSpeed configuration dictionary where a value can be a list (called a tuning parameter). For example,
+                 {
+                     "zero_optimization": {
+                         "stage": 1,
+                         "reduce_bucket_size": [5e7, 5e8, 1e9],
+                         "allgather_bucket_size": [5e7, 5e8, 1e9],
+                     }
+                 }
+             reduce_bucket_size and allgather_bucket_size are the tuning parameters in this tuning space.
+         Returns:
+             [list]: a list of experiments generated by taking combinations of values of the tuning space. The above tuning space generates 3*3 = 9 experiments if the user DeepSpeed configuration file does not overwrite the two tuning parameters or define more tuning parameters.
+         """
+         exps = []
+
+         # each zero stage uses a different template configuration file
+         config_zero = tuning_space.get(ZERO_OPTIMIZATION, {})
+         stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, ZERO_OPTIMIZATION_STAGE_DEFAULT)
+         template_config = {}
+         if stage == 0:
+             template_path = DEFAULT_TEMPLATE_PATH_ZERO_0
+             template_config = hjson.load(open(template_path, 'r'))
+             prefix = "z0_"
+
+         elif stage == 1:
+             template_path = DEFAULT_TEMPLATE_PATH_ZERO_1
+             template_config = hjson.load(open(template_path, 'r'))
+             prefix = "z1_"
+
+         elif stage == 2:
+             template_path = DEFAULT_TEMPLATE_PATH_ZERO_2
+             template_config = hjson.load(open(template_path, 'r'))
+             prefix = "z2_"
+
+         elif stage == 3:
+             template_path = DEFAULT_TEMPLATE_PATH_ZERO_3
+             template_config = hjson.load(open(template_path, 'r'))
+             model_info = self.model_info
+             if model_info and "hidden_size" in model_info:
+                 hs = model_info["hidden_size"]
+                 template_config[ZERO_OPTIMIZATION]['reduce_bucket_size'] = hs * hs
+                 template_config[ZERO_OPTIMIZATION]['stage3_prefetch_bucket_size'] = 0.9 * hs * hs
+                 template_config[ZERO_OPTIMIZATION]['stage3_param_persistence_threshold'] = 10 * hs
+             prefix = "z3_"
+         else:
+             return exps
+
+         # replace the corresponding parameter values if the user specifies them in the DeepSpeed configuration file
+         replace_dict(tuning_space, self.user_config, [ZERO_OPTIMIZATION, TRAIN_MICRO_BATCH_SIZE_PER_GPU])
+
+         logger.debug(f"tuning_space = {json.dumps(tuning_space)}")
+
+         all_configs = get_all_configs(tuning_space, ignore_keys=["optimizer"])
+
+         tuning_keys = get_tuning_keys(tuning_space)
+
+         logger.debug(f"tuning_keys = {tuning_keys}")
+
+         logger.debug(f"before pruning total configs = {len(all_configs)}")
+
+         pruned_list = prune_configs(all_configs)
+
+         logger.debug(f"after pruning total configs = {len(pruned_list)}")
+
+         for config in pruned_list:
+             exp_config = copy.deepcopy(template_config)
+             # fill the template with the expr config
+             replace_dict(exp_config, config)
+
+             # if the config does not use offloading, remove the offloading section
+             config_zero = config.get(ZERO_OPTIMIZATION, None)
+             if config_zero:
+                 if OFFLOAD_OPTIMIZER not in config_zero and OFFLOAD_OPTIMIZER in exp_config[ZERO_OPTIMIZATION]:
+                     del exp_config[ZERO_OPTIMIZATION][OFFLOAD_OPTIMIZER]
+                 if OFFLOAD_PARAM not in config_zero and OFFLOAD_PARAM in exp_config[ZERO_OPTIMIZATION]:
+                     del exp_config[ZERO_OPTIMIZATION][OFFLOAD_PARAM]
+             # set gradient accumulation steps according to max_train_batch_size_per_gpu
+             mbs = exp_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU]
+             gas = max_train_batch_size_per_gpu // mbs
+             exp_config[GRADIENT_ACCUMULATION_STEPS] = gas
+             exp_config[TRAIN_BATCH_SIZE] = mbs * gas * \
+                 self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
+             exp = {}
+             # generate the expr name
+             exp_name = canonical_name(exp_config, tuning_keys, prefix)
+             exp['name'] = exp_name
+             exp[DS_CONFIG] = exp_config
+             exp['num_gpus'] = self.exp_num_gpus
+             exp['num_nodes'] = self.exp_num_nodes
+             exps.append(exp)
+
+         return exps
+
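The expansion from list-valued keys to concrete configs (get_all_configs above) is essentially a cartesian product; a self-contained sketch of the idea, not the actual helper:

import itertools

space = {"reduce_bucket_size": [5e7, 5e8, 1e9], "allgather_bucket_size": [5e7, 5e8, 1e9]}
keys = list(space)
configs = [dict(zip(keys, vals)) for vals in itertools.product(*space.values())]
print(len(configs))  # 9 candidate configs, matching the 3*3 docstring example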
404
+ def tune(self):
405
+ """ Tunes Zero stages, micro batch size per GPU, and other Zero configurations. Performance metrics of different tuning spaces are recorded in self.records.
406
+ """
407
+ if has_mlflow:
408
+ self.mlflow_parent_id = os.environ['MLFLOW_RUN_ID']
409
+ mlflow.start_run(run_id=self.mlflow_parent_id)
410
+
411
+ self.start_time = time.time()
412
+ if self.fast_enabled():
413
+ logger.info(f"Fast mode is enabled. Tuning micro batch size only.")
414
+
415
+ # model info profile run with DEFAULT_MIN_MEM_CONFIG
416
+ model_info = self.model_info_profile_run()
417
+ if model_info:
418
+ self.model_info = model_info
419
+ else:
420
+ return
421
+
422
+ logger.info(f"The model has {number_to_string(self.get_model_num_params())} parameters.")
423
+
424
+ self.gpu_mem = self.get_gpu_memory_info()
425
+ logger.info(f"Memory per GPU in the system is {memory_to_string(self.gpu_mem, postfix='B')}.")
426
+
427
+ self.activation_mem = self.get_activation_memory_per_gpu()
428
+ logger.info(
429
+ f"The model requires at least {memory_to_string(self.activation_mem, postfix='B')} activation memory for micro batch size 1."
430
+ )
431
+
432
+ stage = self.user_config.get(ZERO_OPTIMIZATION, {}).get(ZERO_OPTIMIZATION_STAGE, 0)
433
+
434
+ user_zero_stages = [stage] if not isinstance(stage, list) else stage
435
+ logger.info(f"User-defined zero stages are {stage}.")
436
+
437
+ mbs = 0
438
+ max_mbs = 0
439
+ metric_val = 0
440
+
441
+ required_gpu_mem = self.get_instantiation_memory_required_per_gpu(ZeroStageEnum.disabled) + self.activation_mem
442
+ if self.gpu_mem > required_gpu_mem:
443
+ if "all" in user_zero_stages or ZeroStageEnum.disabled in user_zero_stages:
444
+ logger.info(
445
+ f"The model might be runable with ZERO 0 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1), adding DEFAULT_TUNING_SPACE_ZERO_0 to the global tuning space"
446
+ )
447
+ next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_0)
448
+ if next_mbs > mbs:
449
+ mbs = next_mbs
450
+ max_mbs = next_max_mbs
451
+ metric_val = next_metric_val
452
+ if has_mlflow:
453
+ mlflow.log_metric(f"z0{self.metric()}", next_metric_val)
454
+ else:
455
+ logger.info(
456
+ f"The model is not runable with ZERO stage {ZeroStageEnum.disabled} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)"
457
+ )
458
+
459
+ required_gpu_mem = self.get_instantiation_memory_required_per_gpu(
460
+ ZeroStageEnum.optimizer_states) + self.activation_mem
461
+ if self.gpu_mem > required_gpu_mem:
462
+ if "all" in user_zero_stages or ZeroStageEnum.optimizer_states in user_zero_stages:
463
+ logger.info(
464
+ f"The model might be runable with ZERO 1 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_1 to the global tuning space"
465
+ )
466
+ next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_1,
467
+ prev_max_mbs=max_mbs,
468
+ prev_best_mbs=mbs,
469
+ prev_best_metric_val=metric_val)
470
+ if next_mbs > mbs:
471
+ mbs = next_mbs
472
+ max_mbs = next_max_mbs
473
+ metric_val = next_metric_val
474
+ if has_mlflow:
475
+ mlflow.log_metric(f"z1{self.metric()}", next_metric_val)
476
+ else:
477
+ logger.info(
478
+ f"The model is not runable with ZERO stage {ZeroStageEnum.optimizer_states} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)"
479
+ )
480
+
481
+ required_gpu_mem = self.get_instantiation_memory_required_per_gpu(
482
+ ZeroStageEnum.gradients) + self.activation_mem
483
+ if self.gpu_mem > required_gpu_mem:
484
+ if "all" in user_zero_stages or ZeroStageEnum.gradients in user_zero_stages:
485
+ logger.info(
486
+ f"The model might be runable with ZERO 2 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_2 to the global tuning space"
487
+ )
488
+ next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_2,
489
+ prev_max_mbs=max_mbs,
490
+ prev_best_mbs=mbs,
491
+ prev_best_metric_val=metric_val)
492
+ if next_mbs > mbs:
493
+ mbs = next_mbs
494
+ max_mbs = next_max_mbs
495
+ metric_val = next_metric_val
496
+ if has_mlflow:
497
+ mlflow.log_metric(f"z2{self.metric()}", next_metric_val)
498
+ else:
499
+ logger.info(
500
+ f"The model is not runable with ZERO stage {ZeroStageEnum.gradients} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)"
501
+ )
502
+
503
+ required_gpu_mem = self.get_instantiation_memory_required_per_gpu(ZeroStageEnum.weights) + self.activation_mem
504
+ if self.gpu_mem > required_gpu_mem:
505
+ if "all" in user_zero_stages or ZeroStageEnum.weights in user_zero_stages:
506
+ logger.info(
507
+ f"The model might be runable with ZERO 3 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_3 to the global tuning space"
508
+ )
509
+ _, _, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_3,
510
+ prev_max_mbs=max_mbs,
511
+ prev_best_mbs=mbs,
512
+ prev_best_metric_val=metric_val)
513
+ if has_mlflow:
514
+ mlflow.log_metric(f"z3{self.metric()}", next_metric_val)
515
+ else:
516
+ logger.info(
517
+ f"The model has {self.get_model_num_params()} parameters and requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory per GPU with DeepSpeed Zero stage {ZeroStageEnum.weights} optimization. Memory per GPU in system is {memory_to_string(self.gpu_mem)}. No tuning is performed."
518
+ )
519
+ return
520
+ if has_mlflow:
521
+ mlflow.end_run()
522
+
523
+ def tune_space(self, tuning_space, prev_max_mbs=0, prev_best_mbs=0, prev_best_metric_val=0):
524
+ config_zero = tuning_space.get(ZERO_OPTIMIZATION, {})
525
+ stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, None)
526
+ tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage)
527
+ tuning_micro_batch_sizes = []
528
+ max_train_batch_size_per_gpu = 0
529
+ tuning_micro_batch_sizes_overwritten = False
530
+
531
+ # calculate max micro batch size using gpu memory, model instantiation memory and activation memory
532
+ # calculated_max_micro_batch_size = (memory_per_gpu - instantiation_memory) // activation_memory_micro_batch_size_1
533
+ calculated_max_micro_batch_size = int(
534
+ self.gpu_mem - self.get_instantiation_memory_required_per_gpu(stage)) // self.activation_mem
535
+ logger.info(
536
+ f"Start tuning for space {tuning_space_name}, calculated_max_micro_batch_size = {calculated_max_micro_batch_size}"
537
+ )
538
+
539
+ if calculated_max_micro_batch_size < prev_max_mbs:
540
+ logger.info(f"No need to tune Zero stage {stage}. End tuning for space {tuning_space_name}")
541
+ return 0, 0, 0
542
+
543
+ if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self.user_config and isinstance(
544
+ self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU], list):
545
+ # user-specified micro batch size per gpu is a list which overwrites the default tuning behavior
546
+ tuning_micro_batch_sizes = [
547
+ s for s in self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] if isinstance(s, int)
548
+ ]
549
+ gas = self.get_gas_from_user_config()
550
+ min_micro_batch_size = min(tuning_micro_batch_sizes)
551
+ max_micro_batch_size = max(tuning_micro_batch_sizes)
552
+ max_train_batch_size_per_gpu = max_micro_batch_size * gas
553
+ tuning_micro_batch_sizes_overwritten = True
554
+ else:
555
+ # auto-detects the list of micro batch sizes to tune
556
+ min_micro_batch_size, max_micro_batch_size = self.get_min_max_micro_batch_size(
557
+ stage, prev_max_mbs, calculated_max_micro_batch_size)
558
+
559
+ if max_micro_batch_size < prev_max_mbs:
560
+ logger.info(f"No need to tune Zero stage {stage}. End tuning for space {tuning_space_name}")
561
+ return 0, 0, 0
562
+
563
+ tuning_micro_batch_sizes, max_train_batch_size_per_gpu = self.get_tuning_micro_batch_size_list(
564
+ min_micro_batch_size,
565
+ max_micro_batch_size,
566
+ num_tuning_micro_batch_sizes=self.num_tuning_micro_batch_sizes())
567
+
568
+ logger.info(
569
+ f"tuning_micro_batch_sizes = {tuning_micro_batch_sizes}, max_train_batch_size_per_gpu = {max_train_batch_size_per_gpu}"
570
+ )
571
+
572
+ # return if the tuning_micro_batch_sizes list is empty
573
+ if not tuning_micro_batch_sizes:
574
+ logger.info(f"End tuning for space {tuning_space_name}")
575
+ return 0, 0, 0
576
+
577
+ # tune micro batch sizes and gradient accumulation steps given max_train_batch_size_per_gpu
578
+ tuning_micro_batch_sizes = self.run_tuning_micro_batch_sizes(tuning_micro_batch_sizes,
579
+ max_train_batch_size_per_gpu,
580
+ min_micro_batch_size, stage,
581
+ tuning_micro_batch_sizes_overwritten)
582
+
583
+ fast_best_record = self.get_best_space_record(tuning_space_name)
584
+ fast_best_metric_val = fast_best_record[1] if fast_best_record else 0
585
+ fast_best_mbs = fast_best_record[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] if fast_best_record else 0
586
+ logger.info(f"fast_best_mbs = {fast_best_mbs}, name = {fast_best_record[0]['name']}")
587
+
588
+ if self.fast_enabled() or stage == 0:
589
+ logger.info(f"End tuning for space: {tuning_space_name}")
590
+ return max_micro_batch_size, fast_best_mbs, fast_best_metric_val
591
+
592
+ # if the best metric or the micro batch size for that best metric in the current Zero stage after tuning micro batch size is less than the corresponding value in the previous Zero stage, return, do not tune other Zero configuration parameters
593
+ if stage > 0:
594
+ if fast_best_mbs <= prev_best_mbs or fast_best_metric_val < prev_best_metric_val:
595
+ logger.info(
596
+ f"End tuning for space: {tuning_space_name}. No need to tune other Zero configuration parameters.")
597
+ return max_micro_batch_size, fast_best_mbs, fast_best_metric_val
598
+
599
+ tuning_space[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = tuning_micro_batch_sizes
600
+ tuning_space_name = canonical_name(tuning_space,
601
+ tuning_keys=get_tuning_keys(tuning_space),
602
+ prefix="z" + str(stage) + "_",
603
+ omit_val=True)
604
+
605
+ logger.info(f'Tuning space is {tuning_space}')
606
+ logger.info(f'Tuning space name is {tuning_space_name}')
607
+
608
+ exps = self._generate_experiments(tuning_space, max_train_batch_size_per_gpu)
609
+
610
+ logger.info(f'Tuner type is {self.autotuning_config.tuner_type}')
611
+ if self.autotuning_config.tuner_type == AUTOTUNING_TUNER_MODELBASED:
612
+ t = ModelBasedTuner(exps, self.rm, self.metric(), tuning_space)
613
+ elif self.autotuning_config.tuner_type == AUTOTUNING_TUNER_RANDOM:
614
+ t = RandomTuner(exps, self.rm, self.metric())
615
+ else:
616
+ t = GridSearchTuner(exps, self.rm, self.metric())
617
+
618
+ sample_size = len(self.rm.nodes) * self.rm.num_gpus_per_node // (self.exp_num_gpus * self.exp_num_nodes)
619
+ num_exps = t.tune(sample_size=sample_size,
620
+ n_trials=self.autotuning_config.tuner_num_trials,
621
+ early_stopping=self.autotuning_config.tuner_early_stopping)
622
+ exp = t.best_exp
623
+ metric_val = t.best_metric_val
624
+ if exp:
625
+ self.update_records(tuning_space_name, exp, metric_val, num_exps)
626
+
627
+ full_best_record = self.get_best_space_record(tuning_space_name)
628
+ full_best_metric_val = full_best_record[1] if full_best_record else -1
629
+
630
+ if full_best_metric_val > fast_best_metric_val:
631
+ best_metric_val = full_best_metric_val
632
+ best_mbs = full_best_record[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] if full_best_record else -1
633
+ else:
634
+ best_metric_val = fast_best_metric_val
635
+ best_mbs = fast_best_mbs
636
+
637
+ logger.info(f"End tuning for space: {tuning_space_name}")
638
+ return max_micro_batch_size, best_mbs, best_metric_val
639
+
640
+ def get_plateau_mbs(self, tuning_space_name):
641
+ if tuning_space_name not in self.records:
642
+ return 0
643
+ space_records = self.records[tuning_space_name]
644
+ sorted_space_records = sorted(space_records, key=lambda x: x[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU])
645
+ prev_metric_val = None
646
+ prev_micro_batch_size = 0
647
+ for (exp, metric_val, _) in sorted_space_records:
648
+ if prev_metric_val:
649
+ if metric_val < prev_metric_val:
650
+ break
651
+ if (metric_val >= prev_metric_val
652
+ and (metric_val - prev_metric_val) / prev_metric_val < METRIC_PERCENT_DIFF_CONST):
653
+ break
654
+ prev_metric_val = metric_val
655
+ prev_micro_batch_size = exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU]
656
+ plateau_mbs = prev_micro_batch_size
657
+ return plateau_mbs
658
+
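
The plateau rule above stops as soon as throughput drops, or improves by less than METRIC_PERCENT_DIFF_CONST (5%). A minimal standalone sketch of the same rule, assuming records are (micro_batch_size, metric_val) pairs already sorted by micro batch size:

METRIC_PERCENT_DIFF_CONST = 0.05

def plateau_mbs(sorted_records):
    # sorted_records: list of (micro_batch_size, metric_val), ascending by mbs
    prev_val, prev_mbs = None, 0
    for mbs, val in sorted_records:
        if prev_val is not None:
            # stop once throughput drops, or gains less than 5% over the previous size
            if val < prev_val or (val - prev_val) / prev_val < METRIC_PERCENT_DIFF_CONST:
                break
        prev_val, prev_mbs = val, mbs
    return prev_mbs

plateau_mbs([(1, 100.0), (2, 180.0), (4, 185.0), (8, 90.0)])  # -> 2
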
659
+ def get_model_num_params(self):
660
+ if self.model_info and "num_params" in self.model_info:
661
+ return self.model_info["num_params"]
662
+
663
+ def model_info_profile_run(self):
664
+ """Does a model information profiling experiment that collects the number of model parameters and activation memory.\
665
+ The experiment produces a "profile_model_info" folder under self.results_dir.
666
+ Returns:
667
+ [dict]: a model information dictionary, e.g., {"num_params": 335144976, "trainable_num_params": 335144976, "activation_mem_per_gpu": 324358144, "rank": 0}
668
+ """
669
+ logger.info("Starting model info profile run.")
670
+ model_info = self.autotuning_config.model_info
671
+ if model_info and MODEL_INFO_NUM_PARAMS in model_info:
672
+ return model_info
673
+
674
+ ds_config = copy.deepcopy(self.user_config)
675
+ replace_dict(ds_config, DEFAULT_MIN_MEM_CONFIG)
676
+
677
+ model_info_path = os.path.join(self.results_dir, "profile_model_info", "model_info.json")
678
+ ds_config[AUTOTUNING] = {"enabled": True, "model_info_path": model_info_path, "model_info": {"profile": True}}
679
+
680
+ exp_config = {}
681
+ exp_name = "profile_model_info"
682
+ exp_config['name'] = exp_name
683
+ exp_config[DS_CONFIG] = ds_config
684
+ exp_config['num_gpus'] = self.exp_num_gpus
685
+ exp_config['num_nodes'] = self.exp_num_nodes
686
+ exp_config['hostfile'] = self.args.hostfile
687
+ exp_path = os.path.join(self.exps_dir, f'{exp_name}.json')
688
+
689
+ with open(exp_path, 'w', buffering=BUFSIZE) as fd:
690
+ json.dump(exp_config, fd)
691
+ fd.flush()
692
+ os.fsync(fd)
693
+
694
+ self.rm.schedule_experiments([exp_path])
695
+ self.rm.run()
696
+
697
+ for exp_id, (exp_json, err) in self.rm.finished_experiments.items():
698
+ self.rm.clear()
699
+ if err:
700
+ logger.error(f"The model is not runnable with DeepSpeed with error = {err}")
701
+ return None
702
+
703
+ if os.path.exists(model_info_path):
704
+ with open(model_info_path, 'r') as f:
705
+ model_info = hjson.load(f)
706
+ return model_info
707
+
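
Because the method returns early when num_params is already present, the profiling run can be skipped entirely by supplying the value up front. A hypothetical ds_config fragment (the number is the docstring's example, not a real measurement):

ds_config = {
    "autotuning": {
        "enabled": True,
        "model_info": {
            "num_params": 335144976,  # pre-supplied; the profiling experiment is skipped
        },
    },
}
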
708
+ def update_records(self, space_name, exp, metric_val, num_exps):
709
+ if space_name not in self.records:
710
+ self.records[space_name] = [(exp, metric_val, num_exps)]
711
+ else:
712
+ self.records[space_name].append((exp, metric_val, num_exps))
713
+
714
+ def get_best_space_record(self, space_name):
715
+ if space_name not in self.records:
716
+ return None
717
+ space_records = self.records[space_name]
718
+ best_space_record = None
719
+ space_num_exps = 0
720
+ for (exp, metric_val, num_exps) in space_records:
721
+ space_num_exps += num_exps
722
+ if best_space_record is None or metric_val > best_space_record[1]:
723
+ best_space_record = (exp, metric_val)
724
+ if best_space_record:
725
+ best_space_record = best_space_record + (space_num_exps, )
726
+ return best_space_record
727
+
728
+ def get_best_space_records(self):
729
+ best_space_records = {}
730
+ global_best_record = None
731
+ for space_name, space_records in self.records.items():
732
+ best_space_record = self.get_best_space_record(space_name)
733
+ if best_space_record:
734
+ best_space_records[space_name] = best_space_record
735
+ if not global_best_record or best_space_record[1] > global_best_record[1]:
736
+ global_best_record = best_space_record
737
+ if global_best_record:
738
+ best_space_records[GLOBAL_TUNING_SPACE] = global_best_record
739
+ return best_space_records
740
+
741
+ def run_tuning_micro_batch_sizes(self, tuning_micro_batch_sizes, max_train_batch_size_per_gpu,
742
+ min_micro_batch_size, stage, tuning_micro_batch_sizes_overwritten):
743
+ assert tuning_micro_batch_sizes, "the tuning micro batch size list is empty"
744
+ tuning_micro_batch_sizes.sort()
745
+ max_micro_batch_size = tuning_micro_batch_sizes[-1]
746
+ max_micro_batch_size_metric_val = 0
747
+
748
+ ds_config = get_first_config(self.user_config)
749
+ ds_config[ZERO_OPTIMIZATION] = {ZERO_OPTIMIZATION_STAGE: stage}
750
+ tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage)
751
+
752
+ exp_paths = []
753
+ for mbs in tuning_micro_batch_sizes:
754
+ ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
755
+ gas = max_train_batch_size_per_gpu // mbs
756
+ ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
757
+ ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
758
+ self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
759
+ exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
760
+ exp_config = {}
761
+ exp_config['name'] = exp_name
762
+ exp_config[DS_CONFIG] = ds_config
763
+ exp_config['num_gpus'] = self.exp_num_gpus
764
+ exp_config['num_nodes'] = self.exp_num_nodes
765
+ exp_config['hostfile'] = self.args.hostfile
766
+ exp_path = os.path.join(self.exps_dir, f'{exp_name}.json')
767
+
768
+ with open(exp_path, 'w', buffering=BUFSIZE) as fd:
769
+ json.dump(exp_config, fd)
770
+ fd.flush()
771
+ os.fsync(fd)
772
+ exp_paths.append(exp_path)
773
+
774
+ self.rm.schedule_experiments(exp_paths)
775
+ self.rm.run()
776
+
777
+ for exp_id, (exp, err) in self.rm.finished_experiments.items():
778
+ if exp:
779
+ metric_file = exp[DS_CONFIG][AUTOTUNING][AUTOTUNING_METRIC_PATH]
780
+ if os.path.exists(metric_file):
781
+
782
+ with open(metric_file, 'r') as f:
783
+ results = hjson.load(f)
784
+ metric_val = results[self.metric()]
785
+ self.update_records(tuning_space_name, exp, metric_val, 1)
786
+ if max_micro_batch_size == exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU]:
787
+ max_micro_batch_size_metric_val = metric_val
788
+ if has_mlflow:
789
+ os.environ.pop('MLFLOW_RUN_ID')
790
+ mlflow.start_run(nested=True, run_name=exp['name'])
791
+ for metric in results:
792
+ mlflow.log_metric(metric, results[metric])
793
+ mlflow.end_run()
794
+ os.environ['MLFLOW_RUN_ID'] = self.mlflow_parent_id
795
+ else:
796
+ self.update_records(tuning_space_name, exp, 0, 1)
797
+ else:
798
+ mbs = exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU]
799
+ logger.info(f"micro batch size = {mbs} was not run successfully")
800
+
801
+ self.rm.clear()
802
+
803
+ if tuning_micro_batch_sizes_overwritten:
804
+ return tuning_micro_batch_sizes
805
+
806
+ # in an auto-detected tuning_micro_batch_sizes list, max_micro_batch_size might not be performant because its memory consumption is close to the limit
807
+ # try smaller values while gas stays the same
808
+ # if a more performant mbs value is found, use it to replace max_micro_batch_size in the list
809
+ min_micro_batch_size_with_same_gas = (tuning_micro_batch_sizes[-2] +
810
+ 1) if len(tuning_micro_batch_sizes) > 1 else min_micro_batch_size
811
+
812
+ prev_best_metric_val = max_micro_batch_size_metric_val
813
+ prev_best_mbs = max_micro_batch_size
814
+
815
+ stride = (max_micro_batch_size - min_micro_batch_size_with_same_gas) // 3
816
+ if stride == 0:
817
+ stride = 1
818
+ for mbs in reversed(range(min_micro_batch_size_with_same_gas, max_micro_batch_size, stride)):
819
+ ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
820
+ gas = max_train_batch_size_per_gpu // mbs
821
+ ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
822
+ ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
823
+ self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
824
+ exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
825
+ exp, metric_val = self.run_ds_config(ds_config, exp_name)
826
+
827
+ if metric_val:
828
+ with open(metric_file, 'r') as f:
829
+ results = hjson.load(f)
830
+ metric_val = results[self.metric()]
831
+ if has_mlflow:
832
+ os.environ.pop('MLFLOW_RUN_ID')
833
+ mlflow.start_run(nested=True, run_name=exp_name)
834
+ for metric in results:
835
+ mlflow.log_metric(metric, results[metric])
836
+ mlflow.end_run()
837
+ os.environ['MLFLOW_RUN_ID'] = self.mlflow_parent_id
838
+ self.update_records(tuning_space_name, exp, metric_val, 1)
839
+ if metric_val > prev_best_metric_val * (1 + METRIC_PERCENT_DIFF_CONST):
840
+ prev_best_metric_val = metric_val
841
+ prev_best_mbs = mbs
842
+ else:
843
+ break
844
+ else:
845
+ self.update_records(tuning_space_name, exp, 0, 1)
846
+ break
847
+ if prev_best_mbs != max_micro_batch_size:
848
+ tuning_micro_batch_sizes[-1] = prev_best_mbs
849
+ return tuning_micro_batch_sizes
850
+
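
The descending sweep above only probes values between the second-largest tuned size and max_micro_batch_size, in at most roughly three strides, largest first. A small sketch of the candidate generation under the same stride rule:

def candidate_mbs_descending(min_mbs_same_gas, max_mbs):
    # at most ~3 probes between the two largest tuned sizes, tried largest first
    stride = max((max_mbs - min_mbs_same_gas) // 3, 1)
    return list(reversed(range(min_mbs_same_gas, max_mbs, stride)))

candidate_mbs_descending(9, 16)  # -> [15, 13, 11, 9]
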
851
+ def get_min_max_micro_batch_size(self, stage, min_micro_batch_size, calculated_max_micro_batch_size):
852
+ # get min and max micro batch size with gradient accumulation steps = 1
853
+ if min_micro_batch_size > calculated_max_micro_batch_size:
854
+ return -1, -1
855
+
856
+ used_micro_batch_sizes = []
857
+ tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage)
858
+
859
+ ds_config = get_first_config(self.user_config)
860
+ ds_config[ZERO_OPTIMIZATION] = {ZERO_OPTIMIZATION_STAGE: stage}
861
+ gas = self.get_gas_from_user_config()
862
+ ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
863
+
864
+ # search for the min micro batch size
865
+ if min_micro_batch_size < 1:
866
+ if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self.user_config and isinstance(
867
+ self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU], int):
868
+ # user specifies train_micro_batch_size_per_gpu as an int
869
+ mbs = int(self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU])
870
+ else:
871
+ # user does not specify train_micro_batch_size_per_gpu or sets it to "auto" when using Hugging Face
872
+ val = self.get_val_from_user_args(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
873
+ if val:
874
+ mbs = int(val)
875
+ else:
876
+ mbs = 1
877
+ assert mbs > 0, "The micro batch size per GPU must be greater than 0."
878
+ ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
879
+ ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
880
+ ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
881
+ self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
882
+ exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
883
+ exp, metric_val = self.run_ds_config(ds_config, exp_name)
884
+ if metric_val:
885
+ self.update_records(tuning_space_name, exp, metric_val, 1)
886
+ used_micro_batch_sizes.append(mbs)
887
+ min_micro_batch_size = mbs
888
+ else:
889
+ self.update_records(tuning_space_name, exp, 0, 1)
890
+ logger.info(f"User-specified micro batch size per GPU {mbs} does not run")
891
+ if self.min_train_micro_batch_size_per_gpu() == mbs:
892
+ return -1, -1
893
+ mbs = self.min_train_micro_batch_size_per_gpu()
894
+ ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
895
+ ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
896
+ ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
897
+ self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
898
+ exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
899
+ exp, metric_val = self.run_ds_config(ds_config, exp_name)
900
+ if not metric_val:
901
+ self.update_records(tuning_space_name, exp, 0, 1)
902
+ logger.info(f"min_train_micro_batch_size_per_gpu {mbs} is not runnable.")
903
+ return -1, -1
904
+ self.update_records(tuning_space_name, exp, metric_val, 1)
905
+ min_micro_batch_size = mbs
906
+ used_micro_batch_sizes.append(mbs)
907
+ else:
908
+ ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = min_micro_batch_size
909
+ ds_config[GRADIENT_ACCUMULATION_STEPS] = gas
910
+ ds_config[TRAIN_BATCH_SIZE] = min_micro_batch_size * gas * \
911
+ self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
912
+ exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(min_micro_batch_size)
913
+ exp, metric_val = self.run_ds_config(ds_config, exp_name)
914
+ if metric_val:
915
+ self.update_records(tuning_space_name, exp, metric_val, 1)
916
+ used_micro_batch_sizes.append(min_micro_batch_size)
917
+ else:
918
+ self.update_records(tuning_space_name, exp, 0, 1)
919
+ return -1, -1
920
+
921
+ # search for the max micro batch size
922
+ max_micro_batch_size = min(calculated_max_micro_batch_size, self.max_train_micro_batch_size_per_gpu())
923
+ for mbs in [math.ceil(1.05 * max_micro_batch_size), max_micro_batch_size, int(0.95 * max_micro_batch_size)]:
924
+ if mbs > self.max_train_micro_batch_size_per_gpu():
925
+ continue
926
+ if mbs in used_micro_batch_sizes:
927
+ return min_micro_batch_size, mbs
928
+ ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs
929
+ ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \
930
+ self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
931
+ exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs)
932
+ exp, metric_val = self.run_ds_config(ds_config, exp_name)
933
+
934
+ if metric_val:
935
+ logger.info(f"mbs = {mbs} is found as max mbs")
936
+ self.update_records(tuning_space_name, exp, metric_val, 1)
937
+ used_micro_batch_sizes.append(mbs)
938
+ return min_micro_batch_size, mbs
939
+ else:
940
+ self.update_records(tuning_space_name, exp, 0, 1)
941
+
942
+ space_records = self.records[tuning_space_name] if tuning_space_name in self.records else []
943
+ if space_records:
944
+ prev_idx = min(range(len(space_records)),
945
+ key=lambda i: abs(space_records[i][0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] -
946
+ min_micro_batch_size))
947
+ prev_metric_val = space_records[prev_idx][1]
948
+ else:
949
+ prev_metric_val = None
950
+
951
+ low = min_micro_batch_size
952
+ high = max_micro_batch_size
953
+ # binary search until low is the smallest micro batch size that OOMs; the largest runnable size is then low - 1.
954
+ while low <= high:
955
+ mid = int((low + high) // 2)
956
+ logger.debug(f"trying mbs = {mid}, low = {low}, high = {high}")
957
+ if mid not in used_micro_batch_sizes:
958
+ ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mid
959
+ ds_config[TRAIN_BATCH_SIZE] = mid * gas * \
960
+ self.exp_num_gpus * self.exp_num_nodes // self.mp_size()
961
+ exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mid)
962
+ exp, metric_val = self.run_ds_config(ds_config, exp_name)
963
+ if metric_val:
964
+ low = mid + 1
965
+ self.update_records(tuning_space_name, exp, metric_val, 1)
966
+ used_micro_batch_sizes.append(mid)
967
+ if prev_metric_val and (
968
+ (metric_val - prev_metric_val) / prev_metric_val) < METRIC_PERCENT_DIFF_CONST:
969
+ logger.info(f"performance plateaus at mbs = {low}")
970
+ break
971
+ prev_metric_val = metric_val
972
+ else:
973
+ self.update_records(tuning_space_name, exp, 0, 1)
974
+ high = mid - 1
975
+ else:
976
+ low = mid + 1
977
+ max_micro_batch_size = low - 1
978
+
979
+ logger.info(f"min_micro_batch_size = {min_micro_batch_size}, max_micro_batch_size = {max_micro_batch_size}.")
980
+
981
+ return min_micro_batch_size, max_micro_batch_size
982
+
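
The binary search above can be read independently of the experiment machinery. A self-contained sketch, where runnable() stands in for launching an experiment and returning a truthy metric when it does not OOM (the real loop also breaks early when throughput plateaus; that refinement is omitted here):

def find_max_mbs(low, high, runnable):
    while low <= high:
        mid = (low + high) // 2
        if runnable(mid):
            low = mid + 1   # mid fits; try something larger
        else:
            high = mid - 1  # mid fails; try something smaller
    return low - 1          # largest size that ran successfully

find_max_mbs(1, 64, lambda mbs: mbs <= 24)  # -> 24
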
983
+ def get_gas_from_user_config(self):
984
+ gas = 1
985
+ if GRADIENT_ACCUMULATION_STEPS in self.user_config:
986
+ gas_in_config = self.user_config[GRADIENT_ACCUMULATION_STEPS]
987
+ if isinstance(gas_in_config, int):
988
+ gas = gas_in_config
989
+ elif gas_in_config == "auto": # GRADIENT_ACCUMULATION_STEPS: "auto"
990
+ val = self.get_val_from_user_args(GRADIENT_ACCUMULATION_STEPS)
991
+ if val:
992
+ gas = int(val)
993
+ elif isinstance(gas_in_config, list):
994
+ logger.info(
995
+ f"Specifying a list of {GRADIENT_ACCUMULATION_STEPS} to tune is not supported. 1 would be used.")
996
+ assert gas > 0, "Gradient accumulation steps must be positive."
997
+ return gas
998
+
999
+ def get_val_from_user_args(self, ds_name):
1000
+ arg_mappings = self.autotuning_config.arg_mappings
1001
+ user_args = self.args.user_args
1002
+ if arg_mappings and ds_name in arg_mappings:
1003
+ arg_name = arg_mappings[ds_name]
1004
+ if arg_name in user_args:
1005
+ idx = user_args.index(arg_name)
1006
+ if user_args[idx + 1].isnumeric():
1007
+ return (user_args[idx + 1])
1008
+ return None
1009
+
1010
+ def get_tuning_micro_batch_size_list(self, min_micro_batch_size, max_micro_batch_size,
1011
+ num_tuning_micro_batch_sizes):
1012
+ """Get a list of micro batch sizes to tune based on min and max values, as well as the size of the list.
1013
+ Args:
1014
+ min_micro_batch_size ([int]): min micro batch size per GPU
1015
+ max_micro_batch_size ([int]): max micro batch size per GPU
1016
+ num_tuning_micro_batch_sizes (int): the number of items in the returned list
1017
+
1018
+ Returns:
1019
+ [list]: a list of micro batch sizes to tune.
1020
+ """
1021
+ if min_micro_batch_size <= 0 or max_micro_batch_size <= 0:
1022
+ logger.info(
1023
+ f"min_micro_batch_size = {min_micro_batch_size}, max_micro_batch_size = {max_micro_batch_size}")
1024
+ return [], 0
1025
+
1026
+ # NUM_GPUS=$(( ${NUM_WORKERS} * ${NUM_GPUS_PER_WORKER} ))
1027
+ # DP_SIZE=$(( ${NUM_GPUS} / (${PP_SIZE} * ${MP_SIZE}) ))
1028
+ # GRAD_ACC_STEPS=$(( ${TARGET_GLOBAL_BATCH_SIZE} / (${BATCH_SIZE} * ${DP_SIZE}) ))
1029
+ if self.max_train_batch_size(
1030
+ ) and self.max_train_batch_size() > 0: # if the user specifies a max_train_batch_size
1031
+ max_train_batch_size_per_gpu = self.max_train_batch_size() * self.mp_size() // (self.exp_num_gpus *
1032
+ self.exp_num_nodes)
1033
+ else:
1034
+ gas = self.get_gas_from_user_config()
1035
+ max_train_batch_size_per_gpu = max_micro_batch_size * gas // self.mp_size()
1036
+ logger.info(f"max_train_batch_size_per_gpu = {max_train_batch_size_per_gpu}")
1037
+ if min_micro_batch_size < max_micro_batch_size // 2:
1038
+ min_micro_batch_size = max_micro_batch_size // 2
1039
+
1040
+ # constant stride
1041
+ stride = (max_micro_batch_size - min_micro_batch_size) // num_tuning_micro_batch_sizes
1042
+ if stride == 0:
1043
+ stride = 1
1044
+ ls = []
1045
+ min_gas = max_train_batch_size_per_gpu // max_micro_batch_size
1046
+ # if gas is the same as min_gas, do not add mbs to the tuning list
1047
+ for mbs in range(min_micro_batch_size, max_micro_batch_size, stride):
1048
+ if max_train_batch_size_per_gpu // mbs != min_gas:
1049
+ ls.append(mbs)
1050
+ ls.append(max_micro_batch_size)
1051
+
1052
+ return ls, max_train_batch_size_per_gpu
1053
+
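
A worked example of the list construction above, using illustrative numbers (min=8, max=16, three list entries, max_train_batch_size_per_gpu=32):

def tuning_mbs_list(min_mbs, max_mbs, n, max_tbs_per_gpu):
    min_mbs = max(min_mbs, max_mbs // 2)       # never tune below half of max
    stride = max((max_mbs - min_mbs) // n, 1)
    min_gas = max_tbs_per_gpu // max_mbs
    ls = [m for m in range(min_mbs, max_mbs, stride)
          if max_tbs_per_gpu // m != min_gas]  # skip sizes implying the same gas as max
    ls.append(max_mbs)
    return ls

tuning_mbs_list(8, 16, 3, 32)  # -> [8, 10, 16]; 12 and 14 imply gas == 2, same as 16
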
1054
+ def run_ds_config(self, ds_config, exp_name):
1055
+ exp_config = {}
1056
+ exp_config['name'] = exp_name
1057
+ exp_config[DS_CONFIG] = ds_config
1058
+ exp_config['num_gpus'] = self.exp_num_gpus
1059
+ exp_config['num_nodes'] = self.exp_num_nodes
1060
+ exp_config['hostfile'] = self.args.hostfile
1061
+ exp_path = os.path.join(self.exps_dir, f'{exp_name}.json')
1062
+
1063
+ logger.debug(f'run_ds_config exp_name = {exp_name}')
1064
+
1065
+ with open(exp_path, 'w', buffering=BUFSIZE) as fd:
1066
+ json.dump(exp_config, fd)
1067
+ fd.flush()
1068
+ os.fsync(fd)
1069
+ self.rm.schedule_experiments([exp_path])
1070
+ self.rm.run()
1071
+ exp, metric_val = self.rm.parse_results(self.metric())
1072
+ self.rm.clear()
1073
+ return exp, metric_val
1074
+
1075
+ def write_optimal_config(self):
1076
+ best_space_records = self.get_best_space_records()
1077
+ if GLOBAL_TUNING_SPACE not in best_space_records:
1078
+ return
1079
+ best_exp, best_metric_val, _ = best_space_records[GLOBAL_TUNING_SPACE]
1080
+ if best_exp:
1081
+ exp_dir = best_exp["result_dir"]
1082
+ cmd = None
1083
+ with open(os.path.join(exp_dir, "cmd.txt"), "r") as f:
1084
+ cmd = [str(i) for i in f.read().split()]
1085
+
1086
+ ds_config = hjson.load(open(os.path.join(exp_dir, "ds_config.json"), "r"))
1087
+ ds_config.pop(AUTOTUNING)
1088
+
1089
+ ds_config_path = os.path.join(self.results_dir, "ds_config_optimal.json")
1090
+ json.dump(ds_config, open(ds_config_path, "w"))
1091
+
1092
+ cmd_path = os.path.join(self.results_dir, "cmd_optimal.txt")
1093
+ with open(cmd_path, "w") as fd:
1094
+ fd.write(" ".join(cmd))
1095
+ fd.write("\n")
1096
+ fd.flush()
1097
+ self.optimal_cmd = cmd
1098
+ self.optimal_ds_config = ds_config
1099
+ logger.info(
1100
+ f"Wrote the optimal DeepSpeed configuration found by autotuning to {ds_config_path}, and the corresponding DeepSpeed command to {cmd_path}"
1101
+ )
1102
+
1103
+ def run_after_tuning(self):
1104
+ """ Launches the training with the optimal DeepSpeed configuration found through the autotuning process.
1105
+ "ds_config_optimal.json" describing the optimal DeepSpeed configuration as well the command used to launch training "cmd_optimal.txt" are saved to self.results_dir.
1106
+ """
1107
+ if self.optimal_cmd:
1108
+ result = subprocess.Popen(self.optimal_cmd)
1109
+ result.wait()
1110
+
1111
+ logger.info(f"Done running with the optimal DeepSpeed configuration using {self.optimal_cmd}")
1112
+ else:
1113
+ logger.info(f"No optimal DeepSpeed configuration found by autotuning.")
venv/lib/python3.10/site-packages/deepspeed/autotuning/config.py ADDED
@@ -0,0 +1,98 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from deepspeed.runtime.config_utils import get_scalar_param, get_dict_param, DeepSpeedConfigObject
7
+ from deepspeed.autotuning.constants import *
8
+
9
+
10
+ class DeepSpeedAutotuningConfig(DeepSpeedConfigObject):
11
+
12
+ def __init__(self, param_dict):
13
+ super(DeepSpeedAutotuningConfig, self).__init__()
14
+
15
+ self.enabled = None
16
+ self.start_step = None
17
+ self.end_step = None
18
+ self.metric_path = None
19
+ self.arg_mappings = None
20
+ self.metric = None
21
+ self.model_info = None
22
+ self.results_dir = None
23
+ self.exps_dir = None
24
+ self.overwrite = None
25
+
26
+ if param_dict and AUTOTUNING in param_dict.keys():
27
+ autotuning_dict = param_dict[AUTOTUNING]
28
+ else:
29
+ autotuning_dict = {}
30
+
31
+ self._initialize(autotuning_dict)
32
+
33
+ def _initialize(self, autotuning_dict):
34
+ self.enabled = get_scalar_param(autotuning_dict, AUTOTUNING_ENABLED, AUTOTUNING_ENABLED_DEFAULT)
35
+
36
+ self.fast = get_scalar_param(autotuning_dict, AUTOTUNING_FAST, AUTOTUNING_FAST_DEFAULT)
37
+
38
+ self.results_dir = get_scalar_param(autotuning_dict, AUTOTUNING_RESULTS_DIR, AUTOTUNING_RESULTS_DIR_DEFAULT)
39
+ assert self.results_dir, "results_dir cannot be empty"
40
+ self.exps_dir = get_scalar_param(autotuning_dict, AUTOTUNING_EXPS_DIR, AUTOTUNING_EXPS_DIR_DEFAULT)
41
+ assert self.exps_dir, "exps_dir cannot be empty"
42
+ self.overwrite = get_scalar_param(autotuning_dict, AUTOTUNING_OVERWRITE, AUTOTUNING_OVERWRITE_DEFAULT)
43
+
44
+ self.start_profile_step = get_scalar_param(autotuning_dict, AUTOTUNING_START_PROFILE_STEP,
45
+ AUTOTUNING_START_PROFILE_STEP_DEFAULT)
46
+
47
+ self.end_profile_step = get_scalar_param(autotuning_dict, AUTOTUNING_END_PROFILE_STEP,
48
+ AUTOTUNING_END_PROFILE_STEP_DEFAULT)
49
+
50
+ self.metric = get_scalar_param(autotuning_dict, AUTOTUNING_METRIC, AUTOTUNING_METRIC_DEFAULT)
51
+
52
+ self.metric_path = get_scalar_param(autotuning_dict, AUTOTUNING_METRIC_PATH, AUTOTUNING_METRIC_PATH_DEFAULT)
53
+
54
+ self.tuner_type = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_TYPE, AUTOTUNING_TUNER_TYPE_DEFAULT)
55
+
56
+ self.tuner_early_stopping = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_EARLY_STOPPING,
57
+ AUTOTUNING_TUNER_EARLY_STOPPING_DEFAULT)
58
+
59
+ self.tuner_num_trials = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_NUM_TRIALS,
60
+ AUTOTUNING_TUNER_NUM_TRIALS_DEFAULT)
61
+
62
+ self.arg_mappings = get_dict_param(autotuning_dict, AUTOTUNING_ARG_MAPPINGS, AUTOTUNING_ARG_MAPPINGS_DEFAULT)
63
+
64
+ self.model_info = get_model_info_config(autotuning_dict)
65
+
66
+ self.model_info_path = get_scalar_param(autotuning_dict, AUTOTUNING_MODEL_INFO_PATH,
67
+ AUTOTUNING_MODEL_INFO_PATH_DEFAULT)
68
+ self.mp_size = get_scalar_param(autotuning_dict, AUTOTUNING_MP_SIZE, AUTOTUNING_MP_SIZE_DEFAULT)
69
+
70
+ self.max_train_batch_size = get_dict_param(autotuning_dict, AUTOTUNING_MAX_TRAIN_BATCH_SIZE,
71
+ AUTOTUNING_MAX_TRAIN_BATCH_SIZE_DEFAULT)
72
+
73
+ self.min_train_batch_size = get_dict_param(autotuning_dict, AUTOTUNING_MIN_TRAIN_BATCH_SIZE,
74
+ AUTOTUNING_MIN_TRAIN_BATCH_SIZE_DEFAULT)
75
+
76
+ self.max_train_micro_batch_size_per_gpu = get_dict_param(
77
+ autotuning_dict, AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU,
78
+ AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT)
79
+
80
+ self.min_train_micro_batch_size_per_gpu = get_dict_param(
81
+ autotuning_dict, AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU,
82
+ AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT)
83
+
84
+ self.num_tuning_micro_batch_sizes = get_dict_param(autotuning_dict, AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES,
85
+ AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES_DEFAULT)
86
+
87
+
88
+ def get_model_info_config(param_dict):
89
+ if MODEL_INFO in param_dict and param_dict[MODEL_INFO] is not None:
90
+ model_info_config = {}
91
+ for key, default_value in MODEL_INFO_KEY_DEFAULT_DICT.items():
92
+ model_info_config[key] = get_scalar_param(param_dict[MODEL_INFO], key, default_value)
93
+ return model_info_config
94
+ return None
95
+
96
+
97
+ def get_default_model_info_config():
98
+ return MODEL_INFO_KEY_DEFAULT_DICT
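
A hypothetical usage sketch of the class above; fields not present in the dictionary fall back to the *_DEFAULT constants:

param_dict = {
    "autotuning": {
        "enabled": True,
        "metric": "throughput",
        "tuner_type": "model_based",
    }
}
config = DeepSpeedAutotuningConfig(param_dict)
assert config.enabled and config.tuner_type == "model_based"
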
venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero0.json ADDED
@@ -0,0 +1,5 @@
1
+ {
2
+ "zero_optimization": {
3
+ "stage": 0
4
+ }
5
+ }
venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero1.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "zero_optimization": {
3
+ "stage": 1,
4
+ "reduce_bucket_size": 5e8,
5
+ "allgather_bucket_size": 5e8
6
+ }
7
+ }
venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero2.json ADDED
@@ -0,0 +1,11 @@
1
+ {
2
+ "zero_optimization": {
3
+ "stage": 2,
4
+ "allgather_partitions": true,
5
+ "allgather_bucket_size": 5e8,
6
+ "overlap_comm": false,
7
+ "reduce_scatter": true,
8
+ "reduce_bucket_size": 5e8,
9
+ "contiguous_gradients": false
10
+ }
11
+ }
venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero3.json ADDED
@@ -0,0 +1,17 @@
1
+ {
2
+ "zero_optimization": {
3
+ "stage": 3,
4
+ "allgather_partitions": true,
5
+ "allgather_bucket_size": 5e8,
6
+ "overlap_comm": false,
7
+ "reduce_scatter": true,
8
+ "reduce_bucket_size": 5e8,
9
+ "contiguous_gradients": false,
10
+ "stage3_max_live_parameters": 1e9,
11
+ "stage3_max_reuse_distance": 1e9,
12
+ "stage3_prefetch_bucket_size": 5e8,
13
+ "stage3_param_persistence_threshold": 1e6,
14
+ "stage3_gather_16bit_weights_on_model_save": false,
15
+ "sub_group_size": 1e12
16
+ }
17
+ }
venv/lib/python3.10/site-packages/deepspeed/autotuning/constants.py ADDED
@@ -0,0 +1,185 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ #########################################
7
+ # autotuner implementation constants
8
+ #########################################
9
+
10
+ import os
11
+
12
+ DEFAULT_TEMPLATE_PATH_ZERO_0 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
13
+ "template_zero0.json")
14
+ DEFAULT_TEMPLATE_PATH_ZERO_1 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
15
+ "template_zero1.json")
16
+ DEFAULT_TEMPLATE_PATH_ZERO_2 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
17
+ "template_zero2.json")
18
+ DEFAULT_TEMPLATE_PATH_ZERO_3 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
19
+ "template_zero3.json")
20
+
21
+ METRIC_PERCENT_DIFF_CONST = 0.05
22
+ DS_CONFIG = "ds_config"
23
+ BUFSIZE = 1 # line buffer size for writing files
24
+
25
+ #########################################
26
+ # autotuner configuration constants
27
+ #########################################
28
+ # Autotuner. By default, this feature is not enabled.
29
+ # Users can configure in ds_config.json as below example:
30
+ AUTOTUNING_FORMAT = """
31
+ autotuner should be enabled as:
32
+ "session_params": {
33
+ "autotuning": {
34
+ "enabled": true,
35
+ "start_step": 5,
36
+ "end_step": 15
37
+ }
38
+ }
39
+ """
40
+
41
+ AUTOTUNING = "autotuning"
42
+
43
+ AUTOTUNING_ENABLED = "enabled"
44
+ AUTOTUNING_ENABLED_DEFAULT = False
45
+
46
+ AUTOTUNING_FAST = "fast"
47
+ AUTOTUNING_FAST_DEFAULT = True
48
+
49
+ AUTOTUNING_RESULTS_DIR = "results_dir"
50
+ AUTOTUNING_RESULTS_DIR_DEFAULT = "autotuning_results"
51
+
52
+ AUTOTUNING_EXPS_DIR = "exps_dir"
53
+ AUTOTUNING_EXPS_DIR_DEFAULT = "autotuning_exps"
54
+
55
+ AUTOTUNING_OVERWRITE = "overwrite"
56
+ AUTOTUNING_OVERWRITE_DEFAULT = True
57
+
58
+ AUTOTUNING_START_PROFILE_STEP = "start_profile_step"
59
+ AUTOTUNING_START_PROFILE_STEP_DEFAULT = 3
60
+
61
+ AUTOTUNING_END_PROFILE_STEP = "end_profile_step"
62
+ AUTOTUNING_END_PROFILE_STEP_DEFAULT = 5
63
+ AUTOTUNING_METRIC_PATH = "metric_path"
64
+ AUTOTUNING_METRIC_PATH_DEFAULT = None
65
+
66
+ AUTOTUNING_TUNER_TYPE = "tuner_type"
67
+ AUTOTUNING_TUNER_GRIDSEARCH = "gridsearch"
68
+ AUTOTUNING_TUNER_RANDOM = "random"
69
+ AUTOTUNING_TUNER_MODELBASED = "model_based"
70
+ AUTOTUNING_TUNER_TYPE_DEFAULT = AUTOTUNING_TUNER_GRIDSEARCH
71
+ AUTOTUNING_TUNER_EARLY_STOPPING = "tuner_early_stopping"
72
+ AUTOTUNING_TUNER_EARLY_STOPPING_DEFAULT = 5
73
+ AUTOTUNING_TUNER_NUM_TRIALS = "tuner_num_trials"
74
+ AUTOTUNING_TUNER_NUM_TRIALS_DEFAULT = 50
75
+
76
+ AUTOTUNING_ARG_MAPPINGS = "arg_mappings"
77
+ AUTOTUNING_ARG_MAPPINGS_DEFAULT = None
78
+
79
+ AUTOTUNING_MAX_TRAIN_BATCH_SIZE = "max_train_batch_size"
80
+ AUTOTUNING_MAX_TRAIN_BATCH_SIZE_DEFAULT = None
81
+ AUTOTUNING_MIN_TRAIN_BATCH_SIZE = "min_train_batch_size"
82
+ AUTOTUNING_MIN_TRAIN_BATCH_SIZE_DEFAULT = 1
83
+ AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU = "max_train_micro_batch_size_per_gpu"
84
+ AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = 1024
85
+ AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU = "min_train_micro_batch_size_per_gpu"
86
+ AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = 1
87
+ AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES = "num_tuning_micro_batch_sizes"
88
+ AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES_DEFAULT = 3
89
+
90
+ AUTOTUNING_MP_SIZE = "mp_size"
91
+ AUTOTUNING_MP_SIZE_DEFAULT = 1
92
+
93
+ AUTOTUNING_METRIC = "metric"
94
+ AUTOTUNING_METRIC_LATENCY = "latency"
95
+ AUTOTUNING_METRIC_THROUGHPUT = "throughput"
96
+ AUTOTUNING_METRIC_FLOPS = "flops"
97
+ AUTOTUNING_METRIC_FORWARD = "forward"
98
+ AUTOTUNING_METRIC_BACKWRAD = "flops"
99
+ AUTOTUNING_METRIC_STEPS = "step"
100
+ AUTOTUNING_METRIC_DEFAULT = AUTOTUNING_METRIC_THROUGHPUT
101
+
102
+ #########################################
103
+ # MODEL INFO
104
+ #########################################
105
+ AUTOTUNING_MODEL_INFO_PATH = "model_info_path"
106
+ AUTOTUNING_MODEL_INFO_PATH_DEFAULT = None
107
+
108
+ MODEL_INFO_FORMAT = '''
109
+ "model_info": {
110
+ "num_params": 1000000000,
111
+ "hidden_size": 10,
112
+ "num_layers": 12,
113
+ }
114
+ '''
115
+ MODEL_INFO = "model_info"
116
+ MODEL_INFO_PROFILE = "profile"
117
+ MODEL_INFO_PROFILE_DEFAULT = False
118
+ MODEL_INFO_NUM_PARAMS = "num_params"
119
+ MODEL_INFO_NUM_PARAMS_DEFAULT = None
120
+ MODEL_INFO_HIDDEN_SIZE = "hidden_size"
121
+ MODEL_INFO_HIDDEN_SIZE_DEFAULT = None
122
+ MODEL_INFO_NUM_LAYERS = "num_layers"
123
+ MODEL_INFO_NUM_LAYERS_DEFAULT = None
124
+
125
+ MODEL_INFO_KEY_DEFAULT_DICT = {
126
+ MODEL_INFO_PROFILE: MODEL_INFO_PROFILE_DEFAULT,
127
+ MODEL_INFO_NUM_PARAMS: MODEL_INFO_NUM_PARAMS_DEFAULT,
128
+ MODEL_INFO_HIDDEN_SIZE: MODEL_INFO_HIDDEN_SIZE_DEFAULT,
129
+ MODEL_INFO_NUM_LAYERS: MODEL_INFO_NUM_LAYERS_DEFAULT
130
+ }
131
+
132
+ #########################################
133
+ # autotuner search space constants
134
+ #########################################
135
+
136
+ DEFAULT_HF_CONFIG = {
137
+ "train_batch_size": "auto",
138
+ "train_micro_batch_size_per_gpu": "auto",
139
+ "gradient_accumulation_steps": "auto",
140
+ }
141
+
142
+ DEFAULT_MIN_MEM_CONFIG = {
143
+ "train_micro_batch_size_per_gpu": 1,
144
+ "zero_optimization": {
145
+ "stage": 3
146
+ },
147
+ "memory_break_down": False
148
+ }
149
+
150
+ DEFAULT_TUNING_SPACE_ZERO_0 = {"zero_optimization": {"stage": 0}}
151
+
152
+ DEFAULT_TUNING_SPACE_ZERO_1 = {
153
+ "zero_optimization": {
154
+ "stage": 1,
155
+ "reduce_bucket_size": [5e7, 5e8, 1e9],
156
+ "allgather_bucket_size": [5e7, 5e8, 1e9],
157
+ }
158
+ }
159
+
160
+ DEFAULT_TUNING_SPACE_ZERO_2 = {
161
+ "zero_optimization": {
162
+ "stage": 2,
163
+ "overlap_comm": [True, False],
164
+ "reduce_scatter": [False, True],
165
+ "reduce_bucket_size": [5e7, 5e8, 1e9],
166
+ "allgather_bucket_size": [5e7, 5e8, 1e9],
167
+ "contiguous_gradients": [False, True]
168
+ },
169
+ }
170
+
171
+ DEFAULT_TUNING_SPACE_ZERO_3 = {
172
+ "zero_optimization": {
173
+ "stage": 3,
174
+ "overlap_comm": [True, False],
175
+ "reduce_scatter": [False, True],
176
+ "reduce_bucket_size": [5e7, 5e8, 1e9],
177
+ "allgather_partitions": [True, False],
178
+ "allgather_bucket_size": [5e7, 5e8, 1e9],
179
+ "contiguous_gradients": [False, True]
180
+ },
181
+ }
182
+
183
+ GLOBAL_TUNING_SPACE = 'global'
184
+ # TUNING_MICRO_BATCH_SIZE_PREFIX="tune_micro_batch_size_z"
185
+ TUNING_MICRO_BATCH_SIZE_PREFIX = "z"
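
The default ZeRO-2 space above expands to a grid of 72 configurations before micro batch sizes are added, as this sketch shows (the lists simply restate the choices in DEFAULT_TUNING_SPACE_ZERO_2):

from itertools import product

zero2_choices = [
    [True, False],    # overlap_comm
    [False, True],    # reduce_scatter
    [5e7, 5e8, 1e9],  # reduce_bucket_size
    [5e7, 5e8, 1e9],  # allgather_bucket_size
    [False, True],    # contiguous_gradients
]
assert len(list(product(*zero2_choices))) == 72
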
venv/lib/python3.10/site-packages/deepspeed/autotuning/scheduler.py ADDED
@@ -0,0 +1,433 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import copy
7
+
8
+ from numpy import BUFSIZE
9
+ import json
10
+ import subprocess
11
+ import sys
12
+ import threading
13
+ import time
14
+ import base64
15
+
16
+ import os
17
+ import hjson
18
+ from tqdm import tqdm
19
+
20
+ from ..utils import logger
21
+ from .constants import AUTOTUNING, AUTOTUNING_METRIC_PATH
22
+ from .utils import get_val_by_key, search_error, was_interruptted
23
+ """
24
+ thread-0: loops over the experiment queue, dispatching experiments as resources become available
25
+ thread-N: runs each experiment in its own thread
26
+ """
27
+
28
+ from deepspeed import comm as dist
29
+
30
+ TIMEOUT = 5
31
+
32
+
33
+ class ResourceManager:
34
+
35
+ def __init__(self, args, hosts, num_gpus_per_node, results_dir, exps_dir, arg_mappings):
36
+ self.results_dir = results_dir
37
+ self.exps_dir = exps_dir
38
+
39
+ self.nodes = []
40
+ self.num_gpus_per_node = num_gpus_per_node
41
+ for host in hosts:
42
+ self.nodes.append(Node(host, num_gpus_per_node))
43
+
44
+ self.experiment_queue = []
45
+ self.running_experiments = {}
46
+ self.finished_experiments = {}
47
+ self.experiment_count = 0
48
+ self.exp_paths = set()
49
+ self.args = args
50
+
51
+ self.arg_mappings = {}
52
+ if arg_mappings is not None:
53
+ for k, v in arg_mappings.items():
54
+ k = k.strip()
55
+ v = v.strip()
56
+ if k not in self.arg_mappings:
57
+ self.arg_mappings[k] = v
58
+
59
+ def schedule_experiments(self, exp_paths):
60
+ for exp_path in exp_paths:
61
+ if exp_path in self.exp_paths:
62
+ continue
63
+ else:
64
+ self.exp_paths.add(exp_path)
65
+ with open(exp_path, "r") as fd:
66
+ exp = hjson.load(fd)
67
+ exp["exp_id"] = self.experiment_count
68
+ self.experiment_count += 1
69
+
70
+ result_dir = exp["result_dir"] = os.path.join(self.results_dir, exp['name'])
71
+ if AUTOTUNING in exp["ds_config"]:
72
+ metric_file = os.path.join(result_dir, "metrics.json")
73
+ exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH] = metric_file
74
+ stderr_file = os.path.join(result_dir, "stderr.log")
75
+ model_info_file = os.path.join(result_dir, "model_info.json")
76
+ metric_file = os.path.join(result_dir, "metrics.json")
77
+
78
+ # skip existing experiments (except for the ones that were interrupted)
79
+ if os.path.exists(result_dir) and os.path.exists(stderr_file):
80
+ if not was_interruptted(stderr_file):
81
+ err = search_error(stderr_file)
82
+ exp_id = exp["exp_id"]
83
+ self.finished_experiments[exp_id] = (exp, err)
84
+ if err or os.path.exists(metric_file) or os.path.exists(model_info_file):
85
+ logger.info(f"Skipping exp {exp['name']} whose result already exists")
86
+ continue
87
+
88
+ self.experiment_queue.append(exp)
89
+
90
+ def run_job(self, exp: dict, reservations):
91
+ exp_id = exp["exp_id"]
92
+ exp["master_port"] = self.args.master_port + exp_id
93
+ exp["result_dir"] = os.path.join(self.results_dir, exp['name'])
94
+ user_script = self.args.user_script
95
+ user_args = self.args.user_args
96
+
97
+ # overwrite the user arg in the arg_mappings
98
+ for key, val in self.arg_mappings.items():
99
+ nval = get_val_by_key(exp, key)
100
+ if nval and str(nval) != "auto":
101
+ if val in user_args:
102
+ idx = user_args.index(val)
103
+ user_args[idx + 1] = str(nval)
104
+ else:
105
+ user_args.append(val)
106
+ user_args.append(str(nval))
107
+
108
+ t = threading.Thread(target=run_experiment, args=(exp, reservations, user_script, user_args))
109
+ t.start()
110
+ self.running_experiments[exp_id] = (t, exp, reservations, time.time())
111
+
112
+ def experiment_check(self, pbar):
113
+ finished_exps = []
114
+ for exp_id, exp_data in self.running_experiments.items():
115
+ thread, exp_json, reservations, start_time = exp_data
116
+ logger.debug(f"Checking exp_id = {exp_id}, alive = {thread.is_alive()}")
117
+ thread.join(timeout=TIMEOUT)
118
+ if not thread.is_alive():
119
+ exp_dir = exp_json["result_dir"]
120
+ stderr_file = os.path.join(exp_dir, "stderr.log")
121
+ err = search_error(stderr_file)
122
+ finished_exps.append((exp_id, reservations))
123
+ self.finished_experiments[exp_id] = (exp_json, err)
124
+ duration = time.time() - start_time
125
+ logger.debug(f"Finished exp_id = {exp_id}, duration={duration:.2f} sec")
126
+ pbar.update(len(finished_exps))
127
+ for exp_id, reservations in finished_exps:
128
+ for reservation in reservations:
129
+ reservation.restore_slots()
130
+ self.running_experiments.pop(exp_id)
131
+ time.sleep(TIMEOUT)
132
+
133
+ def resource_request(self, exp):
134
+ num_gpus, num_nodes = exp['num_gpus'], exp['num_nodes']
135
+ slot_request = num_gpus
136
+ reservations = []
137
+ for node in self.nodes:
138
+ if num_nodes == 0:
139
+ break
140
+ slots = node.reserve_slots(slot_request=slot_request)
141
+ if slots:
142
+ reservations.append(Reservation(node=node, slots=slots))
143
+ num_nodes -= 1
144
+
145
+ if num_nodes == 0:
146
+ # request satisfied
147
+ return reservations
148
+ else:
149
+ # request not satisfied
150
+ for reservation in reservations:
151
+ reservation.restore_slots()
152
+
153
+ def status(self):
154
+ status = ""
155
+ for node in self.nodes:
156
+ status += f"{node.host} ({len(node.idle_slots)} idle gpus), "
157
+ return status[:-1]
158
+
159
+ def run(self):
160
+ pbar = tqdm(total=len(self.experiment_queue))
161
+
162
+ while len(self.experiment_queue) > 0:
163
+ exp = self.experiment_queue.pop(0)
164
+ logger.debug(f'Popped exp_id = {exp["exp_id"]} from the queue')
165
+ logger.debug(f'Resource status: {self.status()}')
166
+ reservations = self.resource_request(exp)
167
+
168
+ if not reservations:
169
+ logger.debug(f'Unable to schedule exp_id = {exp["exp_id"]}')
170
+ self.experiment_queue.insert(0, exp)
171
+ logger.debug(f'Put exp_id = {exp["exp_id"]} back into the queue')
172
+ self.experiment_check(pbar)
173
+ else:
174
+ desc = ""
175
+ for reservation in reservations:
176
+ reservation.slots.sort()
177
+ slots = ",".join(map(str, reservation.slots))
178
+ desc += f"{reservation.node.host}:{slots}@"
179
+ desc = desc[:-1]
180
+ logger.debug(f'Running exp_id = {exp["exp_id"]} on {desc}')
181
+ self.run_job(exp, reservations)
182
+
183
+ # All pending experiments are scheduled, waiting for them to complete
184
+ while len(self.running_experiments) > 0:
185
+ self.experiment_check(pbar)
186
+
187
+ def save_exp_results_to_database(self, message, ranks=None, path=None):
188
+ """Print message when one of following condition meets
189
+
190
+ + not dist.is_initialized()
191
+ + dist.get_rank() in ranks if ranks is not None or ranks = [-1]
192
+
193
+ Args:
194
+ message (str)
195
+ ranks (list)
196
+ path (str)
197
+
198
+ """
199
+ should_log = not dist.is_initialized()
200
+ ranks = ranks or []
201
+ my_rank = dist.get_rank() if dist.is_initialized() else -1
202
+ if ranks and not should_log:
203
+ should_log = ranks[0] == -1
204
+ should_log = should_log or (my_rank in set(ranks))
205
+ logger.debug(f"*** Should log: {should_log}")
206
+ if should_log:
207
+ message['rank'] = my_rank
208
+ with open(path, 'a') as outfile:
209
+ json.dump(message, outfile)
210
+ outfile.write('\n')
211
+
212
+ def parse_results(self, metric):
213
+ """ Parses the metric file of the finished experiments to select the optimal DeepSpeed configuration.
214
+
215
+ Args:
216
+ metric (str): the name of the metric used to rank the finished experiments.
217
+
218
+ Returns:
219
+ The experiment with the optimal configuration and its metric value.
220
+ """
221
+ max_throughput = sys.float_info.min
222
+ best_exp_id = -1
223
+ for exp_id, (exp, err) in self.finished_experiments.items():
224
+ if err:
225
+ logger.info(
226
+ f"The experiment exp_id = {exp_id}, exp_name = {exp['name']}, did not run successfully with error = {err}, thus a metrics.txt does not exist for it. Check the stderr.log in {exp['result_dir']}"
227
+ )
228
+ continue
229
+
230
+ metric_file = exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH]
231
+
232
+ if os.path.exists(metric_file):
233
+ with open(metric_file, 'r') as f:
234
+ results = hjson.load(f)
235
+ curr_throughput = results[metric]
236
+ if curr_throughput > max_throughput:
237
+ max_throughput = curr_throughput
238
+ best_exp_id = exp_id
239
+ exp['results'] = results
240
+
241
+ if best_exp_id != -1:
242
+ best_exp, _ = self.finished_experiments[best_exp_id]
243
+ return best_exp, max_throughput
244
+
245
+ return exp, None
246
+
247
+ def clear(self):
248
+ """Clear experiment queues, does not reset self.experiment_count
249
+ """
250
+ self.experiment_queue = []
251
+ # clean up the running experiments
252
+ for exp_id, exp_data in self.running_experiments.items():
253
+ thread, exp_json, reservations, start_time = exp_data
254
+ clean_up(exp_json, reservations)
255
+ self.running_experiments = {}
256
+ self.finished_experiments = {}
257
+ self.exp_paths = set()
258
+
259
+
260
+ class Node:
261
+
262
+ def __init__(self, host, max_slots):
263
+ self.host = host
264
+ self.max_slots = max_slots
265
+ self.idle_slots = list(range(max_slots))
266
+
267
+ def reserve_slots(self, slot_request: int) -> list:
268
+ if len(self.idle_slots) >= slot_request:
269
+ return [self.idle_slots.pop(0) for _ in range(slot_request)]
270
+
271
+ def restore_slots(self, slots: list):
272
+ self.idle_slots += slots
273
+
274
+
275
+ class Reservation:
276
+
277
+ def __init__(self, node, slots):
278
+ self.node = node
279
+ self.slots = slots
280
+
281
+ def restore_slots(self):
282
+ self.node.restore_slots(self.slots)
283
+
284
+ def desc(self):
285
+ slots = ",".join(map(str, self.slots))
286
+ return f"{self.node.host}:{slots}@"
287
+
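
A minimal round trip of the slot bookkeeping above ("worker-0" is a made-up hostname): reserving pops GPU indices off a node's idle list, and restoring a Reservation gives them back.

node = Node(host="worker-0", max_slots=4)
slots = node.reserve_slots(slot_request=2)  # -> [0, 1]
r = Reservation(node=node, slots=slots)
assert node.idle_slots == [2, 3]
r.restore_slots()
assert sorted(node.idle_slots) == [0, 1, 2, 3]
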
288
+
289
+ def get_job_id():
290
+ # Infrastructure-specific job-id
291
+ infra_job_id = None
292
+ if "DLWS_JOB_ID" in os.environ:
293
+ infra_job_id = os.environ["DLWS_JOB_ID"]
294
+ elif "DLTS_JOB_ID" in os.environ:
295
+ infra_job_id = os.environ["DLTS_JOB_ID"]
296
+ else:
297
+ infra_job_id = "unknown-job-id"
298
+
299
+ return infra_job_id
300
+
301
+
302
+ def get_user():
303
+ user = None
304
+ if "USER" in os.environ:
305
+ user = os.environ["USER"]
306
+ else:
307
+ user = "unknown-user"
308
+ return user
309
+
310
+
311
+ def run_experiment(exp: dict, reservations, user_script, user_args):
312
+ include_str = ""
313
+ for reservation in reservations:
314
+ reservation.slots.sort()
315
+ slots = ",".join(map(str, reservation.slots))
316
+ include_str += f"{reservation.node.host}:{slots}@"
317
+ include_str = include_str[:-1]
318
+ master_port = exp["master_port"]
319
+ hostfile = exp["hostfile"]
320
+ exp["launcher_args"] = [
321
+ "--hostfile",
322
+ f"{hostfile}",
323
+ "--include",
324
+ f"{include_str}",
325
+ "--master_port",
326
+ str(master_port),
327
+ ]
328
+ logger.debug(f'launcher args={exp["launcher_args"]}')
329
+
330
+ exp["user"] = get_user()
331
+ exp["job_id"] = get_job_id()
332
+ exp_dir = exp["result_dir"]
333
+ os.makedirs(exp_dir, exist_ok=True)
334
+ ds_config_path = os.path.join(exp_dir, "ds_config.json")
335
+ exp["ds_config_path"] = ds_config_path
336
+
337
+ ds_config = copy.deepcopy(exp["ds_config"])
338
+ ds_config_json = json.dumps(ds_config).encode('utf-8')
339
+
340
+ exp["ds_config_base64"] = base64.urlsafe_b64encode(ds_config_json).decode('utf-8')
341
+
342
+ with open(exp["ds_config_path"], "w", buffering=BUFSIZE) as fd:
343
+ json.dump(ds_config, fd)
344
+ fd.flush()
345
+ os.fsync(fd)
346
+ path = exp["ds_config_path"]
347
+ logger.info(f"Scheduler wrote ds_config to {path}, {os.path.abspath(path)}")
348
+
349
+ with open(os.path.join(exp_dir, "exp.json"), "w", buffering=BUFSIZE) as fd:
350
+ json.dump(exp, fd)
351
+ fd.flush()
352
+ os.fsync(fd)
353
+ path = os.path.join(exp_dir, "exp.json")
354
+ logger.info(f"Scheduler wrote exp to {path}, {os.path.abspath(path)}")
355
+
356
+ # remove "--deepspeed_config ds_config.json" from user_args
357
+ if user_args:
358
+ if "--deepspeed_config" in user_args:
359
+ idx = user_args.index("--deepspeed_config")
360
+ # "--deepspeed_config" is omitted in HF
361
+ elif "--deepspeed" in user_args:
362
+ idx = user_args.index("--deepspeed")
363
+ assert idx < len(user_args), "there is no ds_config file specified after --deepspeed_config or --deepspeed"
364
+ # user_args[idx + 1] = exp["ds_config_path"]
365
+ # pass base64 serialized ds_config to launcher
366
+ user_args[idx + 1] = exp["ds_config_base64"]
367
+
368
+ exp["user_script"] = user_script
369
+ exp["user_args"] = user_args
370
+
371
+ cmd = ["deepspeed"] + exp["launcher_args"] + [user_script] + user_args
372
+
373
+ assert len(exp["launcher_args"]) > 0, "must provide launcher args"
374
+
375
+ with open(os.path.join(exp_dir, "cmd.txt"), "w", buffering=BUFSIZE) as fd:
376
+ fd.write(" ".join(cmd))
377
+ fd.write("\n")
378
+ fd.flush()
379
+ os.fsync(fd)
380
+
381
+ logger.info(
382
+ f"Launching exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}, and ds_config = {os.path.abspath(ds_config_path)}"
383
+ )
384
+
385
+ with open(os.path.join(exp_dir, "stdout.log"), "wb") as out, open(os.path.join(exp_dir, "stderr.log"),
386
+ "wb") as err:
387
+ result = subprocess.Popen(cmd, stdout=out, stderr=err)
388
+ result.wait()
389
+ out.flush()
390
+ err.flush()
391
+ os.fsync(out)
392
+ os.fsync(err)
393
+
394
+ clean_up(exp, reservations)
395
+
396
+ logger.info(f"Done running exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}")
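
The base64 substitution in run_experiment hands the exact configuration to the training process on the command line, presumably so it does not depend on a shared filesystem path. The round trip it relies on is just:

import base64, json

ds_config = {"train_micro_batch_size_per_gpu": 4}  # illustrative config
encoded = base64.urlsafe_b64encode(json.dumps(ds_config).encode("utf-8")).decode("utf-8")
decoded = json.loads(base64.urlsafe_b64decode(encoded.encode("utf-8")))
assert decoded == ds_config
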
397
+
398
+
399
+ PDSH_MAX_FAN_OUT = 1024
400
+
401
+
402
+ def clean_up(exp: dict, reservations):
403
+ env = os.environ.copy()
404
+ env['PDSH_RCMD_TYPE'] = 'ssh'
405
+
406
+ nodes_str = ""
407
+ for reservation in reservations:
408
+ nodes_str += f"{reservation.node.host},"
409
+ nodes_str = nodes_str[:-1]
410
+ logger.debug(f"Cleaning up exp_id = {exp['exp_id']} on the following workers: {nodes_str}")
411
+
412
+ # PDSH flags for max node fan out and specific hosts to launch on
413
+ # See https://linux.die.net/man/1/pdsh for flag details
414
+ pdsh_cmd = ['pdsh', '-f', str(PDSH_MAX_FAN_OUT), '-w', nodes_str]
415
+
416
+ kill_cmd = [
417
+ 'pkill',
418
+ '-f',
419
+ exp['name'],
420
+ ]
421
+ cmd = pdsh_cmd + kill_cmd
422
+ logger.debug("cmd = {}".format(' '.join(cmd)))
423
+
424
+ result = subprocess.Popen(cmd, env=env)
425
+ result.wait()
426
+
427
+ # In case of failure must propagate the error-condition back to the caller (usually shell). The
428
+ # actual error and traceback should have been printed in the subprocess, so in order to avoid
429
+ # unnecessary noise we just quietly exit here with the same code as the subprocess
430
+ if result.returncode > 0:
431
+ sys.exit(result.returncode)
432
+
433
+ logger.info(f"Done cleaning up exp_id = {exp['exp_id']} on the following workers: {nodes_str}")
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .index_based_tuner import RandomTuner, GridSearchTuner
7
+ # from .ga_tuner import GATuner
8
+ from .model_based_tuner import ModelBasedTuner
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (330 Bytes).
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/base_tuner.cpython-310.pyc ADDED
Binary file (2.3 kB).
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/cost_model.cpython-310.pyc ADDED
Binary file (1.7 kB).
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/index_based_tuner.cpython-310.pyc ADDED
Binary file (1.7 kB).
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/model_based_tuner.cpython-310.pyc ADDED
Binary file (4.34 kB).
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/utils.cpython-310.pyc ADDED
Binary file (2.44 kB).
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/base_tuner.py ADDED
@@ -0,0 +1,72 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import sys
7
+
8
+ from deepspeed.autotuning.constants import *
9
+ from deepspeed.autotuning.utils import write_experiments
10
+ from deepspeed.utils import logger
11
+
12
+
13
+ class BaseTuner:
14
+
15
+ def __init__(self, exps, resource_manager, metric):
16
+ self.all_exps = exps
17
+ self.rm = resource_manager
18
+ self.best_iter = 0
19
+ self.best_exp = None
20
+ self.best_metric_val = None
21
+ self.metric = metric if metric else AUTOTUNING_METRIC_DEFAULT
22
+ logger.info(f"total number of exps = {len(self.all_exps)}")
23
+
24
+ def has_next(self):
25
+ """Whether there exists more configurations for evaluation"""
26
+ if len(self.all_exps) > 0:
27
+ return True
28
+ else:
29
+ return False
30
+
31
+ def next_batch(self, sample_size):
32
+ """Select the next batch of configurations for evaluation"""
33
+ raise NotImplementedError
34
+
35
+ def update(self):
36
+ """"Update the tuner with what configurations have been evaluated and their performance results"""
37
+
38
+ def tune(self, sample_size=1, n_trials=1000, early_stopping=None):
39
+ i = 0
40
+ try:
41
+ while i < n_trials and self.has_next():
42
+ # Select the next batch of configuration for evaluation
43
+ sampled_exps = self.next_batch(sample_size)
44
+ # Generate experiments for measurement of performance
45
+ exp_paths = write_experiments(sampled_exps, self.rm.exps_dir)
46
+ self.rm.schedule_experiments(exp_paths)
47
+ self.rm.run()
48
+ exp, metric_val = self.rm.parse_results(self.metric)
49
+ if self.best_exp is None or self.best_metric_val is None or (metric_val
50
+ and metric_val > self.best_metric_val):
51
+ # logger.info(f"tuner finds better = {exp}")
52
+ self.best_exp = exp
53
+ self.best_metric_val = metric_val
54
+ self.best_iter = i
55
+
56
+ i += len(sampled_exps)
57
+
58
+ # Update the tuner with evaluated performance results
59
+ self.update()
60
+
61
+ self.rm.clear()
62
+
63
+ # Early stop if no more promising configurations are likely to be found
64
+ if early_stopping and i >= self.best_iter + early_stopping:
65
+ logger.info(
66
+ f"Tuner early stopped at iteration {i}. Best iteration is {self.best_iter}. Early stopping threshold is {early_stopping}"
67
+ )
68
+ break
69
+ return i
70
+ except:
71
+ logger.info("Tuner Error:", sys.exc_info()[0])
72
+ return i
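
The contract for concrete tuners is small: supply next_batch() (and optionally update()); tune() drives scheduling, best-experiment tracking, and early stopping. An illustrative subclass, functionally the same as the GridSearchTuner defined later:

class FifoTuner(BaseTuner):
    """Illustrative only: evaluate experiments in submission order."""

    def next_batch(self, sample_size=1):
        batch = self.all_exps[:sample_size]
        self.all_exps = self.all_exps[sample_size:]
        return batch
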
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/cost_model.py ADDED
@@ -0,0 +1,66 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .utils import *
+
+try:
+    import xgboost as xgb
+except ImportError:
+    xgb = None
+
+
+class XGBoostCostModel:
+
+    def __init__(self, loss_type, num_threads=None, log_interval=25, upper_model=None):
+
+        assert xgb is not None, "missing requirements, please install deepspeed w. 'autotuning_ml' extra."
+
+        self.loss_type = loss_type
+
+        if loss_type == "reg":
+            self.xgb_params = {
+                "max_depth": 3,
+                "gamma": 0.0001,
+                "min_child_weight": 1,
+                "subsample": 1.0,
+                "eta": 0.3,
+                "lambda": 1.0,
+                "alpha": 0,
+                "objective": "reg:linear",
+            }
+        elif loss_type == "rank":
+            self.xgb_params = {
+                "max_depth": 3,
+                "gamma": 0.0001,
+                "min_child_weight": 1,
+                "subsample": 1.0,
+                "eta": 0.3,
+                "lambda": 1.0,
+                "alpha": 0,
+                "objective": "rank:pairwise",
+            }
+        else:
+            raise RuntimeError("Invalid loss type: " + loss_type)
+
+        self.xgb_params["verbosity"] = 0
+        if num_threads:
+            self.xgb_params["nthread"] = num_threads
+
+    def fit(self, xs, ys):
+        x_train = np.array(xs, dtype=np.float32)
+        y_train = np.array(ys, dtype=np.float32)
+        # Normalize targets to [0, 1]; guard against division by zero.
+        y_max = np.max(y_train)
+        y_train = y_train / max(y_max, 1e-9)
+
+        # Shuffle before training.
+        index = np.random.permutation(len(x_train))
+        dtrain = xgb.DMatrix(x_train[index], y_train[index])
+
+        self.bst = xgb.train(self.xgb_params, dtrain)
+
+    def predict(self, xs):
+        features = xgb.DMatrix(xs)
+        return self.bst.predict(features)
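
A toy illustration of the cost model above (requires xgboost, i.e. the 'autotuning_ml' extra); the feature vectors and throughput values below are made up. The "rank" objective only needs relative ordering, which is how the autotuner uses the predictions.

import numpy as np
from deepspeed.autotuning.tuner.cost_model import XGBoostCostModel

xs = [[1, 8], [2, 8], [4, 16], [8, 16]]  # e.g. flattened numeric config values
ys = [10.0, 19.0, 35.0, 60.0]            # e.g. measured throughput per config

model = XGBoostCostModel("rank")
model.fit(xs, ys)
# Higher score = more promising configuration.
print(model.predict(np.array(xs, dtype=np.float32)))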
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/index_based_tuner.py ADDED
@@ -0,0 +1,40 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import random
+
+from .base_tuner import BaseTuner
+
+
+class RandomTuner(BaseTuner):
+    """Explore the search space in random order."""
+
+    def __init__(self, exps: list, resource_manager, metric):
+        super().__init__(exps, resource_manager, metric)
+
+    def next_batch(self, sample_size=1):
+        if sample_size > len(self.all_exps):
+            sample_size = len(self.all_exps)
+
+        sampled_batch = random.sample(self.all_exps, sample_size)
+        self.all_exps = [x for x in self.all_exps if x not in sampled_batch]
+
+        return sampled_batch
+
+
+class GridSearchTuner(BaseTuner):
+    """Explore the search space in sequential order."""
+
+    def __init__(self, exps: list, resource_manager, metric):
+        super().__init__(exps, resource_manager, metric)
+
+    def next_batch(self, sample_size=1):
+        if sample_size > len(self.all_exps):
+            sample_size = len(self.all_exps)
+
+        sampled_batch = self.all_exps[0:sample_size]
+        self.all_exps = [x for x in self.all_exps if x not in sampled_batch]
+
+        return sampled_batch
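
A quick sanity check of the two strategies above; the experiment entries are dummies, and resource_manager is None since next_batch() never touches it (only tune() does).

from deepspeed.autotuning.tuner.index_based_tuner import GridSearchTuner, RandomTuner

exps = [{"name": f"exp{i}", "ds_config": {}} for i in range(5)]

grid = GridSearchTuner(list(exps), resource_manager=None, metric="throughput")
print([e["name"] for e in grid.next_batch(2)])  # ['exp0', 'exp1'] -- sequential order
print(grid.has_next())                          # True, three experiments remain

rand = RandomTuner(list(exps), resource_manager=None, metric="throughput")
print([e["name"] for e in rand.next_batch(2)])  # two random picks, without replacement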
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/model_based_tuner.py ADDED
@@ -0,0 +1,157 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import numbers
+
+import hjson
+
+from ..constants import AUTOTUNING, AUTOTUNING_METRIC_LATENCY, AUTOTUNING_METRIC_PATH
+from ..utils import *
+from .base_tuner import BaseTuner
+from .cost_model import XGBoostCostModel
+from .utils import *
+
+INIT_NUM = 2
+
+
+class ModelBasedTuner(BaseTuner):
+    """Explore the search space with a cost model."""
+
+    def __init__(self, exps: list, resource_manager, metric, tuning_space):
+        super().__init__(exps, resource_manager, metric)
+        self.tuning_space = tuning_space
+        self.best_iter = 0
+
+        self.all_configs = [e['ds_config'] for e in exps]
+        self.num_all_configs = len(self.all_configs)
+
+        self.dims = dict_to_dims(self.tuning_space)
+
+        logger.info(f"Create config dim: {self.dims}, all configs: {self.num_all_configs}")
+
+        self.visited = set()
+
+        self.trials = []
+        self.trial_pt = 0
+
+        # Seed the first round with a few distinct, randomly chosen configurations.
+        init_num = min(INIT_NUM, self.num_all_configs)
+        for _ in range(init_num):
+            exp_feature = np.random.randint(self.num_all_configs)
+            while exp_feature in self.visited:
+                exp_feature = np.random.randint(self.num_all_configs)
+            self.trials.append(exp_feature)
+            self.visited.add(exp_feature)
+
+        self.cost_model = XGBoostCostModel("rank")
+
+        self.evaluated_configs = []
+        self.evaluated_perf = []
+
+        self.train_ct = 0
+
+        self.random_exploration_ratio = 0.2  # probability of random exploration
+
+    def find_estimated_top_configs(self):
+        """Use the cost model to predict the estimated performance of configurations and find the top ones for the next round of evaluation."""
+        configs = []
+        for c in self.all_configs:
+            flattened_ds_config = flatten(c)
+            feature_val = []
+            for k, v in flattened_ds_config.items():
+                if isinstance(v, numbers.Number):
+                    feature_val.append(v)
+            configs.append(feature_val)
+        # TODO: the current implementation requires that all configs have the same shape.
+        configs = np.array(configs, dtype=np.float32)
+        estimates = self.cost_model.predict(configs)
+
+        # For latency, lower is better, so keep the ascending order;
+        # for other metrics, reverse to put the highest estimates first.
+        top_idx = np.argsort(estimates)
+        return top_idx if self.metric == AUTOTUNING_METRIC_LATENCY else top_idx[::-1]
+
+    def next_batch(self, sample_size):
+        sampled_batch = []
+
+        counter = 0
+        while counter < sample_size:
+            if len(self.visited) >= self.num_all_configs:
+                break
+
+            logger.debug(f"trials: {self.trials}")
+            # Select the most promising unvisited trial suggested by the cost model.
+            index = None
+            while self.trial_pt < len(self.trials):
+                candidate = self.trials[self.trial_pt]
+                if candidate not in self.visited:
+                    index = candidate
+                    break
+                self.trial_pt += 1
+
+            # To avoid over-exploitation, pick a random unvisited configuration
+            # with probability random_exploration_ratio (or as a fallback when
+            # the model has no unvisited suggestion left).
+            if index is None or np.random.rand() < self.random_exploration_ratio:
+                index = np.random.randint(self.num_all_configs)
+                while index in self.visited:
+                    index = np.random.randint(self.num_all_configs)
+
+            sampled_batch.append(self.all_exps[index])
+            self.visited.add(index)
+            counter += 1
+
+        return sampled_batch
+
+    def has_next(self):
+        return len(self.visited) < self.num_all_configs
+
+    def update(self):
+        for exp_id, (exp, err) in self.rm.finished_experiments.items():
+            feature_val = []
+            if err:
+                logger.info(f"Skipping exp_id = {exp_id}, exp_name = {exp['name']}: the experiment did not run "
+                            f"successfully (error = {err}), thus no metrics.txt exists for it. Please check the "
+                            f"stderr.log in {exp['result_dir']}")
+                ds_config = exp["ds_config"]
+                flattened_ds_config = flatten(ds_config)
+                for k, v in flattened_ds_config.items():
+                    if isinstance(v, numbers.Number):
+                        feature_val.append(v)
+                self.evaluated_configs.append(feature_val)
+                self.evaluated_perf.append(0.0)
+                continue
+
+            p = exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH]
+            with open(p, 'r') as f:
+                results = hjson.load(f)
+            curr_iter = results[self.metric]
+            logger.debug(f"parsing the results for {exp_id}, result is {curr_iter}")
+
+            ds_config = exp["ds_config"]
+            flattened_ds_config = flatten(ds_config)
+            for k, v in flattened_ds_config.items():
+                if isinstance(v, numbers.Number):
+                    feature_val.append(v)
+            self.evaluated_configs.append(feature_val)
+            self.evaluated_perf.append(curr_iter)
+
+        logger.debug(f"**Evaluated configs: {len(self.evaluated_configs)}, evaluated perf: {self.evaluated_perf}")
+
+        self.cost_model.fit(self.evaluated_configs, self.evaluated_perf)
+
+        self.trials = self.find_estimated_top_configs()
+        self.trial_pt = 0
+        self.train_ct += 1
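
The selection policy in next_batch() above is an epsilon-greedy rule: take the best unvisited suggestion from the cost model, but with probability random_exploration_ratio (or when the suggestions are exhausted) fall back to a random unvisited configuration. The same rule distilled into a standalone sketch (pick_next is a hypothetical helper, not part of the module):

import numpy as np

def pick_next(trials, visited, num_all, eps=0.2):
    # Best unvisited model suggestion, in predicted order.
    index = next((t for t in trials if t not in visited), None)
    # Random exploration, or fallback when no suggestion is left.
    if index is None or np.random.rand() < eps:
        index = np.random.randint(num_all)
        while index in visited:
            index = np.random.randint(num_all)
    return index

print(pick_next(trials=[3, 1, 2], visited={3}, num_all=5))  # usually 1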
venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/utils.py ADDED
@@ -0,0 +1,86 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import numpy as np
+import itertools
+from ..utils import *
+import collections.abc
+
+
+def index_to_feature(p, dims):
+    """Convert index form (a single integer) to feature form (a vector)."""
+    feature = []
+    for dim in dims:
+        feature.append(p % dim)
+        p //= dim
+    return feature
+
+
+def feature_to_index(feature, dims):
+    """Convert feature form (a vector) to index form (a single integer)."""
+    p = 0
+    for j, k in enumerate(feature):
+        p += int(np.prod(dims[:j])) * k
+    return p
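
These two functions are inverse mixed-radix conversions: dims holds the number of choices per tuning dimension, and every integer in [0, prod(dims)) maps to one choice index per dimension. A worked example, assuming the two functions above are in scope:

dims = [2, 3, 4]                    # 2 * 3 * 4 = 24 configurations in total
f = index_to_feature(17, dims)      # [1, 2, 2], since 17 = 1 + 2*2 + 2*(2*3)
assert feature_to_index(f, dims) == 17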
+
+
+def dict_to_dims(tuning_space):
+    dims = []
+    for key, val in tuning_space.items():
+        if isinstance(val, dict):
+            dims.extend(dict_to_dims(val))
+        elif isinstance(val, list):
+            dims.append(len(val))
+        else:
+            dims.append(1)
+    return dims
+
+
+def gen_combinations(d: dict):
+    keys, values = d.keys(), d.values()
+    values_choices = (gen_combinations(v) if isinstance(v, dict) else get_list(v) for v in values)
+    for comb in itertools.product(*values_choices):
+        yield dict(zip(keys, comb))
+
+
+def flatten(d, parent_key='', sep='_'):
+    items = []
+    for k, v in d.items():
+        new_key = parent_key + sep + k if parent_key else k
+        if isinstance(v, collections.abc.MutableMapping):
+            items.extend(flatten(v, new_key, sep=sep).items())
+        else:
+            items.append((new_key, v))
+    return dict(items)
+
+
+def dict_to_feature(feature_dict, keys, max_value=None):
+    """Extract numeric feature values from a config dict."""
+    feature = []
+    for key, val in feature_dict.items():  # first level
+        if key not in keys:
+            continue
+        if val is None or val == "auto" or key == "autotuning" or val == "":
+            continue
+        if isinstance(val, dict):
+            feature.extend(dict_to_feature(val, keys, max_value))
+        else:
+            feature.append(float(val))
+
+    # Normalization; should not matter in tree models.
+    if max_value is not None:
+        norm_feature = []
+        for f, mv in zip(feature, max_value):
+            norm_feature.append(f / mv)
+        feature = norm_feature
+
+    return feature
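
flatten() is what turns a nested ds_config into the flat key space whose numeric values feed the cost model; for example:

cfg = {"train_micro_batch_size_per_gpu": 4,
       "zero_optimization": {"stage": 2, "overlap_comm": True}}
print(flatten(cfg))
# {'train_micro_batch_size_per_gpu': 4, 'zero_optimization_stage': 2,
#  'zero_optimization_overlap_comm': True}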
venv/lib/python3.10/site-packages/deepspeed/autotuning/utils.py ADDED
@@ -0,0 +1,459 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import re
+import collections.abc
+import os
+import json
+from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU
+import itertools
+import copy
+
+from ..utils import logger
+
+
+def search_error(filename):
+    if not os.path.exists(filename):
+        return "stderr.log does not exist"
+    with open(filename) as f:
+        for line in f:
+            for s in ["Error", "error", "ERROR"]:
+                idx = line.find(s)
+                if idx != -1:
+                    return line[idx + len(s):].lstrip(": ")
+    return None
+
+
+def was_interruptted(filename):
+    """Returns True if stderr.log records a KeyboardInterrupt."""
+    if not os.path.exists(filename):
+        return False
+    with open(filename) as f:
+        for line in f:
+            if line.find("KeyboardInterrupt") != -1:
+                return True
+    return False
+
+
+def find_replace_str(value, replace_dict):
+    if not isinstance(value, str):
+        return str(value)
+
+    matches = re.findall(r"\$[\w]+", value)
+    for var in matches:
+        var_key = var.replace("$", "").lower()
+        if var_key == "nvme_path":
+            continue
+        assert var_key in replace_dict, f"unknown var key: {var_key}, in {replace_dict}"
+        if isinstance(replace_dict[var_key], str):
+            value = value.replace(var, replace_dict[var_key])
+        else:
+            assert len(matches) == 1, "unable to replace multiple non-string matches"
+            value = replace_dict[var_key]
+    return value
+
+
+def find_replace(target, replace_dict):
+    if isinstance(target, dict):
+        for key, value in target.items():
+            if isinstance(value, str):
+                target[key] = find_replace_str(value, replace_dict)
+            if isinstance(value, list):
+                for i in range(len(value)):
+                    value[i] = find_replace_str(value[i], replace_dict)
+            if isinstance(value, dict):
+                find_replace(value, replace_dict)
+    elif isinstance(target, list):
+        for i in range(len(target)):
+            target[i] = str(find_replace_str(target[i], replace_dict))
+
+
+def get_list(val):
+    if not isinstance(val, list):
+        return [val]
+    else:
+        return val
+
+
+def combine_dict(d, u):
+    for k, v in u.items():
+        if isinstance(v, collections.abc.Mapping):
+            d[k] = combine_dict(d.get(k, {}), v)
+        else:
+            if k not in d:
+                d[k] = v
+            else:
+                if not isinstance(d[k], list):
+                    d[k] = [d[k]]
+                d[k].extend(i for i in get_list(v) if i not in d[k])
+    return d
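
combine_dict() merges two tuning-space dicts, turning conflicting scalar values into lists of alternatives; a small illustration with made-up values:

base = {"zero_optimization": {"stage": 1}, "fp16": {"enabled": True}}
user = {"zero_optimization": {"stage": 2}}
print(combine_dict(base, user))
# {'zero_optimization': {'stage': [1, 2]}, 'fp16': {'enabled': True}}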
+
+
+def del_if_exists(t, d):
+    """Deletes a key from a dictionary if it exists.
+
+    Args:
+        t (string): target key to delete
+        d (dict): dictionary to delete from
+    """
+    if t in d:
+        del d[t]
+        return
+    for k, v in d.items():
+        if isinstance(v, collections.abc.Mapping):
+            del_if_exists(t, v)
+
+
+def replace_dict(d, u, ignored_keys=[]):
+    """Replaces values in dict d with values in dict u.
+
+    Args:
+        d (dict): the target dict to overwrite
+        u (dict): the dict containing the values to overwrite the target dict
+        ignored_keys (list, optional): the keys to skip. Defaults to [].
+
+    Returns:
+        dict d with values overwritten by the corresponding ones in dict u.
+    """
+    if u is not None:
+        for k, v in u.items():
+            if k not in ignored_keys:
+                if v is None:
+                    del_if_exists(k, d)
+                    continue
+                if isinstance(v, collections.abc.Mapping):
+                    d[k] = replace_dict(d.get(k, {}), v, ignored_keys)
+                else:
+                    d[k] = v
+    return d
+
+
+def get_val_by_key(d: dict, k):
+    if k in d:
+        return d[k]
+    for v in d.values():
+        if isinstance(v, dict):
+            found = get_val_by_key(v, k)
+            if found is not None:
+                return found
+    return None
+
+
+def set_val_by_key(d: dict, k, vv):
+    if k in d:
+        d[k] = vv
+    for v in d.values():
+        if isinstance(v, dict):
+            set_val_by_key(v, k, vv)
+
+
+def fetch_hostfile(hostfile_path):
+    if not os.path.isfile(hostfile_path):
+        logger.warning("Unable to find hostfile, will proceed with training "
+                       "with local resources only.")
+        return None
+
+    # e.g., worker-0 slots=16
+    with open(hostfile_path, 'r') as fd:
+        resource_pool = collections.OrderedDict()
+        for line in fd.readlines():
+            line = line.strip()
+            if line == '':
+                # skip empty lines
+                continue
+            try:
+                hostname, slots = line.split()
+                _, slot_count = slots.split("=")
+                slot_count = int(slot_count)
+            except ValueError as err:
+                logger.error("Hostfile is not formatted correctly, unable to "
+                             "proceed with training.")
+                raise err
+            if hostname in resource_pool:
+                logger.error("Hostfile contains duplicate hosts, unable to "
+                             "proceed with training.")
+                raise ValueError("host {} is already defined".format(hostname))
+            resource_pool[hostname] = slot_count
+
+    return resource_pool
+
+
+def validate_ds_config(config: dict):
+
+    def is_enabled(config: dict, key):
+        if config is None:
+            return False
+        return bool(config.get(key))
+
+    config_zero = config.get("zero_optimization", {})
+    if not config_zero:
+        return True
+    stage = config_zero.get("stage")
+    offload = False
+    if stage == 1:
+        return True
+    elif stage == 2:
+        if is_enabled(config_zero, "cpu_offload") and is_enabled(config_zero, "cpu_offload_params"):
+            return False
+    elif stage == 3:
+        offload_devices = ["cpu", "nvme"]
+        if config_zero.get("offload_optimizer", {}).get("device") in offload_devices:
+            offload = True
+        if config_zero.get("offload_param", {}).get("device") in offload_devices:
+            offload = True
+    else:
+        return True
+
+    # HF requires that "ZeRO Offload can only work with DeepSpeed optimizers"
+    if offload and not config.get("optimizer"):
+        return False
+
+    return True
+
+
+def remove_dupe_dicts(l):
+    """Removes duplicate dictionaries from a list by stringifying each
+    dictionary with sorted keys and deduplicating through a set. Works with
+    nested data structures.
+
+    Args:
+        l (list): a list of (nested) data structures.
+
+    Returns:
+        A list of unique values.
+    """
+    list_of_strings = [json.dumps(d, sort_keys=True) for d in l]
+    list_of_strings = set(list_of_strings)
+    return [json.loads(s) for s in list_of_strings]
+
+
+def prune_config(config, ignored_keys=[]):
+    """Prunes the input configuration in place.
+
+    Args:
+        config (dict): A configuration dictionary.
+        ignored_keys (list, optional): the keys of the sections to delete. Defaults to [].
+    """
+
+    def find_del_key(d: dict, k: str):
+        if k in d:
+            del d[k]
+        else:
+            for dd in d.values():
+                if isinstance(dd, dict):
+                    find_del_key(dd, k)
+
+    for k in ignored_keys:
+        find_del_key(config, k)
+
+
+def prune_configs(configs, ignored_keys=[]):
+    """Prunes the input list of configurations.
+
+    Args:
+        configs (list): A list of configuration dictionaries.
+        ignored_keys (list, optional): the keys of the sections to delete. Defaults to [].
+
+    Returns:
+        A list of valid and unique configuration dictionaries.
+    """
+    pruned_list = []
+    for config in configs:
+        prune_config(config, ignored_keys)
+        pruned_list.append(config)
+
+    return remove_dupe_dicts(pruned_list)
+
+
+def get_tuning_keys(tuning_space: dict):
+    """Outputs the list of tunable parameters in the tuning space dict.
+
+    Args:
+        tuning_space (dict): a configuration dictionary containing tunable parameters as lists of values.
+
+    Returns:
+        A list of strings.
+    """
+    tuning_keys = []
+    for key, val in tuning_space.items():
+        if isinstance(val, dict):
+            tuning_keys.extend(get_tuning_keys(val))
+        if isinstance(val, list) and len(val) > 1:
+            tuning_keys.append(key)
+    return tuning_keys
+
+
+def get_all_configs(tuning_space: dict, ignore_keys=None):
+    """Splits the tuning space dictionary to result in all combinations of values.
+
+    Args:
+        tuning_space (dict): the tuning space where tunable parameters are lists of values.
+        ignore_keys (list, optional): keys whose values are kept as-is instead of being expanded.
+    """
+
+    def gen_combinations(d: dict):
+        keys, values = d.keys(), d.values()
+        values_choices = (gen_combinations(v) if isinstance(v, dict) else get_list(v) for v in values)
+        for comb in itertools.product(*values_choices):
+            yield dict(zip(keys, comb))
+
+    all_configs = []
+    ignored_key_vals = {}
+    for ik in ignore_keys or []:
+        ignored_key_vals[ik] = tuning_space.get(ik, {})
+        del_if_exists(ik, tuning_space)
+    for c in gen_combinations(tuning_space):
+        replace_dict(c, ignored_key_vals)
+        all_configs.append(c)
+    return all_configs
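
get_all_configs() expands every list in the tuning space into one axis of a cross-product, while scalars pass through unchanged; for example:

space = {"train_micro_batch_size_per_gpu": [1, 2],
         "zero_optimization": {"stage": [1, 2]}}
for cfg in get_all_configs(space, ignore_keys=[]):
    print(cfg)
# 4 combinations, e.g. {'train_micro_batch_size_per_gpu': 1, 'zero_optimization': {'stage': 1}}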
+
+
+def canonical_name(config: dict, tuning_keys=None, prefix="", omit_val=False):
+    """Generates a name from the acronyms of the tuning keys in the config dict.
+    TRAIN_MICRO_BATCH_SIZE_PER_GPU and GRADIENT_ACCUMULATION_STEPS are always included in the tuning keys.
+
+    Args:
+        config (dict): the config dict used to generate the name
+        tuning_keys (list, optional): the tuning keys used to generate the name. Defaults to None.
+        prefix (str, optional): a string added to the beginning of the name. Defaults to "".
+    """
+    if tuning_keys is None:
+        tuning_keys = []
+    if TRAIN_MICRO_BATCH_SIZE_PER_GPU not in tuning_keys:
+        tuning_keys.append(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
+    if GRADIENT_ACCUMULATION_STEPS not in tuning_keys:
+        tuning_keys.append(GRADIENT_ACCUMULATION_STEPS)
+    tuning_keys.sort()
+
+    def get_offload_name(offload_config):
+        cname = ""
+        if offload_config is None:
+            return "None_"
+        for key, val in offload_config.items():
+            key = "".join(map(lambda c: c[0], key.split('_')))
+            if (isinstance(val, int) or isinstance(val, float)) and val > 9000:
+                cname += key + '{:.1e}'.format(val) + "_"
+            else:
+                if isinstance(val, bool):
+                    val = "T" if val else "F"
+                cname += f"{key}{val}_"
+        return cname
+
+    def get_name_by_keys(config: dict, tuning_keys=None, omit_val=False):
+        cname = ""
+        if not tuning_keys or config is None:
+            return cname
+        for key, val in config.items():
+            # skip the arg_mappings section when naming the exp file
+            if key == "arg_mappings":
+                continue
+            if key == "offload_param":
+                cname += "op_"
+                if not omit_val:
+                    cname += get_offload_name(val)
+                continue
+            if key == "offload_optimizer":
+                cname += "oo_"
+                if not omit_val:
+                    cname += get_offload_name(val)
+                continue
+            # recursively call the func to get names for the child dicts
+            if isinstance(val, dict):
+                n = get_name_by_keys(val, tuning_keys, omit_val=omit_val)
+                if n != "":
+                    cname += n + "_"
+            if tuning_keys and key not in tuning_keys:
+                continue
+
+            key_str = "".join(map(lambda c: c[0], key.split('_')))
+
+            if not omit_val:
+                if (isinstance(val, int) or isinstance(val, float)) and val > 9000:
+                    cname += key_str + '{:.1e}'.format(val) + "_"
+                else:
+                    if isinstance(val, bool):
+                        val = "T" if val else "F"
+                    cname += f"{key_str}{val}_"
+            else:
+                cname += key_str + "_"
+
+        return cname[:-1]
+
+    name = get_name_by_keys(config, tuning_keys, omit_val=omit_val)
+
+    return prefix + (name if name != "" else "exp")
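
canonical_name() compresses each tuning key into the acronym of its underscore-separated words followed by its value, so experiment names stay short but decodable; for example (keys iterate in dict insertion order):

cfg = {"train_micro_batch_size_per_gpu": 4, "gradient_accumulation_steps": 2}
print(canonical_name(cfg, tuning_keys=[], prefix="z1_"))  # 'z1_tmbspg4_gas2'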
+
+
+def get_first_config(config: dict):
+    if not config:
+        return None
+    cfg = copy.deepcopy(config)
+
+    for key, val in cfg.items():
+        if isinstance(val, dict):
+            if key == "optimizer":  # use the user-defined optimizer, which might have lists of values as params
+                cfg[key] = val
+            else:
+                cfg[key] = get_first_config(val)
+        if isinstance(val, list) and len(val) > 0:
+            cfg[key] = val[0]
+    return cfg
+
+
+def write_experiments(exps: list, exps_dir: str):
+    exp_paths = []
+    for exp in exps:
+        exp_name = exp['name']
+        # write the experiment config to a json file
+        exp_path = os.path.join(exps_dir, f'{exp_name}.json')
+        with open(exp_path, 'w') as fd:
+            json.dump(exp, fd)
+        exp_paths.append(exp_path)
+    return exp_paths
+
+
+def memory_to_string(n, postfix="", units=None, precision=2):
+    # Note: the thresholds below use powers of 10 while the scaling uses powers of 1024.
+    if units is None:
+        if n // 10**12 > 0:
+            return str(round(n / 1024**4, precision)) + " T" + postfix
+        if n // 10**9 > 0:
+            return str(round(n / 1024**3, precision)) + " G" + postfix
+        elif n // 10**6 > 0:
+            return str(round(n / 1024**2, precision)) + " M" + postfix
+        elif n // 10**3 > 0:
+            return str(round(n / 1024, precision)) + " K" + postfix
+        else:
+            return str(n) + " "
+    else:
+        if units == "T" + postfix:
+            return str(round(n / 1024**4, precision)) + " " + units
+        if units == "G" + postfix:
+            return str(round(n / 1024**3, precision)) + " " + units
+        elif units == "M" + postfix:
+            return str(round(n / 1024**2, precision)) + " " + units
+        elif units == "K" + postfix:
+            return str(round(n / 1024, precision)) + " " + units
+        else:
+            return str(n) + " "
+
+
+def number_to_string(n, postfix="", units=None, precision=2):
+    if units is None:
+        if n // 10**9 > 0:
+            return str(round(n / 1000**3, precision)) + " B" + postfix
+        if n // 10**6 > 0:
+            return str(round(n / 1000**2, precision)) + " M" + postfix
+        elif n // 10**3 > 0:
+            return str(round(n / 1000**1, precision)) + " K" + postfix
+        else:
+            return str(n) + " "
+    else:
+        if units == "B" + postfix:
+            return str(round(n / 1000**3, precision)) + " " + units
+        elif units == "M" + postfix:
+            return str(round(n / 1000**2, precision)) + " " + units
+        elif units == "K" + postfix:
+            return str(round(n / 1000**1, precision)) + " " + units
+        else:
+            return str(n) + " "
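
Note the different scales of the two formatters above: memory_to_string divides by binary multiples (1024), number_to_string by decimal ones (1000). For example:

print(memory_to_string(3 * 1024**3, postfix="B"))  # '3.0 GB'
print(number_to_string(1_500_000))                 # '1.5 M'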
venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (527 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/config.cpython-310.pyc ADDED
Binary file (4.13 kB).
 
venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/constants.cpython-310.pyc ADDED
Binary file (1.71 kB).
 
venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/elastic_agent.cpython-310.pyc ADDED
Binary file (5.86 kB).
 
venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/elasticity.cpython-310.pyc ADDED
Binary file (11.1 kB).
 
venv/lib/python3.10/site-packages/deepspeed/ops/__init__.py ADDED
@@ -0,0 +1,15 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from . import adam
+from . import adagrad
+from . import lamb
+from . import lion
+from . import sparse_attention
+from . import transformer
+
+from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
+
+from ..git_version_info import compatible_ops as __compatible_ops__
venv/lib/python3.10/site-packages/deepspeed/ops/aio/__init__.py ADDED
@@ -0,0 +1,6 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from ..op_builder import AsyncIOBuilder
venv/lib/python3.10/site-packages/deepspeed/ops/aio/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (234 Bytes).