applied-ai-018 committed (verified)
Commit: 0cee4ac
Parent(s): c58af2c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. .gitattributes +3 -0
  2. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_73_mp_rank_01_optim_states.pt +3 -0
  3. ckpts/llama-3b/global_step100/layer_04-model_00-model_states.pt +3 -0
  4. venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so +3 -0
  5. venv/lib/python3.10/site-packages/deepspeed/__init__.py +348 -0
  6. venv/lib/python3.10/site-packages/deepspeed/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/deepspeed/__pycache__/constants.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/deepspeed/__pycache__/env_report.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/deepspeed/__pycache__/git_version_info.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/deepspeed/__pycache__/git_version_info_installed.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/deepspeed/__pycache__/pydantic_v1.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/deepspeed/constants.py +21 -0
  13. venv/lib/python3.10/site-packages/deepspeed/env_report.py +195 -0
  14. venv/lib/python3.10/site-packages/deepspeed/git_version_info.py +31 -0
  15. venv/lib/python3.10/site-packages/deepspeed/git_version_info_installed.py +6 -0
  16. venv/lib/python3.10/site-packages/deepspeed/pydantic_v1.py +16 -0
  17. venv/lib/python3.10/site-packages/deepspeed/runtime/__init__.py +4 -0
  18. venv/lib/python3.10/site-packages/deepspeed/runtime/bf16_optimizer.py +556 -0
  19. venv/lib/python3.10/site-packages/deepspeed/runtime/compiler.py +166 -0
  20. venv/lib/python3.10/site-packages/deepspeed/runtime/config.py +1039 -0
  21. venv/lib/python3.10/site-packages/deepspeed/runtime/constants.py +457 -0
  22. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__init__.py +5 -0
  23. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/config.py +168 -0
  24. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/constants.py +116 -0
  25. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/curriculum_scheduler.py +158 -0
  26. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__init__.py +5 -0
  27. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py +880 -0
  28. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_sampler.py +349 -0
  29. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/indexed_dataset.py +627 -0
  30. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/utils.py +52 -0
  31. venv/lib/python3.10/site-packages/deepspeed/runtime/engine.py +0 -0
  32. venv/lib/python3.10/site-packages/deepspeed/runtime/lr_schedules.py +878 -0
  33. venv/lib/python3.10/site-packages/deepspeed/runtime/quantize.py +180 -0
  34. venv/lib/python3.10/site-packages/deepspeed/utils/__init__.py +22 -0
  35. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/bwc.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/comms_logging.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/debug.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/exceptions.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/groups.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/init_on_device.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/logging.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/mixed_precision_linkage.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/numa.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/nvtx.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/tensor_fragment.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/timer.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/torch.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/types.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/z3_leaf_module.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -133,3 +133,6 @@ venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_infer.so.8 filte
  venv/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_73_mp_rank_01_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:081c3fe30e2795fbebbb341a47b983accf3111d9cfe7e6ebd92947a65c70d497
+ size 41830202
ckpts/llama-3b/global_step100/layer_04-model_00-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3889c55c2ae1034257f57e67f9a9e2cac9989c5cbde48fb99194c3f2def5d1cc
+ size 116407086
venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e81bf00d65473806844c2e1800e77c23d62a118255ab7c848c6619b444de4a6e
+ size 2586576
venv/lib/python3.10/site-packages/deepspeed/__init__.py ADDED
@@ -0,0 +1,348 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import sys
7
+ import types
8
+ import json
9
+ from typing import Optional, Union
10
+ import torch
11
+ from torch.optim import Optimizer
12
+ from torch.optim.lr_scheduler import _LRScheduler
13
+ from packaging import version as pkg_version
14
+
15
+ # Skip Triton import for AMD due to pytorch-triton-rocm module breaking device API in DeepSpeed
16
+ if not (hasattr(torch.version, 'hip') and torch.version.hip is not None):
17
+ try:
18
+ import triton # noqa: F401 # type: ignore
19
+ HAS_TRITON = True
20
+ except ImportError:
21
+ HAS_TRITON = False
22
+ else:
23
+ HAS_TRITON = False
24
+
25
+ from . import ops
26
+ from . import module_inject
27
+
28
+ from .accelerator import get_accelerator
29
+ from .constants import TORCH_DISTRIBUTED_DEFAULT_PORT
30
+ from .runtime.engine import DeepSpeedEngine, DeepSpeedOptimizerCallable, DeepSpeedSchedulerCallable
31
+ from .runtime.engine import ADAM_OPTIMIZER, LAMB_OPTIMIZER
32
+ from .runtime.hybrid_engine import DeepSpeedHybridEngine
33
+ from .runtime.pipe.engine import PipelineEngine
34
+ from .inference.engine import InferenceEngine
35
+ from .inference.config import DeepSpeedInferenceConfig
36
+ from .runtime.lr_schedules import add_tuning_arguments
37
+ from .runtime.config import DeepSpeedConfig, DeepSpeedConfigError
38
+ from .runtime.activation_checkpointing import checkpointing
39
+ from .ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
40
+ from .module_inject import replace_transformer_layer, revert_transformer_layer
41
+
42
+ from .utils import log_dist, OnDevice, logger
43
+ from .comm.comm import init_distributed
44
+
45
+ from .runtime import zero
46
+ from .runtime.compiler import is_compile_supported
47
+
48
+ from .pipe import PipelineModule
49
+
50
+ from .git_version_info import version, git_hash, git_branch
51
+
52
+
53
+ def _parse_version(version_str):
54
+ '''Parse a version string and extract the major, minor, and patch versions.'''
55
+ ver = pkg_version.parse(version_str)
56
+ return ver.major, ver.minor, ver.micro
57
+
58
+
59
+ # Export version information
60
+ __version__ = version
61
+ __version_major__, __version_minor__, __version_patch__ = _parse_version(__version__)
62
+ __git_hash__ = git_hash
63
+ __git_branch__ = git_branch
64
+
65
+ # Set to torch's distributed package or deepspeed.comm based inside DeepSpeedEngine init
66
+ dist = None
67
+
68
+
69
+ def initialize(args=None,
70
+ model: torch.nn.Module = None,
71
+ optimizer: Optional[Union[Optimizer, DeepSpeedOptimizerCallable]] = None,
72
+ model_parameters: Optional[torch.nn.Module] = None,
73
+ training_data: Optional[torch.utils.data.Dataset] = None,
74
+ lr_scheduler: Optional[Union[_LRScheduler, DeepSpeedSchedulerCallable]] = None,
75
+ distributed_port: int = TORCH_DISTRIBUTED_DEFAULT_PORT,
76
+ mpu=None,
77
+ dist_init_required: Optional[bool] = None,
78
+ collate_fn=None,
79
+ config=None,
80
+ config_params=None):
81
+ """Initialize the DeepSpeed Engine.
82
+
83
+ Arguments:
84
+ args: an object containing local_rank and deepspeed_config fields.
85
+ This is optional if `config` is passed.
86
+
87
+ model: Required: nn.module class before apply any wrappers
88
+
89
+ optimizer: Optional: a user defined Optimizer or Callable that returns an Optimizer object.
90
+ This overrides any optimizer definition in the DeepSpeed json config.
91
+
92
+ model_parameters: Optional: An iterable of torch.Tensors or dicts.
93
+ Specifies what Tensors should be optimized.
94
+
95
+ training_data: Optional: Dataset of type torch.utils.data.Dataset
96
+
97
+ lr_scheduler: Optional: Learning Rate Scheduler Object or a Callable that takes an Optimizer and returns a Scheduler object.
98
+ The scheduler object should define a get_lr(), step(), state_dict(), and load_state_dict() methods
99
+
100
+ distributed_port: Optional: Master node (rank 0)'s free port that needs to be used for communication during distributed training
101
+
102
+ mpu: Optional: A model parallelism unit object that implements
103
+ get_{model,data}_parallel_{rank,group,world_size}()
104
+
105
+ dist_init_required: Optional: None will auto-initialize torch distributed if needed,
106
+ otherwise the user can force it to be initialized or not via boolean.
107
+
108
+ collate_fn: Optional: Merges a list of samples to form a
109
+ mini-batch of Tensor(s). Used when using batched loading from a
110
+ map-style dataset.
111
+
112
+ config: Optional: Instead of requiring args.deepspeed_config you can pass your deepspeed config
113
+ as an argument instead, as a path or a dictionary.
114
+
115
+ config_params: Optional: Same as `config`, kept for backwards compatibility.
116
+
117
+ Returns:
118
+ A tuple of ``engine``, ``optimizer``, ``training_dataloader``, ``lr_scheduler``
119
+
120
+ * ``engine``: DeepSpeed runtime engine which wraps the client model for distributed training.
121
+
122
+ * ``optimizer``: Wrapped optimizer if a user defined ``optimizer`` is supplied, or if
123
+ optimizer is specified in json config else ``None``.
124
+
125
+ * ``training_dataloader``: DeepSpeed dataloader if ``training_data`` was supplied,
126
+ otherwise ``None``.
127
+
128
+ * ``lr_scheduler``: Wrapped lr scheduler if user ``lr_scheduler`` is passed, or
129
+ if ``lr_scheduler`` specified in JSON configuration. Otherwise ``None``.
130
+ """
131
+ log_dist("DeepSpeed info: version={}, git-hash={}, git-branch={}".format(__version__, __git_hash__,
132
+ __git_branch__),
133
+ ranks=[0])
134
+
135
+ # Disable zero.Init context if it's currently enabled
136
+ zero.partition_parameters.shutdown_init_context()
137
+
138
+ assert model is not None, "deepspeed.initialize requires a model"
139
+
140
+ global dist
141
+ from deepspeed import comm as dist
142
+ dist_backend = get_accelerator().communication_backend_name()
143
+ dist.init_distributed(dist_backend=dist_backend,
144
+ distributed_port=distributed_port,
145
+ dist_init_required=dist_init_required)
146
+
147
+ # Set config using config_params for backwards compat
148
+ if config is None and config_params is not None:
149
+ config = config_params
150
+
151
+ # Check for deepscale_config for backwards compat
152
+ if hasattr(args, "deepscale_config") and args.deepscale_config is not None:
153
+ logger.warning("************ --deepscale_config is deprecated, please use --deepspeed_config ************")
154
+ if hasattr(args, "deepspeed_config"):
155
+ assert (args.deepspeed_config is
156
+ None), "Not sure how to proceed, we were given both a deepscale_config and deepspeed_config"
157
+ args.deepspeed_config = args.deepscale_config
158
+ args.deepscale_config = None
159
+
160
+ # Check that we have only one config passed
161
+ if hasattr(args, "deepspeed_config") and args.deepspeed_config is not None:
162
+ assert config is None, "Not sure how to proceed, we were given deepspeed configs in the deepspeed arguments and deepspeed.initialize() function call"
163
+ config = args.deepspeed_config
164
+ assert config is not None, "DeepSpeed requires --deepspeed_config to specify configuration file"
165
+
166
+ if not isinstance(model, PipelineModule):
167
+ config_class = DeepSpeedConfig(config, mpu)
168
+ if config_class.hybrid_engine.enabled:
169
+ engine = DeepSpeedHybridEngine(args=args,
170
+ model=model,
171
+ optimizer=optimizer,
172
+ model_parameters=model_parameters,
173
+ training_data=training_data,
174
+ lr_scheduler=lr_scheduler,
175
+ mpu=mpu,
176
+ dist_init_required=dist_init_required,
177
+ collate_fn=collate_fn,
178
+ config=config,
179
+ config_class=config_class)
180
+ else:
181
+ engine = DeepSpeedEngine(args=args,
182
+ model=model,
183
+ optimizer=optimizer,
184
+ model_parameters=model_parameters,
185
+ training_data=training_data,
186
+ lr_scheduler=lr_scheduler,
187
+ mpu=mpu,
188
+ dist_init_required=dist_init_required,
189
+ collate_fn=collate_fn,
190
+ config=config,
191
+ config_class=config_class)
192
+ else:
193
+ assert mpu is None, "mpu must be None with pipeline parallelism"
194
+ mpu = model.mpu()
195
+ config_class = DeepSpeedConfig(config, mpu)
196
+ engine = PipelineEngine(args=args,
197
+ model=model,
198
+ optimizer=optimizer,
199
+ model_parameters=model_parameters,
200
+ training_data=training_data,
201
+ lr_scheduler=lr_scheduler,
202
+ mpu=mpu,
203
+ dist_init_required=dist_init_required,
204
+ collate_fn=collate_fn,
205
+ config=config,
206
+ config_class=config_class)
207
+
208
+ # Restore zero.Init context if necessary
209
+ zero.partition_parameters.restore_init_context()
210
+
211
+ return_items = [engine, engine.optimizer, engine.training_dataloader, engine.lr_scheduler]
212
+ return tuple(return_items)
213
+
214
+
215
+ def _add_core_arguments(parser):
216
+ r"""Helper (internal) function to update an argument parser with an argument group of the core DeepSpeed arguments.
217
+ The core set of DeepSpeed arguments include the following:
218
+ 1) --deepspeed: boolean flag to enable DeepSpeed
219
+ 2) --deepspeed_config <json file path>: path of a json configuration file to configure DeepSpeed runtime.
220
+
221
+ This is a helper function to the public add_config_arguments()
222
+
223
+ Arguments:
224
+ parser: argument parser
225
+ Return:
226
+ parser: Updated Parser
227
+ """
228
+ group = parser.add_argument_group('DeepSpeed', 'DeepSpeed configurations')
229
+
230
+ group.add_argument('--deepspeed',
231
+ default=False,
232
+ action='store_true',
233
+ help='Enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend)')
234
+
235
+ group.add_argument('--deepspeed_config', default=None, type=str, help='DeepSpeed json configuration file.')
236
+
237
+ group.add_argument('--deepscale',
238
+ default=False,
239
+ action='store_true',
240
+ help='Deprecated enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend)')
241
+
242
+ group.add_argument('--deepscale_config',
243
+ default=None,
244
+ type=str,
245
+ help='Deprecated DeepSpeed json configuration file.')
246
+
247
+ return parser
248
+
249
+
250
+ def add_config_arguments(parser):
251
+ r"""Update the argument parser to enabling parsing of DeepSpeed command line arguments.
252
+ The set of DeepSpeed arguments include the following:
253
+ 1) --deepspeed: boolean flag to enable DeepSpeed
254
+ 2) --deepspeed_config <json file path>: path of a json configuration file to configure DeepSpeed runtime.
255
+
256
+ Arguments:
257
+ parser: argument parser
258
+ Return:
259
+ parser: Updated Parser
260
+ """
261
+ parser = _add_core_arguments(parser)
262
+
263
+ return parser
264
+
265
+
266
+ def default_inference_config():
267
+ """
268
+ Return a default DeepSpeed inference configuration dictionary.
269
+ """
270
+ return DeepSpeedInferenceConfig().dict()
271
+
272
+
273
+ def init_inference(model, config=None, **kwargs):
274
+ """Initialize the DeepSpeed InferenceEngine.
275
+
276
+ Description: all four cases are valid and supported in DS init_inference() API.
277
+
278
+ # Case 1: user provides no config and no kwargs. Default config will be used.
279
+
280
+ .. code-block:: python
281
+
282
+ generator.model = deepspeed.init_inference(generator.model)
283
+ string = generator("DeepSpeed is")
284
+ print(string)
285
+
286
+ # Case 2: user provides a config and no kwargs. User supplied config will be used.
287
+
288
+ .. code-block:: python
289
+
290
+ generator.model = deepspeed.init_inference(generator.model, config=config)
291
+ string = generator("DeepSpeed is")
292
+ print(string)
293
+
294
+ # Case 3: user provides no config and uses keyword arguments (kwargs) only.
295
+
296
+ .. code-block:: python
297
+
298
+ generator.model = deepspeed.init_inference(generator.model,
299
+ tensor_parallel={"tp_size": world_size},
300
+ dtype=torch.half,
301
+ replace_with_kernel_inject=True)
302
+ string = generator("DeepSpeed is")
303
+ print(string)
304
+
305
+ # Case 4: user provides config and keyword arguments (kwargs). Both config and kwargs are merged and kwargs take precedence.
306
+
307
+ .. code-block:: python
308
+
309
+ generator.model = deepspeed.init_inference(generator.model, config={"dtype": torch.half}, replace_with_kernel_inject=True)
310
+ string = generator("DeepSpeed is")
311
+ print(string)
312
+
313
+ Arguments:
314
+ model: Required: original nn.module object without any wrappers
315
+
316
+ config: Optional: instead of arguments, you can pass in a DS inference config dict or path to JSON file
317
+
318
+ Returns:
319
+ A deepspeed.InferenceEngine wrapped model.
320
+ """
321
+ log_dist("DeepSpeed info: version={}, git-hash={}, git-branch={}".format(__version__, __git_hash__,
322
+ __git_branch__),
323
+ ranks=[0])
324
+
325
+ # Load config_dict from config first
326
+ if config is None:
327
+ config = {}
328
+ if isinstance(config, str):
329
+ with open(config, "r") as f:
330
+ config_dict = json.load(f)
331
+ elif isinstance(config, dict):
332
+ config_dict = config
333
+ else:
334
+ raise ValueError(f"'config' argument expected string or dictionary, got {type(config)}")
335
+
336
+ # Update with values from kwargs, ensuring no conflicting overlap between config and kwargs
337
+ overlap_keys = set(config_dict.keys()).intersection(kwargs.keys())
338
+ # If there is overlap, error out if values are different
339
+ for key in overlap_keys:
340
+ if config_dict[key] != kwargs[key]:
341
+ raise ValueError(f"Conflicting argument '{key}' in 'config':{config_dict[key]} and kwargs:{kwargs[key]}")
342
+ config_dict.update(kwargs)
343
+
344
+ ds_inference_config = DeepSpeedInferenceConfig(**config_dict)
345
+
346
+ engine = InferenceEngine(model, config=ds_inference_config)
347
+
348
+ return engine
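
The file above defines the public entry points (initialize, add_config_arguments, init_inference) used by client training scripts. As an illustration only, and not part of this commit, a minimal sketch of how these entry points are typically wired together follows; the model, tensor sizes, and the ds_config.json path are placeholders.

    import argparse
    import torch
    import deepspeed

    # --deepspeed / --deepspeed_config flags come from add_config_arguments() above
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int, default=-1)
    parser = deepspeed.add_config_arguments(parser)
    args = parser.parse_args()

    model = torch.nn.Linear(1024, 1024)  # stand-in for a real network

    # initialize() returns (engine, optimizer, training_dataloader, lr_scheduler);
    # entries that were not requested come back as None.
    engine, optimizer, _, _ = deepspeed.initialize(
        args=args,
        model=model,
        model_parameters=model.parameters(),
        config="ds_config.json",  # hypothetical config path
    )

    batch = torch.randn(8, 1024, device=engine.device)
    loss = engine(batch).mean()
    engine.backward(loss)  # the engine handles loss scaling / gradient accumulation
    engine.step()
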
venv/lib/python3.10/site-packages/deepspeed/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (11.3 kB).
venv/lib/python3.10/site-packages/deepspeed/__pycache__/constants.cpython-310.pyc ADDED
Binary file (468 Bytes).
venv/lib/python3.10/site-packages/deepspeed/__pycache__/env_report.cpython-310.pyc ADDED
Binary file (6.83 kB).
venv/lib/python3.10/site-packages/deepspeed/__pycache__/git_version_info.cpython-310.pyc ADDED
Binary file (830 Bytes).
venv/lib/python3.10/site-packages/deepspeed/__pycache__/git_version_info_installed.cpython-310.pyc ADDED
Binary file (524 Bytes).
venv/lib/python3.10/site-packages/deepspeed/__pycache__/pydantic_v1.cpython-310.pyc ADDED
Binary file (625 Bytes).
venv/lib/python3.10/site-packages/deepspeed/constants.py ADDED
@@ -0,0 +1,21 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import os
+ from datetime import timedelta
+
+ #############################################
+ # Torch distributed constants
+ #############################################
+ TORCH_DISTRIBUTED_DEFAULT_PORT = 29500
+
+ # Default process group wide timeout, if applicable.
+ # This only applies to the gloo and nccl backends
+ # (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).
+ # To make an attempt at backwards compatibility with THD, we use an
+ # extraordinarily high default timeout, given that THD did not have timeouts.
+ default_pg_timeout = timedelta(minutes=int(os.getenv("DEEPSPEED_TIMEOUT", default=30)))
+ INFERENCE_GENERIC_MODE = 'generic'
+ INFERENCE_SPECIALIZED_MODE = 'specialized'
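
As a small illustrative sketch (the 120-minute value is arbitrary, not part of this commit), the timeout constant above is read once, in minutes, when the module is imported:

    import os

    # Must be set before the first deepspeed import, since the value is read at import time.
    os.environ["DEEPSPEED_TIMEOUT"] = "120"

    from deepspeed.constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout

    print(TORCH_DISTRIBUTED_DEFAULT_PORT)  # 29500
    print(default_pg_timeout)              # 2:00:00 (a datetime.timedelta)
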
venv/lib/python3.10/site-packages/deepspeed/env_report.py ADDED
@@ -0,0 +1,195 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import torch
8
+ import deepspeed
9
+ import subprocess
10
+ import argparse
11
+ from .ops.op_builder.all_ops import ALL_OPS
12
+ from .git_version_info import installed_ops, torch_info, accelerator_name
13
+ from deepspeed.accelerator import get_accelerator
14
+
15
+ GREEN = '\033[92m'
16
+ RED = '\033[91m'
17
+ YELLOW = '\033[93m'
18
+ END = '\033[0m'
19
+ SUCCESS = f"{GREEN} [SUCCESS] {END}"
20
+ OKAY = f"{GREEN}[OKAY]{END}"
21
+ WARNING = f"{YELLOW}[WARNING]{END}"
22
+ FAIL = f'{RED}[FAIL]{END}'
23
+ INFO = '[INFO]'
24
+
25
+ color_len = len(GREEN) + len(END)
26
+ okay = f"{GREEN}[OKAY]{END}"
27
+ warning = f"{YELLOW}[WARNING]{END}"
28
+
29
+
30
+ def op_report(verbose=True):
31
+ max_dots = 23
32
+ max_dots2 = 11
33
+ h = ["op name", "installed", "compatible"]
34
+ print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
35
+ print("DeepSpeed C++/CUDA extension op report")
36
+ print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
37
+
38
+ print("NOTE: Ops not installed will be just-in-time (JIT) compiled at\n"
39
+ " runtime if needed. Op compatibility means that your system\n"
40
+ " meet the required dependencies to JIT install the op.")
41
+
42
+ print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
43
+ print("JIT compiled ops requires ninja")
44
+ ninja_status = OKAY if ninja_installed() else FAIL
45
+ print('ninja', "." * (max_dots - 5), ninja_status)
46
+ print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
47
+ print(h[0], "." * (max_dots - len(h[0])), h[1], "." * (max_dots2 - len(h[1])), h[2])
48
+ print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
49
+ installed = f"{GREEN}[YES]{END}"
50
+ no = f"{YELLOW}[NO]{END}"
51
+ for op_name, builder in ALL_OPS.items():
52
+ dots = "." * (max_dots - len(op_name))
53
+ is_compatible = OKAY if builder.is_compatible(verbose) else no
54
+ is_installed = installed if installed_ops.get(op_name,
55
+ False) and accelerator_name == get_accelerator()._name else no
56
+ dots2 = '.' * ((len(h[1]) + (max_dots2 - len(h[1]))) - (len(is_installed) - color_len))
57
+ print(op_name, dots, is_installed, dots2, is_compatible)
58
+ print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
59
+
60
+
61
+ def ninja_installed():
62
+ try:
63
+ import ninja # noqa: F401 # type: ignore
64
+ except ImportError:
65
+ return False
66
+ return True
67
+
68
+
69
+ def nvcc_version():
70
+ import torch.utils.cpp_extension
71
+ cuda_home = torch.utils.cpp_extension.CUDA_HOME
72
+ if cuda_home is None:
73
+ return f"{RED} [FAIL] cannot find CUDA_HOME via torch.utils.cpp_extension.CUDA_HOME={torch.utils.cpp_extension.CUDA_HOME} {END}"
74
+ try:
75
+ output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True)
76
+ except FileNotFoundError:
77
+ return f"{RED} [FAIL] nvcc missing {END}"
78
+ output_split = output.split()
79
+ release_idx = output_split.index("release")
80
+ release = output_split[release_idx + 1].replace(',', '').split(".")
81
+ return ".".join(release)
82
+
83
+
84
+ def installed_cann_path():
85
+ if "ASCEND_HOME_PATH" in os.environ or os.path.exists(os.environ["ASCEND_HOME_PATH"]):
86
+ return os.environ["ASCEND_HOME_PATH"]
87
+ return None
88
+
89
+
90
+ def installed_cann_version():
91
+ import re
92
+ ascend_path = installed_cann_path()
93
+ if ascend_path is None:
94
+ return f"CANN_HOME does not exist, unable to compile NPU op(s)"
95
+ cann_version = ""
96
+ for dirpath, _, filenames in os.walk(os.path.realpath(ascend_path)):
97
+ if cann_version:
98
+ break
99
+ install_files = [file for file in filenames if re.match(r"ascend_.*_install\.info", file)]
100
+ if install_files:
101
+ filepath = os.path.join(dirpath, install_files[0])
102
+ with open(filepath, "r") as f:
103
+ for line in f:
104
+ if line.find("version") != -1:
105
+ cann_version = line.strip().split("=")[-1]
106
+ break
107
+ return cann_version
108
+
109
+
110
+ def get_shm_size():
111
+ try:
112
+ shm_stats = os.statvfs('/dev/shm')
113
+ except (OSError, FileNotFoundError, ValueError):
114
+ return "UNKNOWN", None
115
+
116
+ shm_size = shm_stats.f_frsize * shm_stats.f_blocks
117
+ shm_hbytes = human_readable_size(shm_size)
118
+ warn = []
119
+ if shm_size < 512 * 1024**2:
120
+ warn.append(
121
+ f" {YELLOW} [WARNING] /dev/shm size might be too small, if running in docker increase to at least --shm-size='1gb' {END}"
122
+ )
123
+ if get_accelerator().communication_backend_name() == "nccl":
124
+ warn.append(
125
+ f" {YELLOW} [WARNING] see more details about NCCL requirements: https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/troubleshooting.html#sharing-data {END}"
126
+ )
127
+ return shm_hbytes, warn
128
+
129
+
130
+ def human_readable_size(size):
131
+ units = ['B', 'KB', 'MB', 'GB', 'TB']
132
+ i = 0
133
+ while size >= 1024 and i < len(units) - 1:
134
+ size /= 1024
135
+ i += 1
136
+ return f'{size:.2f} {units[i]}'
137
+
138
+
139
+ def debug_report():
140
+ max_dots = 33
141
+
142
+ report = [("torch install path", torch.__path__), ("torch version", torch.__version__),
143
+ ("deepspeed install path", deepspeed.__path__),
144
+ ("deepspeed info", f"{deepspeed.__version__}, {deepspeed.__git_hash__}, {deepspeed.__git_branch__}")]
145
+ if get_accelerator().device_name() == 'cuda':
146
+ hip_version = getattr(torch.version, "hip", None)
147
+ report.extend([("torch cuda version", torch.version.cuda), ("torch hip version", hip_version),
148
+ ("nvcc version", (None if hip_version else nvcc_version())),
149
+ ("deepspeed wheel compiled w.", f"torch {torch_info['version']}, " +
150
+ (f"hip {torch_info['hip_version']}" if hip_version else f"cuda {torch_info['cuda_version']}"))
151
+ ])
152
+ elif get_accelerator().device_name() == 'npu':
153
+ import torch_npu
154
+ report.extend([("deepspeed wheel compiled w.", f"torch {torch_info['version']}"),
155
+ ("torch_npu install path", torch_npu.__path__), ("torch_npu version", torch_npu.__version__),
156
+ ("ascend_cann version", installed_cann_version())])
157
+ else:
158
+ report.extend([("deepspeed wheel compiled w.", f"torch {torch_info['version']} ")])
159
+
160
+ report.append(("shared memory (/dev/shm) size", get_shm_size()))
161
+
162
+ print("DeepSpeed general environment info:")
163
+ for name, value in report:
164
+ warns = []
165
+ if isinstance(value, tuple):
166
+ value, warns = value
167
+ print(name, "." * (max_dots - len(name)), value)
168
+ if warns:
169
+ for warn in warns:
170
+ print(warn)
171
+
172
+
173
+ def parse_arguments():
174
+ parser = argparse.ArgumentParser()
175
+ parser.add_argument('--hide_operator_status',
176
+ action='store_true',
177
+ help='Suppress display of installation and compatibility statuses of DeepSpeed operators. ')
178
+ parser.add_argument('--hide_errors_and_warnings', action='store_true', help='Suppress warning and error messages.')
179
+ args = parser.parse_args()
180
+ return args
181
+
182
+
183
+ def main(hide_operator_status=False, hide_errors_and_warnings=False):
184
+ if not hide_operator_status:
185
+ op_report(verbose=not hide_errors_and_warnings)
186
+ debug_report()
187
+
188
+
189
+ def cli_main():
190
+ args = parse_arguments()
191
+ main(hide_operator_status=args.hide_operator_status, hide_errors_and_warnings=args.hide_errors_and_warnings)
192
+
193
+
194
+ if __name__ == "__main__":
195
+ main()
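
The report functions above can also be driven programmatically rather than through the cli_main() entry point; a minimal sketch using only the functions defined in this file:

    # Prints the op-compatibility table and the general environment report,
    # suppressing per-op warning output (mirrors main() above).
    from deepspeed.env_report import main as ds_env_report

    ds_env_report(hide_operator_status=False, hide_errors_and_warnings=True)
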
venv/lib/python3.10/site-packages/deepspeed/git_version_info.py ADDED
@@ -0,0 +1,31 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ try:
+     # This is populated by setup.py
+     from .git_version_info_installed import * # noqa: F401 # type: ignore
+ except ModuleNotFoundError:
+     import os
+     if os.path.isfile('version.txt'):
+         # Will be missing from checkouts that haven't been installed (e.g., readthedocs)
+         version = open('version.txt', 'r').read().strip()
+     else:
+         version = "0.0.0"
+     git_hash = '[none]'
+     git_branch = '[none]'
+
+     from .ops.op_builder.all_ops import ALL_OPS
+     installed_ops = dict.fromkeys(ALL_OPS.keys(), False)
+     accelerator_name = ""
+     torch_info = {'version': "0.0", "cuda_version": "0.0", "hip_version": "0.0"}
+
+ # compatible_ops list is recreated for each launch
+ from .ops.op_builder.all_ops import ALL_OPS
+
+ compatible_ops = dict.fromkeys(ALL_OPS.keys(), False)
+ for op_name, builder in ALL_OPS.items():
+     op_compatible = builder.is_compatible()
+     compatible_ops[op_name] = op_compatible
+ compatible_ops["deepspeed_not_implemented"] = False
venv/lib/python3.10/site-packages/deepspeed/git_version_info_installed.py ADDED
@@ -0,0 +1,6 @@
+ version='0.14.2'
+ git_hash='unknown'
+ git_branch='unknown'
+ installed_ops={'deepspeed_not_implemented': False, 'deepspeed_ccl_comm': False, 'deepspeed_shm_comm': False, 'cpu_adam': False, 'fused_adam': False}
+ accelerator_name='cpu'
+ torch_info={'version': '2.3', 'bf16_support': False, 'cuda_version': '12.1', 'nccl_version': '2.20', 'hip_version': '0.0'}
venv/lib/python3.10/site-packages/deepspeed/pydantic_v1.py ADDED
@@ -0,0 +1,16 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ """Pydantic v1 compatibility module.
+
+ Pydantic v2 introduced breaking changes that hinder its adoption:
+ https://docs.pydantic.dev/latest/migration/. To provide deepspeed users the option to
+ migrate to pydantic v2 on their own timeline, deepspeed uses this compatibility module
+ as a pydantic-version-agnostic alias for pydantic's v1 API.
+ """
+
+ try:
+     from pydantic.v1 import * # noqa: F401
+ except ImportError:
+     from pydantic import * # noqa: F401
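
Other modules in this commit (for example runtime/compiler.py below) import from this shim instead of importing pydantic directly. A minimal usage sketch; the TinyConfig model is purely illustrative:

    from deepspeed.pydantic_v1 import BaseModel

    class TinyConfig(BaseModel):  # hypothetical config model for illustration
        enabled: bool = False
        backend: str = "inductor"

    print(TinyConfig(enabled=True).dict())  # v1-style .dict() works on either pydantic version
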
venv/lib/python3.10/site-packages/deepspeed/runtime/__init__.py ADDED
@@ -0,0 +1,4 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
venv/lib/python3.10/site-packages/deepspeed/runtime/bf16_optimizer.py ADDED
@@ -0,0 +1,556 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from collections import OrderedDict
7
+ import torch
8
+ import sys
9
+ from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
10
+ from deepspeed import comm as dist
11
+ from deepspeed.runtime.constants import PIPE_REPLICATED
12
+ from deepspeed.runtime.base_optimizer import ZeROOptimizer
13
+ from packaging import version as pkg_version
14
+ from deepspeed.git_version_info import version
15
+ from deepspeed.runtime.utils import (get_global_norm_of_tensors, clip_tensors_by_global_norm, DummyOptim,
16
+ align_dense_tensors, all_gather_dp_groups, is_model_parallel_parameter,
17
+ see_memory_usage, graph_process, get_norm_with_moe_layers)
18
+ from deepspeed.utils import link_hp_params, lazy_init_hp_params_optimizer_state, fragment_address, groups
19
+ from deepspeed.moe.utils import is_moe_param, is_moe_param_group
20
+ from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank
21
+ from deepspeed.checkpoint import enable_universal_checkpoint
22
+ from deepspeed.checkpoint.constants import (DS_VERSION, PARTITION_COUNT, BASE_OPTIMIZER_STATE,
23
+ SINGLE_PARTITION_OF_FP32_GROUPS, CLIP_GRAD, GROUP_PADDINGS,
24
+ PARAM_SLICE_MAPPINGS)
25
+
26
+ setattr(sys.modules[__name__], 'fragment_address', fragment_address)
27
+
28
+
29
+ class BF16_Optimizer(ZeROOptimizer):
30
+
31
+ def __init__(self,
32
+ init_optimizer,
33
+ param_names,
34
+ mpu=None,
35
+ clip_grad=0.0,
36
+ norm_type=2,
37
+ allgather_bucket_size=5000000000,
38
+ dp_process_group=None,
39
+ timers=None,
40
+ grad_acc_dtype=None,
41
+ graph_harvesting=False,
42
+ immediate_grad_update=False,
43
+ has_moe_layers=False):
44
+ super().__init__()
45
+ see_memory_usage('begin bf16_optimizer', force=True)
46
+ self.timers = timers
47
+ self.optimizer = init_optimizer
48
+ self.param_names = param_names
49
+ self.using_real_optimizer = not isinstance(self.optimizer, DummyOptim)
50
+
51
+ assert grad_acc_dtype in [torch.float32, torch.bfloat16
52
+ ], f"BF16Optimizer: Unsupported gradient accumulation data type: {grad_acc_dtype}"
53
+ self.grad_acc_dtype = grad_acc_dtype
54
+ self.immediate_grad_update = immediate_grad_update
55
+
56
+ self.clip_grad = clip_grad
57
+ self.norm_type = norm_type
58
+ self.mpu = mpu
59
+ self.allgather_bucket_size = int(allgather_bucket_size)
60
+ self.dp_process_group = dp_process_group
61
+ self.dp_rank = dist.get_rank(group=self.dp_process_group)
62
+ self.has_moe_layers = has_moe_layers
63
+ self.non_expert_gradients = []
64
+ self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))]
65
+ if self.has_moe_layers:
66
+ self._configure_moe_settings()
67
+
68
+ # Use torch (un)flatten ops
69
+ self.flatten = _flatten_dense_tensors
70
+ self.unflatten = _unflatten_dense_tensors
71
+
72
+ # align nccl all-gather send buffers to 4-byte boundary
73
+ self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2
74
+
75
+ # Build BF16/FP32 groups
76
+ self.bf16_groups = []
77
+ self.bf16_groups_flat = []
78
+ self.bf16_partitioned_groups = []
79
+
80
+ self.fp32_groups_flat_partition = []
81
+
82
+ # Maintain different fp32 gradients views for convenience
83
+ self.fp32_groups_gradients = []
84
+ self.fp32_groups_gradient_dict = {}
85
+ self.fp32_groups_gradients_flat = []
86
+ self.fp32_groups_actual_gradients_flat = []
87
+ self.fp32_groups_gradient_flat_partition = []
88
+ self.fp32_groups_has_gradients = []
89
+
90
+ self.group_paddings = []
91
+ self.graph_harvesting = graph_harvesting
92
+ if self.using_real_optimizer:
93
+ self._setup_for_real_optimizer()
94
+
95
+ see_memory_usage('end bf16_optimizer', force=True)
96
+
97
+ def _configure_moe_settings(self):
98
+ assert any(
99
+ [is_moe_param_group(group) for group in self.optimizer.param_groups]
100
+ ), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer"
101
+
102
+ for i, group in enumerate(self.optimizer.param_groups):
103
+ if is_moe_param_group(group):
104
+ assert all([is_moe_param(param)
105
+ for param in group['params']]), "All params in MoE group must be MoE params"
106
+ self.real_dp_process_group[i] = groups._get_expert_data_parallel_group(group['name'])
107
+ self.expert_gradients = {}
108
+ if self.has_moe_layers:
109
+ for key in groups._get_expert_data_parallel_group_dict().keys():
110
+ self.expert_gradients[key] = []
111
+
112
+ def _setup_for_real_optimizer(self):
113
+ self.partition_count = [dist.get_world_size(group=pg) for pg in self.real_dp_process_group]
114
+
115
+ for i, param_group in enumerate(self.optimizer.param_groups):
116
+ real_dp_world_size = dist.get_world_size(group=self.real_dp_process_group[i])
117
+ see_memory_usage(f'before initializing group {i}', force=True)
118
+
119
+ partition_id = dist.get_rank(group=self.real_dp_process_group[i])
120
+
121
+ # grab the original list
122
+ trainable_parameters = [param for param in param_group['params'] if param.requires_grad]
123
+ self.bf16_groups.append(trainable_parameters)
124
+
125
+ # create flat bf16 params
126
+ self.bf16_groups_flat.append(
127
+ self._flatten_dense_tensors_aligned(self.bf16_groups[i],
128
+ self.nccl_start_alignment_factor * real_dp_world_size))
129
+ # Make bf16 params point to flat tensor storage
130
+ self._update_storage_to_flattened_tensor(tensor_list=self.bf16_groups[i],
131
+ flat_tensor=self.bf16_groups_flat[i])
132
+
133
+ # divide flat weights into equal sized partitions
134
+ partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size
135
+ bf16_dp_partitions = [
136
+ self.bf16_groups_flat[i].narrow(0, dp_index * partition_size, partition_size)
137
+ for dp_index in range(real_dp_world_size)
138
+ ]
139
+ self.bf16_partitioned_groups.append(bf16_dp_partitions)
140
+
141
+ # create fp32 params partition
142
+ self.fp32_groups_flat_partition.append(bf16_dp_partitions[partition_id].clone().float().detach())
143
+ self.fp32_groups_flat_partition[i].requires_grad = True
144
+
145
+ num_elem_list = [t.numel() for t in self.bf16_groups[i]]
146
+
147
+ # create fp32 gradients
148
+ fp32_flat_buffer = torch.zeros_like(self.bf16_groups_flat[i], dtype=self.grad_acc_dtype)
149
+ self.fp32_groups_gradients_flat.append(fp32_flat_buffer)
150
+ if self.has_moe_layers and is_moe_param_group(param_group):
151
+ self.expert_gradients[param_group['name']].append(fp32_flat_buffer)
152
+ else:
153
+ self.non_expert_gradients.append(fp32_flat_buffer)
154
+
155
+ # track individual fp32 gradients for entire model
156
+ fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i],
157
+ num_elem_list=num_elem_list)
158
+ self.fp32_groups_gradients.append(fp32_gradients)
159
+ self.fp32_groups_gradient_dict[i] = fp32_gradients
160
+
161
+ # flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding)
162
+ length_without_padding = sum(num_elem_list)
163
+ self.fp32_groups_actual_gradients_flat.append(
164
+ torch.narrow(self.fp32_groups_gradients_flat[i], 0, 0, length_without_padding))
165
+
166
+ # flat tensor corresponding to gradient partition
167
+ self.fp32_groups_gradient_flat_partition.append(
168
+ torch.narrow(self.fp32_groups_gradients_flat[i], 0, partition_id * partition_size, partition_size))
169
+
170
+ # track fp32 gradient updates
171
+ self.fp32_groups_has_gradients.append([False] * len(self.bf16_groups[i]))
172
+
173
+ # Record padding required for alignment
174
+ if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1:
175
+ padding = self.bf16_groups_flat[i].numel() - length_without_padding
176
+ else:
177
+ padding = 0
178
+
179
+ self.group_paddings.append(padding)
180
+
181
+ # update optimizer param groups to reference fp32 params partition
182
+ param_group['params'] = [self.fp32_groups_flat_partition[i]]
183
+
184
+ see_memory_usage(f'after initializing group {i}', force=True)
185
+
186
+ see_memory_usage('before initialize_optimizer', force=True)
187
+ self.initialize_optimizer_states()
188
+ see_memory_usage('end initialize_optimizer', force=True)
189
+
190
+ if self.immediate_grad_update:
191
+ self.create_grad_acc_hooks()
192
+
193
+ # Need optimizer states initialized before linking lp to optimizer state
194
+ self._link_all_hp_params()
195
+ self._hp_optimizer_states_linked = False
196
+ self._enable_universal_checkpoint()
197
+ self._param_slice_mappings = self._create_param_mapping()
198
+
199
+ def _enable_universal_checkpoint(self):
200
+ for lp_param_group in self.bf16_groups:
201
+ enable_universal_checkpoint(param_list=lp_param_group)
202
+
203
+ def _create_param_mapping(self):
204
+ param_mapping = []
205
+ for i, _ in enumerate(self.optimizer.param_groups):
206
+ param_mapping_per_group = OrderedDict()
207
+ for lp in self.bf16_groups[i]:
208
+ if lp._hp_mapping is not None:
209
+ lp_name = self.param_names[lp]
210
+ param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address()
211
+ param_mapping.append(param_mapping_per_group)
212
+
213
+ return param_mapping
214
+
215
+ def _link_all_hp_params(self):
216
+ for i, _ in enumerate(self.optimizer.param_groups):
217
+ real_dp_world_size = dist.get_world_size(group=self.real_dp_process_group[i])
218
+
219
+ # Link bf16 and fp32 params in partition
220
+ partition_id = dist.get_rank(group=self.real_dp_process_group[i])
221
+ partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size
222
+ flat_hp_partition = self.fp32_groups_flat_partition[i]
223
+ link_hp_params(lp_param_list=self.bf16_groups[i],
224
+ flat_hp_partition=flat_hp_partition,
225
+ gradient_dict=self.fp32_groups_gradient_dict,
226
+ offload_gradient_dict=None,
227
+ use_offload=False,
228
+ param_group_index=i,
229
+ partition_start=partition_id * partition_size,
230
+ partition_size=partition_size,
231
+ dp_group=self.real_dp_process_group[i])
232
+
233
+ def _lazy_init_hp_params_optimizer_state(self):
234
+ if not self._hp_optimizer_states_linked:
235
+ for i, _ in enumerate(self.optimizer.param_groups):
236
+ lazy_init_hp_params_optimizer_state(self.bf16_groups[i], self.fp32_groups_flat_partition[i],
237
+ self.optimizer.state)
238
+ self._hp_optimizer_states_linked = True
239
+
240
+ def initialize_optimizer_states(self):
241
+ """Take an optimizer step with zero-valued gradients to allocate internal
242
+ optimizer state.
243
+
244
+ This helps prevent memory fragmentation by allocating optimizer state at the
245
+ beginning of training instead of after activations have been allocated.
246
+ """
247
+ for param_partition, grad_partition in zip(self.fp32_groups_flat_partition,
248
+ self.fp32_groups_gradient_flat_partition):
249
+ # In case of grad acc dtype different than FP32, need to cast to high precision.
250
+ param_partition.grad = grad_partition.to(
251
+ param_partition.dtype) if grad_partition.dtype != param_partition.dtype else grad_partition
252
+
253
+ if self.grad_acc_dtype is not torch.float32:
254
+ for param_partition in self.fp32_groups_flat_partition:
255
+ param_partition.grad = None
256
+
257
+ self.clear_hp_grads()
258
+
259
+ def _split_flat_tensor(self, flat_tensor, num_elem_list):
260
+ assert sum(num_elem_list) <= flat_tensor.numel()
261
+ tensor_list = []
262
+ offset = 0
263
+ for num_elem in num_elem_list:
264
+ dense_tensor = torch.narrow(flat_tensor, 0, offset, num_elem)
265
+ tensor_list.append(dense_tensor)
266
+ offset += num_elem
267
+
268
+ return tensor_list
269
+
270
+ def _update_storage_to_flattened_tensor(self, tensor_list, flat_tensor):
271
+ updated_params = self.unflatten(flat_tensor, tensor_list)
272
+ for p, q in zip(tensor_list, updated_params):
273
+ p.data = q.data
274
+
275
+ def _flatten_dense_tensors_aligned(self, tensor_list, alignment):
276
+ return self.flatten(align_dense_tensors(tensor_list, alignment))
277
+
278
+ @torch.no_grad()
279
+ def step(self, closure=None):
280
+ if closure is not None:
281
+ raise NotImplementedError(f'{self.__class__} does not support closure.')
282
+
283
+ non_expert_grads_for_norm, expert_grads_for_norm = self.get_grads_for_norm()
284
+ non_expert_groups_norm = get_global_norm_of_tensors(input_tensors=non_expert_grads_for_norm,
285
+ mpu=self.mpu,
286
+ norm_type=self.norm_type,
287
+ use_graph=self.graph_harvesting)
288
+ all_groups_norm = non_expert_groups_norm
289
+ if self.has_moe_layers:
290
+ all_groups_norm = get_norm_with_moe_layers(non_expert_groups_norm,
291
+ mpu=self.mpu,
292
+ expert_tensors=expert_grads_for_norm,
293
+ norm_type=self.norm_type)
294
+
295
+ self._global_grad_norm = all_groups_norm
296
+
297
+ assert all_groups_norm > 0.
298
+ if self.clip_grad > 0.:
299
+ clip_tensors_by_global_norm(input_tensors=self.get_grads_for_norm(for_clipping=True),
300
+ max_norm=self.clip_grad,
301
+ global_norm=all_groups_norm,
302
+ mpu=self.mpu,
303
+ use_graph=self.graph_harvesting)
304
+
305
+ self.optimizer.step()
306
+
307
+ # We need to link optimizer state after the first step() call
308
+ self._lazy_init_hp_params_optimizer_state()
309
+
310
+ self.update_lp_params()
311
+
312
+ self.clear_hp_grads()
313
+
314
+ def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs):
315
+ """Perform a backward pass and copy the low-precision gradients to the
316
+ high-precision copy.
317
+
318
+ We copy/accumulate to the high-precision grads now to prevent accumulating in the
319
+ bf16 grads after successive backward() calls (i.e., grad accumulation steps > 1)
320
+
321
+ The low-precision grads are deallocated during this procedure.
322
+ """
323
+ self.clear_lp_grads()
324
+ loss.backward(**bwd_kwargs)
325
+
326
+ if update_hp_grads:
327
+ self.update_hp_grads(clear_lp_grads=clear_lp_grads)
328
+
329
+ @torch.no_grad()
330
+ def _update_hp_grad(self, lp, group_idx, param_idx, clear_lp_grads):
331
+ if lp.grad is None:
332
+ return
333
+
334
+ hp_grad = self.fp32_groups_gradients[group_idx][param_idx]
335
+ assert hp_grad is not None, \
336
+ f'high precision param has no gradient, lp param_id = {id(lp)} group_info = [{group_idx}][{param_idx}]'
337
+
338
+ hp_grad.data.add_(lp.grad.data.to(hp_grad.dtype).view(hp_grad.shape))
339
+ lp._hp_grad = hp_grad
340
+ self.fp32_groups_has_gradients[group_idx][param_idx] = True
341
+
342
+ # clear gradients
343
+ if clear_lp_grads:
344
+ lp.grad.zero_()
345
+
346
+ @torch.no_grad()
347
+ def _update_hp_grads_func(self, clear_lp_grads=False):
348
+ for i, group in enumerate(self.bf16_groups):
349
+ for j, lp in enumerate(group):
350
+ self._update_hp_grad(lp, i, j, clear_lp_grads)
351
+
352
+ @torch.no_grad()
353
+ def update_hp_grads(self, clear_lp_grads=False):
354
+ if self.immediate_grad_update:
355
+ return
356
+
357
+ if self.graph_harvesting:
358
+ graph_process(False, self._update_hp_grads_func, clear_lp_grads)
359
+ else:
360
+ self._update_hp_grads_func(clear_lp_grads)
361
+ #cpu op
362
+ for i, group in enumerate(self.bf16_groups):
363
+ for j, lp in enumerate(group):
364
+ if lp.grad is None:
365
+ continue
366
+ self.fp32_groups_has_gradients[i][j] = True
367
+
368
+ @torch.no_grad()
369
+ def get_grads_for_reduction(self):
370
+ if self.has_moe_layers:
371
+ return self.non_expert_gradients, self.expert_gradients
372
+ return self.non_expert_gradients, {}
373
+
374
+ @torch.no_grad()
375
+ def get_grads_for_norm(self, for_clipping=False):
376
+ """
377
+ Returns:
378
+ tuple[list[Tensor], dict[ep_name, List[Tensor]] | list:
379
+ If for_clipping, return all gradients.
380
+ Otherwise, separate and return dict of expert_grad and list of non_expert_grad
381
+ """
382
+ # (grads, expert_group_name)
383
+ expert_grads_for_norm = {}
384
+
385
+ # grads
386
+ non_expert_grads_for_norm = []
387
+ all_grads_for_clip = []
388
+
389
+ tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
390
+ assert len(self.bf16_groups) == len(self.optimizer.param_groups)
391
+ for i, group in enumerate(self.bf16_groups):
392
+ for j, lp in enumerate(group):
393
+ if not for_clipping:
394
+ if hasattr(lp, PIPE_REPLICATED) and lp.ds_pipe_replicated:
395
+ continue
396
+
397
+ # skip duplicated parameters. perform norm only on cards with tp_rank=0.
398
+ # non-duplicated parameters include:
399
+ # - Parameters with tp: Use allreducesum of mp_group.
400
+ # - Moe Parameters with ep: Use allreducesum of ep_group.
401
+ if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp) or is_moe_param(lp)):
402
+ continue
403
+
404
+ if not self.fp32_groups_has_gradients[i][j]:
405
+ continue
406
+ if not for_clipping:
407
+ param_group = self.optimizer.param_groups[i]
408
+ if self.has_moe_layers and is_moe_param_group(param_group):
409
+ if param_group['name'] not in expert_grads_for_norm:
410
+ expert_grads_for_norm[param_group['name']] = []
411
+ expert_grads_for_norm[param_group['name']].append(self.fp32_groups_gradients[i][j])
412
+ else:
413
+ non_expert_grads_for_norm.append(self.fp32_groups_gradients[i][j])
414
+ else:
415
+ all_grads_for_clip.append(self.fp32_groups_gradients[i][j])
416
+ if not for_clipping:
417
+ return non_expert_grads_for_norm, expert_grads_for_norm
418
+ return all_grads_for_clip
419
+
420
+ @torch.no_grad()
421
+ def update_lp_params(self):
422
+ for i, (bf16_partitions,
423
+ fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)):
424
+ partition_id = dist.get_rank(group=self.real_dp_process_group[i])
425
+ bf16_partitions[partition_id].data.copy_(fp32_partition.data)
426
+ # print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True)
427
+ # if i == 0:
428
+ # print_rank_0(f'{fp32_partition[:10]=}', force=True)
429
+
430
+ all_gather_dp_groups(groups_flat=self.bf16_groups_flat,
431
+ partitioned_param_groups=self.bf16_partitioned_groups,
432
+ dp_process_group=self.real_dp_process_group,
433
+ start_alignment_factor=self.nccl_start_alignment_factor,
434
+ allgather_bucket_size=self.allgather_bucket_size)
435
+
436
+ def clear_hp_grads(self):
437
+ for flat_gradients in self.fp32_groups_gradients_flat:
438
+ flat_gradients.zero_()
439
+
440
+ for i, group in enumerate(self.fp32_groups_gradients):
441
+ self.fp32_groups_has_gradients[i] = [False] * len(group)
442
+
443
+ def clear_lp_grads(self):
444
+
445
+ # using zero_() fixed memory address for graph replay
446
+ set_to_none = False if self.graph_harvesting else True
447
+ zero_grads_list = []
448
+ for group in self.bf16_groups:
449
+ for param in group:
450
+ if set_to_none:
451
+ param.grad = None
452
+ elif param.grad is not None:
453
+ if param.grad.grad_fn is not None:
454
+ param.grad.detach_()
455
+ zero_grads_list.append(param.grad)
456
+ if not set_to_none and len(zero_grads_list) > 0:
457
+ torch._foreach_zero_(zero_grads_list)
458
+
459
+ def state_dict(self):
460
+ state_dict = {}
461
+ state_dict[CLIP_GRAD] = self.clip_grad
462
+ state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict()
463
+ state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = self.fp32_groups_flat_partition
464
+ state_dict[GROUP_PADDINGS] = self.group_paddings
465
+ state_dict[PARTITION_COUNT] = self.partition_count
466
+ state_dict[DS_VERSION] = version
467
+ state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings
468
+
469
+ return state_dict
470
+
471
+ # Restore base optimizer fp32 weights from bfloat16 weights
472
+ def _restore_from_bit16_weights(self):
473
+ for i, group in enumerate(self.bf16_groups):
474
+ partition_id = dist.get_rank(group=self.real_dp_process_group[i])
475
+ for bf16_partitions, fp32_partition in zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition):
476
+ fp32_partition.data.copy_(bf16_partitions[partition_id].data)
477
+
478
+ def refresh_fp32_params(self):
479
+ self._restore_from_bit16_weights()
480
+
481
+ def load_state_dict(self,
482
+ state_dict_list,
483
+ checkpoint_folder,
484
+ load_optimizer_states=True,
485
+ load_from_fp32_weights=False,
486
+ load_serial=None):
487
+ if checkpoint_folder:
488
+ self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights)
489
+ else:
490
+ self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights)
491
+
492
+ def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False):
493
+
494
+ dp_rank = dist.get_rank(group=self.dp_process_group)
495
+ current_rank_sd = state_dict_list[dp_rank]
496
+
497
+ ckpt_version = current_rank_sd.get(DS_VERSION, False)
498
+ assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed"
499
+ ckpt_version = pkg_version.parse(ckpt_version)
500
+
501
+ self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad)
502
+
503
+ if load_optimizer_states:
504
+ print(f"_load_legacy_checkpoint current_rank_sd[BASE_OPTIMIZER_STATE]")
505
+ self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE])
506
+
507
+ if load_from_fp32_weights:
508
+ for current, saved in zip(self.fp32_groups_flat_partition,
509
+ current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]):
510
+ src_tensor = _get_padded_tensor(saved, current.numel())
511
+ current.data.copy_(src_tensor.data)
512
+
513
+ if load_optimizer_states:
514
+ self._link_all_hp_params()
515
+
516
+ def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights):
517
+ self.load_hp_checkpoint_state_from_checkpoint_dir("bf16_groups", checkpoint_folder)
518
+
519
+ def _load_global_state(self, sd):
520
+ pass
521
+
522
+ @property
523
+ def param_groups(self):
524
+ """Forward the wrapped optimizer's parameters."""
525
+ return self.optimizer.param_groups
526
+
527
+ def accumulate_hp_grads_and_remove_lp(self, lp_param, group_idx, param_idx):
528
+ assert self.immediate_grad_update
529
+ self._update_hp_grad(lp_param, group_idx, param_idx, clear_lp_grads=True)
530
+
531
+ def create_grad_acc_hooks(self):
532
+ self.grad_accs = []
533
+ for i, param_group in enumerate(self.bf16_groups):
534
+ for j, param in enumerate(param_group):
535
+ if param.requires_grad:
536
+
537
+ def wrapper(param, i, j):
538
+ param_tmp = param.expand_as(param)
539
+ grad_acc = param_tmp.grad_fn.next_functions[0][0]
540
+
541
+ def accumulate_hp_grads_and_remove_lp(*notneeded):
542
+ self.accumulate_hp_grads_and_remove_lp(param, i, j)
543
+
544
+ grad_acc.register_hook(accumulate_hp_grads_and_remove_lp)
545
+ self.grad_accs.append(grad_acc)
546
+
547
+ wrapper(param, i, j)
548
+
549
+
550
+ def _get_padded_tensor(src_tensor, size):
551
+ if src_tensor.numel() >= size:
552
+ return src_tensor
553
+ padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device)
554
+ slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel())
555
+ slice_tensor.data.copy_(src_tensor.data)
556
+ return padded_tensor
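For readers unfamiliar with the autograd trick used by create_grad_acc_hooks() above, the following self-contained sketch (not DeepSpeed code) shows the same pattern: expand_as() yields a non-leaf view whose grad_fn points back at the parameter's AccumulateGrad node, and a hook registered on that node fires right after the gradient has been written to param.grad, which is where the bf16 optimizer copies it into its fp32 buffer and drops the low-precision grad.

```python
import torch

param = torch.nn.Parameter(torch.randn(4))

# expand_as() creates a non-leaf view; its grad_fn's first next_function is
# the AccumulateGrad node that writes param.grad during backward.
param_tmp = param.expand_as(param)
grad_acc = param_tmp.grad_fn.next_functions[0][0]

def on_grad_accumulated(*_):
    # param.grad is already populated when this hook runs.
    print("grad written, norm =", param.grad.norm().item())

grad_acc.register_hook(on_grad_accumulated)

(param * 2.0).sum().backward()  # triggers the hook once per backward pass
```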
venv/lib/python3.10/site-packages/deepspeed/runtime/compiler.py ADDED
@@ -0,0 +1,166 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Union, Callable, Dict, Any
7
+ import importlib
8
+ import torch
9
+ from ..pydantic_v1 import validator
10
+ from .config_utils import DeepSpeedConfigModel
11
+
12
+ COMPILE_CONFIG = "compile"
13
+
14
+
15
+ def is_compile_supported():
16
+ return hasattr(torch, "compiler")
17
+
18
+
19
+ def disable(func):
20
+ if is_compile_supported():
21
+ return torch.compiler.disable(func)
22
+ return func
23
+
24
+
25
+ def get_compile_config(param_dict):
26
+ if COMPILE_CONFIG in param_dict:
27
+ compile_config_dict = param_dict[COMPILE_CONFIG]
28
+ else:
29
+ compile_config_dict = {}
30
+ return CompileConfig(**compile_config_dict)
31
+
32
+
33
+ def get_backend_fn(backend: Union[str, Callable]) -> Union[str, Callable]:
34
+ if isinstance(backend, Callable):
35
+ return backend
36
+
37
+ elif isinstance(backend, str):
38
+ if backend in torch._dynamo.list_backends(exclude_tags=()):
39
+ return backend
40
+
41
+ # Get module name from backend name
42
+ module_name = '.'.join(backend.split('.')[:-1])
43
+ fn_name = backend.split('.')[-1]
44
+
45
+ try:
46
+ module = importlib.import_module(module_name)
47
+ backend_fn = getattr(module, fn_name)
48
+ except ImportError:
49
+ raise ValueError(
50
+ f"The backend {backend} is not in the list of available backends and could not be imported.")
51
+ return backend_fn
52
+
53
+ raise ValueError(f"backend for torch.compile must be a string or Callable: {backend}")
54
+
55
+
56
+ class CompileConfig(DeepSpeedConfigModel):
57
+ """
58
+ [EXPERIMENTAL] This configuration enables users to activate `torch.compile` within DeepSpeed and customize its settings.
59
+ Please be aware that these features and API designs are experimental and subject to change.
60
+ """
61
+
62
+ enabled: bool = False
63
+ """
64
+ Enable torch.compile when True.
65
+ """
66
+
67
+ backend: str = "inductor"
68
+ """
69
+ Passed to `backend` argument of torch.compile.
70
+ If the given value is not in torch._dynamo.list_backends(),
71
+ DeepSpeed attempts to import and instantiate the module with the given name.
72
+ """
73
+
74
+ kwargs: Dict[str, Any] = {}
75
+ """
76
+ Passed to `kwargs` argument of torch.compile.
77
+ """
78
+
79
+ @validator("enabled")
80
+ def validate_enabled(cls, field_value, values):
81
+ if field_value and not is_compile_supported():
82
+ raise ValueError("torch.compile is not supported on this version of PyTorch.")
83
+ return field_value
84
+
85
+
86
+ class CompiledModuleWrapper(torch.nn.Module):
87
+
88
+ def __init__(self, module, compile_config: Union[CompileConfig, None] = None):
89
+ super().__init__()
90
+
91
+ assert is_compile_supported(), "torch.compile is not supported on this version of PyTorch."
92
+
93
+ modules = self.__dict__.get('_modules')
94
+ modules['wrapped'] = module
95
+ self.__dict__['wrapped'] = module
96
+ self._is_compiled = False
97
+ self._backend = get_backend_fn(compile_config.backend)
98
+ self._compile_kwargs = compile_config.kwargs
99
+ self._compiler_fn = None
100
+
101
+ def __getattr__(self, name):
102
+ return getattr(self.__dict__['wrapped'], name)
103
+
104
+ def set_backend(self, backend: Union[str, Callable]):
105
+ """Set the backend for torch.compile.
106
+
107
+ Args:
108
+ backend (Union[str, Callable]): backend name or a function that takes a torch.nn.Module and returns a compiled module.
109
+ You can directly pass a function that works as a backend.
110
+ See also `backend` field in `CompileConfig` for more details.
111
+ """
112
+ self._backend = get_backend_fn(backend)
113
+
114
+ def set_torch_compile_kwargs(self, kwargs: Dict[str, Union[str, Any]]) -> None:
115
+ """Set kwargs for torch.compile. Kwargs that are set in DeepSpeed config will be overwritten.
116
+ You can also pass a backend name with "backend" key to change the backend.
117
+
118
+ Args:
119
+ kwargs (Dict[str, Union[str, Any]]): kwargs passed to torch.compile.
120
+ """
121
+
122
+ if "backend" in kwargs:
123
+ raise ValueError("backend cannot be set as compile kwargs. Use set_backend instead.")
124
+ self._compile_kwargs.update(kwargs)
125
+
126
+ def set_compiler_fn(self, compiler_fn: Callable) -> None:
127
+ """Set a function to be used for compiling the module.
128
+ This function should take a torch.nn.Module as input and return a compiled module.
129
+ Note that other compile options are ignored when a compiler_fn is set.
130
+
131
+ Example:
132
+ ```python
133
+ def my_compiler_fn(module: torch.nn.Module):
134
+ ...
135
+ return torch.compile(module, ...)
136
+
137
+ engine.set_compiler_fn(my_compiler_fn)
138
+ ```
139
+ """
140
+ self._compiler_fn = compiler_fn
141
+
142
+ def forward(self, *args, **kwargs) -> Any:
143
+ if not self.is_compiled:
144
+ if self._compiler_fn is None:
145
+ self.__dict__['wrapped'] = torch.compile(self.wrapped, backend=self._backend, **self._compile_kwargs)
146
+ else:
147
+ self.__dict__['wrapped'] = self._compiler_fn(self.wrapped)
148
+ self._is_compiled = True
149
+
150
+ return self.__dict__['wrapped'](*args, **kwargs)
151
+
152
+ @property
153
+ def is_compiled(self) -> bool:
154
+ return self._is_compiled
155
+
156
+ @property
157
+ def backend(self) -> Union[str, Callable]:
158
+ return self._backend
159
+
160
+ @property
161
+ def torch_compile_kwargs(self) -> Dict[str, Any]:
162
+ return self._compile_kwargs
163
+
164
+ @property
165
+ def compiler_fn(self) -> Union[Callable, None]:
166
+ return self._compiler_fn
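As a rough illustration of how this module is driven from the DeepSpeed config (a sketch, assuming the experimental "compile" section keeps the shape defined by CompileConfig above), the relevant block of a ds_config might look like the following; the CompiledModuleWrapper wrapping itself happens inside the engine.

```python
ds_config = {
    "train_micro_batch_size_per_gpu": 1,
    "compile": {
        "enabled": True,               # the validator raises if torch.compiler is unavailable
        "backend": "inductor",         # or a dotted path / callable, see get_backend_fn()
        "kwargs": {"dynamic": False},  # forwarded verbatim to torch.compile
    },
}
```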
venv/lib/python3.10/site-packages/deepspeed/runtime/config.py ADDED
@@ -0,0 +1,1039 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ from typing import Union
8
+ from enum import Enum
9
+
10
+ import torch
11
+ import json
12
+ import hjson
13
+ import copy
14
+ import base64
15
+
16
+ from .constants import *
17
+ from .fp16.loss_scaler import (
18
+ INITIAL_LOSS_SCALE,
19
+ SCALE_WINDOW,
20
+ DELAYED_SHIFT,
21
+ CONSECUTIVE_HYSTERESIS,
22
+ MIN_LOSS_SCALE,
23
+ )
24
+ from .config_utils import (
25
+ get_scalar_param,
26
+ dict_raise_error_on_duplicate_keys,
27
+ ScientificNotationEncoder,
28
+ )
29
+ from .zero.config import get_zero_config, ZeroStageEnum
30
+ from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
31
+ from ..comm.config import DeepSpeedCommsConfig
32
+ from ..monitor.config import get_monitor_config
33
+ from ..inference.config import WeightQuantConfig
34
+ from .compiler import get_compile_config
35
+
36
+ from deepspeed import comm as dist
37
+ from deepspeed.runtime.config_utils import DeepSpeedConfigModel
38
+
39
+ from ..git_version_info import version as __version__
40
+ from ..utils import logger
41
+
42
+ from ..elasticity import (
43
+ elasticity_enabled,
44
+ compute_elastic_config,
45
+ ensure_immutable_elastic_config,
46
+ )
47
+ from ..elasticity.config import ElasticityConfigError
48
+ from ..elasticity.constants import (
49
+ ELASTICITY,
50
+ IGNORE_NON_ELASTIC_BATCH_INFO,
51
+ IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
52
+ MODEL_PARALLEL_SIZE,
53
+ MODEL_PARALLEL_SIZE_DEFAULT,
54
+ NUM_GPUS_PER_NODE,
55
+ NUM_GPUS_PER_NODE_DEFAULT,
56
+ )
57
+
58
+ from ..profiling.config import DeepSpeedFlopsProfilerConfig
59
+ from ..autotuning.config import DeepSpeedAutotuningConfig
60
+ from ..nebula.config import DeepSpeedNebulaConfig
61
+
62
+ from ..compression.config import get_compression_config, get_quantize_enabled
63
+ from ..compression.constants import *
64
+ from .swap_tensor.aio_config import get_aio_config
65
+
66
+ from .data_pipeline.config import get_data_efficiency_enabled, get_data_efficiency_config, get_curriculum_enabled_legacy, get_curriculum_params_legacy
67
+ from .data_pipeline.constants import *
68
+
69
+ TENSOR_CORE_ALIGN_SIZE = 8
70
+
71
+ ADAGRAD_OPTIMIZER = 'adagrad'
72
+ ADAM_OPTIMIZER = 'adam'
73
+ ADAMW_OPTIMIZER = 'adamw'
74
+ LAMB_OPTIMIZER = 'lamb'
75
+ ONEBIT_ADAM_OPTIMIZER = 'onebitadam'
76
+ ZERO_ONE_ADAM_OPTIMIZER = 'zerooneadam'
77
+ ONEBIT_LAMB_OPTIMIZER = 'onebitlamb'
78
+ MUADAM_OPTIMIZER = 'muadam'
79
+ MUADAMW_OPTIMIZER = 'muadamw'
80
+ MUSGD_OPTIMIZER = 'musgd'
81
+ LION_OPTIMIZER = 'lion'
82
+ DEEPSPEED_OPTIMIZERS = [
83
+ ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER,
84
+ ZERO_ONE_ADAM_OPTIMIZER, MUADAM_OPTIMIZER, MUADAMW_OPTIMIZER, MUSGD_OPTIMIZER, LION_OPTIMIZER
85
+ ]
86
+
87
+ # extra optimizer parameters for adam/adamw
88
+ TORCH_ADAM_PARAM = "torch_adam"
89
+
90
+ # default to adamw logic for adam/adamw optimizers unless user explicitly opts out
91
+ ADAM_W_MODE = "adam_w_mode"
92
+ ADAM_W_MODE_DEFAULT = True
93
+
94
+
95
+ class DeepSpeedConfigError(Exception):
96
+ pass
97
+
98
+
99
+ class DtypeEnum(Enum):
100
+ # The torch dtype must always be the first value (so we return torch.dtype)
101
+ fp16 = torch.float16, "torch.float16", "fp16", "float16", "half"
102
+ fp32 = torch.float32, "torch.float32", "fp32", "float32", "float"
103
+ int8 = torch.int8, "torch.int8", "int8"
104
+ bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16"
105
+
106
+ # Copied from https://stackoverflow.com/a/43210118
107
+ # Allows us to use multiple values for each Enum index and returns first
108
+ # listed value when Enum is called
109
+ def __new__(cls, *values):
110
+ obj = object.__new__(cls)
111
+ # first value is canonical value
112
+ obj._value_ = values[0]
113
+ for other_value in values[1:]:
114
+ cls._value2member_map_[other_value] = obj
115
+ obj._all_values = values
116
+ return obj
117
+
118
+ def __repr__(self):
119
+ return "<%s.%s: %s>" % (
120
+ self.__class__.__name__,
121
+ self._name_,
122
+ ", ".join([repr(v) for v in self._all_values]),
123
+ )
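A quick illustration of the multi-valued enum above (a sketch, assuming deepspeed.runtime.config is importable): every alias listed for a member resolves to that member, and .value is always the torch dtype because it is listed first.

```python
import torch
from deepspeed.runtime.config import DtypeEnum

assert DtypeEnum("bf16") is DtypeEnum.bf16
assert DtypeEnum(torch.bfloat16) is DtypeEnum.bf16
assert DtypeEnum("half").value == torch.float16  # canonical value is the torch dtype
```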
124
+
125
+
126
+ def get_pld_enabled(param_dict):
127
+ if PROGRESSIVE_LAYER_DROP in param_dict.keys():
128
+ return get_scalar_param(param_dict[PROGRESSIVE_LAYER_DROP], PLD_ENABLED, PLD_ENABLED_DEFAULT)
129
+ else:
130
+ return False
131
+
132
+
133
+ def get_pld_params(param_dict):
134
+ if PROGRESSIVE_LAYER_DROP in param_dict.keys():
135
+ pld_params = copy.copy(param_dict[PROGRESSIVE_LAYER_DROP])
136
+ pld_params.pop(PLD_ENABLED)
137
+ return pld_params
138
+ else:
139
+ return False
140
+
141
+
142
+ def get_amp_enabled(param_dict):
143
+ if AMP in param_dict.keys():
144
+ return get_scalar_param(param_dict[AMP], AMP_ENABLED, AMP_ENABLED_DEFAULT)
145
+ else:
146
+ return False
147
+
148
+
149
+ def get_amp_params(param_dict):
150
+ if AMP in param_dict.keys():
151
+ amp_params = copy.copy(param_dict[AMP])
152
+ amp_params.pop(AMP_ENABLED)
153
+ return amp_params
154
+ else:
155
+ return False
156
+
157
+
158
+ def get_fp16_enabled(param_dict):
159
+ if FP16 in param_dict.keys():
160
+ return get_scalar_param(param_dict[FP16], FP16_ENABLED, FP16_ENABLED_DEFAULT)
161
+ else:
162
+ return False
163
+
164
+
165
+ def get_bfloat16_enabled(param_dict):
166
+ for key in [BFLOAT16, BFLOAT16_OLD]:
167
+ if key in param_dict.keys():
168
+ return get_scalar_param(param_dict[key], BFLOAT16_ENABLED, BFLOAT16_ENABLED_DEFAULT)
169
+ return False
170
+
171
+
172
+ def get_bfloat16_immediate_grad_update(param_dict):
173
+ for key in [BFLOAT16, BFLOAT16_OLD]:
174
+ if key in param_dict.keys():
175
+ return get_scalar_param(param_dict[key], BFLOAT16_IMMEDIATE_GRAD_UPDATE,
176
+ BFLOAT16_IMMEDIATE_GRAD_UPDATE_DEFAULT)
177
+ return False
178
+
179
+
180
+ def get_fp16_master_weights_and_grads_enabled(param_dict):
181
+ if get_fp16_enabled(param_dict):
182
+ return get_scalar_param(param_dict[FP16], FP16_MASTER_WEIGHTS_AND_GRADS, FP16_MASTER_WEIGHTS_AND_GRADS_DEFAULT)
183
+ else:
184
+ return False
185
+
186
+
187
+ def get_fp16_auto_cast(param_dict):
188
+ if get_fp16_enabled(param_dict):
189
+ return get_scalar_param(param_dict[FP16], FP16_AUTO_CAST, FP16_AUTO_CAST_DEFAULT)
190
+
191
+
192
+ def get_loss_scale(param_dict):
193
+ if get_fp16_enabled(param_dict):
194
+ return get_scalar_param(param_dict[FP16], FP16_LOSS_SCALE, FP16_LOSS_SCALE_DEFAULT)
195
+ elif get_bfloat16_enabled(param_dict):
196
+ return 1.0
197
+ else:
198
+ return FP16_LOSS_SCALE_DEFAULT
199
+
200
+
201
+ def get_initial_dynamic_scale(param_dict):
202
+ if get_fp16_enabled(param_dict):
203
+ initial_scale_power = get_scalar_param(param_dict[FP16], FP16_INITIAL_SCALE_POWER,
204
+ FP16_INITIAL_SCALE_POWER_DEFAULT)
205
+ elif get_bfloat16_enabled(param_dict):
206
+ initial_scale_power = 0
207
+ else:
208
+ initial_scale_power = FP16_INITIAL_SCALE_POWER_DEFAULT
209
+
210
+ return 2**initial_scale_power
211
+
212
+
213
+ def get_dynamic_loss_scale_args(param_dict):
214
+ loss_scale_args = None
215
+ if get_fp16_enabled(param_dict):
216
+ fp16_dict = param_dict[FP16]
217
+ dynamic_loss_args = [
218
+ FP16_INITIAL_SCALE_POWER,
219
+ FP16_LOSS_SCALE_WINDOW,
220
+ FP16_MIN_LOSS_SCALE,
221
+ FP16_HYSTERESIS,
222
+ FP16_CONSECUTIVE_HYSTERESIS,
223
+ ]
224
+ if any(arg in list(fp16_dict.keys()) for arg in dynamic_loss_args):
225
+ init_scale = get_scalar_param(fp16_dict, FP16_INITIAL_SCALE_POWER, FP16_INITIAL_SCALE_POWER_DEFAULT)
226
+ scale_window = get_scalar_param(fp16_dict, FP16_LOSS_SCALE_WINDOW, FP16_LOSS_SCALE_WINDOW_DEFAULT)
227
+ delayed_shift = get_scalar_param(fp16_dict, FP16_HYSTERESIS, FP16_HYSTERESIS_DEFAULT)
228
+ consecutive_hysteresis = get_scalar_param(fp16_dict, FP16_CONSECUTIVE_HYSTERESIS,
229
+ FP16_CONSECUTIVE_HYSTERESIS_DEFAULT)
230
+ min_loss_scale = get_scalar_param(fp16_dict, FP16_MIN_LOSS_SCALE, FP16_MIN_LOSS_SCALE_DEFAULT)
231
+ loss_scale_args = {
232
+ INITIAL_LOSS_SCALE: 2**init_scale,
233
+ SCALE_WINDOW: scale_window,
234
+ DELAYED_SHIFT: delayed_shift,
235
+ CONSECUTIVE_HYSTERESIS: consecutive_hysteresis,
236
+ MIN_LOSS_SCALE: min_loss_scale,
237
+ }
238
+
239
+ return loss_scale_args
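For reference, a sketch of the fp16 section that get_dynamic_loss_scale_args() reads; the values shown are the documented defaults, listed purely for illustration, and the resulting INITIAL_LOSS_SCALE is 2**initial_scale_power.

```python
fp16_section = {
    "enabled": True,
    "initial_scale_power": 16,       # INITIAL_LOSS_SCALE becomes 2**16 = 65536
    "loss_scale_window": 1000,       # SCALE_WINDOW
    "hysteresis": 2,                 # DELAYED_SHIFT
    "consecutive_hysteresis": False,
    "min_loss_scale": 1,             # MIN_LOSS_SCALE
}
```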
240
+
241
+
242
+ def get_gradient_accumulation_steps(param_dict):
243
+ return get_scalar_param(param_dict, GRADIENT_ACCUMULATION_STEPS, GRADIENT_ACCUMULATION_STEPS_DEFAULT)
244
+
245
+
246
+ def get_sparse_gradients_enabled(param_dict):
247
+ return get_scalar_param(param_dict, SPARSE_GRADIENTS, SPARSE_GRADIENTS_DEFAULT)
248
+
249
+
250
+ def get_communication_data_type(param_dict,
251
+ comm_type=COMMUNICATION_DATA_TYPE,
252
+ comm_data_type_default=COMMUNICATION_DATA_TYPE_DEFAULT):
253
+ val = get_scalar_param(param_dict, comm_type, comm_data_type_default)
254
+ val = val.lower() if val is not None else val
255
+ if val is None:
256
+ return val # we must determine it by other parameters
257
+ elif val == "fp32":
258
+ return torch.float32
259
+ elif val == "fp16":
260
+ return torch.float16
261
+ elif val == "bf16":
262
+ return torch.bfloat16
263
+
264
+ raise ValueError(f"Invalid communication_data_type. Supported data types: ['fp16', 'bf16', 'fp32']. Got: {val}")
265
+
266
+
267
+ def get_prescale_gradients(param_dict):
268
+ return get_scalar_param(param_dict, PRESCALE_GRADIENTS, PRESCALE_GRADIENTS_DEFAULT)
269
+
270
+
271
+ def get_gradient_predivide_factor(param_dict):
272
+ return get_scalar_param(param_dict, GRADIENT_PREDIVIDE_FACTOR, GRADIENT_PREDIVIDE_FACTOR_DEFAULT)
273
+
274
+
275
+ def get_steps_per_print(param_dict):
276
+ return get_scalar_param(param_dict, STEPS_PER_PRINT, STEPS_PER_PRINT_DEFAULT)
277
+
278
+
279
+ def get_disable_allgather(param_dict):
280
+ return get_scalar_param(param_dict, DISABLE_ALLGATHER, DISABLE_ALLGATHER_DEFAULT)
281
+
282
+
283
+ def get_dump_state(param_dict):
284
+ return get_scalar_param(param_dict, DUMP_STATE, DUMP_STATE_DEFAULT)
285
+
286
+
287
+ def get_gradient_clipping(param_dict):
288
+ return get_scalar_param(param_dict, GRADIENT_CLIPPING, GRADIENT_CLIPPING_DEFAULT)
289
+
290
+
291
+ def get_graph_harvesting(param_dict):
292
+ return get_scalar_param(param_dict, GRAPH_HARVESTING, GRAPH_HARVESTING_DEFAULT)
293
+
294
+
295
+ def get_sparse_attention(param_dict):
296
+ if SPARSE_ATTENTION in param_dict.keys():
297
+ sparsity = param_dict[SPARSE_ATTENTION]
298
+ mode = get_sparse_attention_mode(sparsity)
299
+
300
+ if mode == SPARSE_DENSE_MODE:
301
+ return get_sparse_dense_config(sparsity)
302
+ elif mode == SPARSE_FIXED_MODE:
303
+ return get_sparse_fixed_config(sparsity)
304
+ elif mode == SPARSE_VARIABLE_MODE:
305
+ return get_sparse_variable_config(sparsity)
306
+ elif mode == SPARSE_BIGBIRD_MODE:
307
+ return get_sparse_bigbird_config(sparsity)
308
+ elif mode == SPARSE_BSLONGFORMER_MODE:
309
+ return get_sparse_bslongformer_config(sparsity)
310
+ else:
311
+ raise NotImplementedError(f"Given sparsity mode, {mode}, has not been implemented yet!")
312
+
313
+ else:
314
+ return None
315
+
316
+
317
+ def get_sparse_dense_config(sparsity):
318
+ block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
319
+ return {SPARSE_MODE: SPARSE_DENSE_MODE, SPARSE_BLOCK: block}
320
+
321
+
322
+ def get_sparse_fixed_config(sparsity):
323
+ block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
324
+ different_layout_per_head = get_scalar_param(
325
+ sparsity,
326
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
327
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
328
+ )
329
+ num_local_blocks = get_scalar_param(sparsity, SPARSE_NUM_LOCAL_BLOCKS, SPARSE_NUM_LOCAL_BLOCKS_DEFAULT)
330
+ num_global_blocks = get_scalar_param(sparsity, SPARSE_NUM_GLOBAL_BLOCKS, SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT)
331
+ attention = get_scalar_param(sparsity, SPARSE_ATTENTION_TYPE, SPARSE_ATTENTION_TYPE_DEFAULT)
332
+ horizontal_global_attention = get_scalar_param(
333
+ sparsity,
334
+ SPARSE_HORIZONTAL_GLOBAL_ATTENTION,
335
+ SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT,
336
+ )
337
+ num_different_global_patterns = get_scalar_param(
338
+ sparsity,
339
+ SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS,
340
+ SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS_DEFAULT,
341
+ )
342
+
343
+ return {
344
+ SPARSE_MODE: SPARSE_FIXED_MODE,
345
+ SPARSE_BLOCK: block,
346
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
347
+ SPARSE_NUM_LOCAL_BLOCKS: num_local_blocks,
348
+ SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks,
349
+ SPARSE_ATTENTION_TYPE: attention,
350
+ SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention,
351
+ SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS: num_different_global_patterns,
352
+ }
353
+
354
+
355
+ def get_sparse_variable_config(sparsity):
356
+ block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
357
+ different_layout_per_head = get_scalar_param(
358
+ sparsity,
359
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
360
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
361
+ )
362
+ num_random_blocks = get_scalar_param(sparsity, SPARSE_NUM_RANDOM_BLOCKS, SPARSE_NUM_RANDOM_BLOCKS_DEFAULT)
363
+ local_window_blocks = get_scalar_param(sparsity, SPARSE_LOCAL_WINDOW_BLOCKS, SPARSE_LOCAL_WINDOW_BLOCKS_DEFAULT)
364
+ global_block_indices = get_scalar_param(sparsity, SPARSE_GLOBAL_BLOCK_INDICES, SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT)
365
+ global_block_end_indices = get_scalar_param(
366
+ sparsity,
367
+ SPARSE_GLOBAL_BLOCK_END_INDICES,
368
+ SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT,
369
+ )
370
+ attention = get_scalar_param(sparsity, SPARSE_ATTENTION_TYPE, SPARSE_ATTENTION_TYPE_DEFAULT)
371
+ horizontal_global_attention = get_scalar_param(
372
+ sparsity,
373
+ SPARSE_HORIZONTAL_GLOBAL_ATTENTION,
374
+ SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT,
375
+ )
376
+
377
+ return {
378
+ SPARSE_MODE: SPARSE_VARIABLE_MODE,
379
+ SPARSE_BLOCK: block,
380
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
381
+ SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks,
382
+ SPARSE_LOCAL_WINDOW_BLOCKS: local_window_blocks,
383
+ SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices,
384
+ SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices,
385
+ SPARSE_ATTENTION_TYPE: attention,
386
+ SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention,
387
+ }
388
+
389
+
390
+ def get_sparse_bigbird_config(sparsity):
391
+ block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
392
+ different_layout_per_head = get_scalar_param(
393
+ sparsity,
394
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
395
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
396
+ )
397
+ num_random_blocks = get_scalar_param(sparsity, SPARSE_NUM_RANDOM_BLOCKS, SPARSE_NUM_RANDOM_BLOCKS_DEFAULT)
398
+ num_sliding_window_blocks = get_scalar_param(
399
+ sparsity,
400
+ SPARSE_NUM_SLIDING_WINDOW_BLOCKS,
401
+ SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT,
402
+ )
403
+ num_global_blocks = get_scalar_param(sparsity, SPARSE_NUM_GLOBAL_BLOCKS, SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT)
404
+
405
+ return {
406
+ SPARSE_MODE: SPARSE_BIGBIRD_MODE,
407
+ SPARSE_BLOCK: block,
408
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
409
+ SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks,
410
+ SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks,
411
+ SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks,
412
+ }
413
+
414
+
415
+ def get_sparse_bslongformer_config(sparsity):
416
+ block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
417
+ different_layout_per_head = get_scalar_param(
418
+ sparsity,
419
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
420
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
421
+ )
422
+ num_sliding_window_blocks = get_scalar_param(
423
+ sparsity,
424
+ SPARSE_NUM_SLIDING_WINDOW_BLOCKS,
425
+ SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT,
426
+ )
427
+ global_block_indices = get_scalar_param(sparsity, SPARSE_GLOBAL_BLOCK_INDICES, SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT)
428
+ global_block_end_indices = get_scalar_param(
429
+ sparsity,
430
+ SPARSE_GLOBAL_BLOCK_END_INDICES,
431
+ SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT,
432
+ )
433
+
434
+ return {
435
+ SPARSE_MODE: SPARSE_BSLONGFORMER_MODE,
436
+ SPARSE_BLOCK: block,
437
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
438
+ SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks,
439
+ SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices,
440
+ SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices,
441
+ }
442
+
443
+
444
+ def get_sparse_attention_mode(param_dict):
445
+ if SPARSE_MODE in param_dict.keys():
446
+ return param_dict[SPARSE_MODE]
447
+ else:
448
+ return SPARSE_MODE_DEFAULT
449
+
450
+
451
+ def get_sparse_attention_type(param_dict):
452
+ if SPARSE_ATTENTION_TYPE in param_dict.keys():
453
+ return param_dict[SPARSE_ATTENTION_TYPE]
454
+ else:
455
+ return SPARSE_ATTENTION_TYPE_DEFAULT
456
+
457
+
458
+ def get_pipeline_config(param_dict):
459
+ """Parses pipeline engine configuration. """
460
+ default_pipeline = {
461
+ "stages": "auto",
462
+ "partition": "best",
463
+ "seed_layers": False,
464
+ "activation_checkpoint_interval": 0,
465
+ "pipe_partitioned": True,
466
+ "grad_partitioned": True,
467
+ }
468
+ config = default_pipeline
469
+ for key, val in param_dict.get("pipeline", {}).items():
470
+ config[key] = val
471
+ return config
472
+
473
+
474
+ def get_optimizer_name(param_dict):
475
+ if OPTIMIZER in param_dict.keys() and TYPE in param_dict[OPTIMIZER].keys():
476
+ return param_dict[OPTIMIZER][TYPE]
477
+ else:
478
+ return OPTIMIZER_TYPE_DEFAULT
479
+
480
+
481
+ def get_optimizer_params(param_dict):
482
+ if (get_optimizer_name(param_dict) is not None and OPTIMIZER_PARAMS in param_dict[OPTIMIZER].keys()):
483
+ return param_dict[OPTIMIZER][OPTIMIZER_PARAMS]
484
+ else:
485
+ return None
486
+
487
+
488
+ def get_optimizer_gradient_clipping(param_dict):
489
+ optimizer_params = get_optimizer_params(param_dict)
490
+ if optimizer_params is not None and MAX_GRAD_NORM in optimizer_params.keys():
491
+ return optimizer_params[MAX_GRAD_NORM]
492
+ else:
493
+ return None
494
+
495
+
496
+ def get_optimizer_legacy_fusion(param_dict):
497
+ if OPTIMIZER in param_dict.keys() and LEGACY_FUSION in param_dict[OPTIMIZER].keys():
498
+ return param_dict[OPTIMIZER][LEGACY_FUSION]
499
+ else:
500
+ return LEGACY_FUSION_DEFAULT
501
+
502
+
503
+ def get_zero_allow_untested_optimizer(param_dict):
504
+ return get_scalar_param(param_dict, ZERO_ALLOW_UNTESTED_OPTIMIZER, ZERO_ALLOW_UNTESTED_OPTIMIZER_DEFAULT)
505
+
506
+
507
+ def get_zero_force_ds_cpu_optimizer(param_dict):
508
+ return get_scalar_param(param_dict, ZERO_FORCE_DS_CPU_OPTIMIZER, ZERO_FORCE_DS_CPU_OPTIMIZER_DEFAULT)
509
+
510
+
511
+ def get_scheduler_name(param_dict):
512
+ if SCHEDULER in param_dict.keys() and TYPE in param_dict[SCHEDULER].keys():
513
+ return param_dict[SCHEDULER][TYPE]
514
+ else:
515
+ return SCHEDULER_TYPE_DEFAULT
516
+
517
+
518
+ def get_scheduler_params(param_dict):
519
+ if (get_scheduler_name(param_dict) is not None and SCHEDULER_PARAMS in param_dict[SCHEDULER].keys()):
520
+ return param_dict[SCHEDULER][SCHEDULER_PARAMS]
521
+ else:
522
+ return None
523
+
524
+
525
+ def get_train_batch_size(param_dict):
526
+ return get_scalar_param(param_dict, TRAIN_BATCH_SIZE, TRAIN_BATCH_SIZE_DEFAULT)
527
+
528
+
529
+ def get_train_micro_batch_size_per_gpu(param_dict):
530
+ return get_scalar_param(
531
+ param_dict,
532
+ TRAIN_MICRO_BATCH_SIZE_PER_GPU,
533
+ TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT,
534
+ )
535
+
536
+
537
+ def get_wall_clock_breakdown(param_dict):
538
+ return get_scalar_param(param_dict, WALL_CLOCK_BREAKDOWN, WALL_CLOCK_BREAKDOWN_DEFAULT)
539
+
540
+
541
+ def get_memory_breakdown(param_dict):
542
+ return get_scalar_param(param_dict, MEMORY_BREAKDOWN, MEMORY_BREAKDOWN_DEFAULT)
543
+
544
+
545
+ class HybridEngineConfig(DeepSpeedConfigModel):
546
+ enabled: bool = False
547
+ max_out_tokens: int = 512
548
+ inference_tp_size: int = 1
549
+ release_inference_cache: bool = False
550
+ pin_parameters: bool = True
551
+ tp_gather_partition_size: int = 8
552
+
553
+
554
+ def get_hybrid_engine_config(param_dict):
555
+ hybrid_engine_config_dict = param_dict.get("hybrid_engine", {})
556
+ hybrid_engine_config = HybridEngineConfig(**hybrid_engine_config_dict)
557
+ return hybrid_engine_config
558
+
559
+
560
+ def get_expert_data_topo_config(param_dict):
561
+ return get_scalar_param(param_dict, USE_DATA_BEFORE_EXPERT_PARALLEL, USE_DATA_BEFORE_EXPERT_PARALLEL_DEFAULT)
562
+
563
+
564
+ def get_eigenvalue_config(param_dict):
565
+ if get_quantize_enabled(param_dict):
566
+ param_dict = param_dict[QUANTIZE_TRAINING]
567
+ assert not get_eigenvalue_enabled(param_dict), "Eigenvalue based MoQ is temporarily disabled"
568
+ return (
569
+ get_eigenvalue_enabled(param_dict),
570
+ get_eigenvalue_verbose(param_dict),
571
+ get_eigenvalue_max_iter(param_dict),
572
+ get_eigenvalue_tol(param_dict),
573
+ get_eigenvalue_stability(param_dict),
574
+ get_eigenvalue_gas_boundary_resolution(param_dict),
575
+ get_eigenvalue_layer_name(param_dict),
576
+ get_eigenvalue_layer_num(param_dict),
577
+ )
578
+ else:
579
+ return (
580
+ EIGENVALUE_ENABLED_DEFAULT,
581
+ EIGENVALUE_VERBOSE_DEFAULT,
582
+ EIGENVALUE_MAX_ITER_DEFAULT,
583
+ EIGENVALUE_TOL_DEFAULT,
584
+ EIGENVALUE_STABILITY_DEFAULT,
585
+ EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT,
586
+ EIGENVALUE_LAYER_NAME_DEFAULT,
587
+ EIGENVALUE_LAYER_NUM_DEFAULT,
588
+ )
589
+
590
+
591
+ def get_eigenvalue_enabled(param_dict):
592
+ if EIGENVALUE in param_dict.keys():
593
+ return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_ENABLED, EIGENVALUE_ENABLED_DEFAULT)
594
+ else:
595
+ return EIGENVALUE_ENABLED_DEFAULT
596
+
597
+
598
+ def get_eigenvalue_verbose(param_dict):
599
+ if EIGENVALUE in param_dict.keys():
600
+ return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_VERBOSE, EIGENVALUE_VERBOSE_DEFAULT)
601
+ else:
602
+ return EIGENVALUE_VERBOSE_DEFAULT
603
+
604
+
605
+ def get_eigenvalue_max_iter(param_dict):
606
+ if EIGENVALUE in param_dict.keys():
607
+ return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_MAX_ITER, EIGENVALUE_MAX_ITER_DEFAULT)
608
+ else:
609
+ return EIGENVALUE_MAX_ITER_DEFAULT
610
+
611
+
612
+ def get_eigenvalue_tol(param_dict):
613
+ if EIGENVALUE in param_dict.keys():
614
+ return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_TOL, EIGENVALUE_TOL_DEFAULT)
615
+ else:
616
+ return EIGENVALUE_TOL_DEFAULT
617
+
618
+
619
+ def get_eigenvalue_stability(param_dict):
620
+ if EIGENVALUE in param_dict.keys():
621
+ return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_STABILITY, EIGENVALUE_STABILITY_DEFAULT)
622
+ else:
623
+ return EIGENVALUE_STABILITY_DEFAULT
624
+
625
+
626
+ def get_eigenvalue_gas_boundary_resolution(param_dict):
627
+ if EIGENVALUE in param_dict.keys():
628
+ return get_scalar_param(
629
+ param_dict[EIGENVALUE],
630
+ EIGENVALUE_GAS_BOUNDARY_RESOLUTION,
631
+ EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT,
632
+ )
633
+ else:
634
+ return EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT
635
+
636
+
637
+ def get_eigenvalue_layer_name(param_dict):
638
+ if EIGENVALUE in param_dict.keys():
639
+ return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_LAYER_NAME, EIGENVALUE_LAYER_NAME_DEFAULT)
640
+ else:
641
+ return EIGENVALUE_LAYER_NAME_DEFAULT
642
+
643
+
644
+ def get_eigenvalue_layer_num(param_dict):
645
+ if EIGENVALUE in param_dict.keys():
646
+ return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_LAYER_NUM, EIGENVALUE_LAYER_NUM_DEFAULT)
647
+ else:
648
+ return EIGENVALUE_LAYER_NUM_DEFAULT
649
+
650
+
651
+ def get_checkpoint_params(param_dict):
652
+ return param_dict.get(CHECKPOINT, {})
653
+
654
+
655
+ def get_data_types_params(param_dict):
656
+ return param_dict.get(DATA_TYPES, {})
657
+
658
+
659
+ def get_checkpoint_tag_validation_mode(checkpoint_params):
660
+ tag_validation_mode = checkpoint_params.get(CHECKPOINT_TAG_VALIDATION, CHECKPOINT_TAG_VALIDATION_DEFAULT)
661
+ tag_validation_mode = tag_validation_mode.upper()
662
+ if tag_validation_mode in CHECKPOINT_TAG_VALIDATION_MODES:
663
+ return tag_validation_mode
664
+ else:
665
+ raise DeepSpeedConfigError(
666
+ "Checkpoint config contains invalid tag_validation "
667
+ f"value of {tag_validation_mode}, expecting one of {CHECKPOINT_TAG_VALIDATION_MODES}")
668
+
669
+
670
+ def get_checkpoint_parallel_write_pipeline(checkpoint_params):
671
+ par_write_params = checkpoint_params.get(CHECKPOINT_PARALLEL_WRITE, {})
672
+ par_write_pipeline = par_write_params.get(CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE,
673
+ CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT)
674
+ if par_write_pipeline in [True, False]:
675
+ return par_write_pipeline
676
+ else:
677
+ raise DeepSpeedConfigError("checkpoint::parallel_write::pipeline_stage "
678
+ f"value of '{par_write_pipeline}' is invalid, expecting: true or false")
679
+
680
+
681
+ def get_dataloader_drop_last(param_dict):
682
+ return get_scalar_param(param_dict, DATALOADER_DROP_LAST, DATALOADER_DROP_LAST_DEFAULT)
683
+
684
+
685
+ '''Write deepspeed config files by modifying basic templates.
686
+ Can be used for quickly changing parameters via command line parameters.'''
687
+
688
+
689
+ class DeepSpeedConfigWriter:
690
+
691
+ def __init__(self, data=None):
692
+ self.data = data if data is not None else {}
693
+
694
+ def add_config(self, key, value):
695
+ self.data[key] = value
696
+
697
+ def load_config(self, filename):
698
+ self.data = json.load(open(filename, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
699
+
700
+ def write_config(self, filename):
701
+ with open(filename, "w") as outfile:
702
+ json.dump(self.data, outfile)
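A minimal usage sketch for DeepSpeedConfigWriter (illustrative only; the output file name is made up):

```python
from deepspeed.runtime.config import DeepSpeedConfigWriter

writer = DeepSpeedConfigWriter()
writer.add_config("train_batch_size", 32)
writer.add_config("gradient_accumulation_steps", 4)
writer.write_config("ds_config_generated.json")  # plain json.dump of the dict
```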
703
+
704
+
705
+ class DeepSpeedConfig(object):
706
+
707
+ def __init__(self, config: Union[str, dict], mpu=None):
708
+ super(DeepSpeedConfig, self).__init__()
709
+ if isinstance(config, dict):
710
+ self._param_dict = config
711
+ elif os.path.exists(config):
712
+ self._param_dict = hjson.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
713
+ else:
714
+ try:
715
+ config_decoded = base64.urlsafe_b64decode(config).decode('utf-8')
716
+ self._param_dict = hjson.loads(config_decoded)
717
+ except (UnicodeDecodeError, AttributeError):
718
+ raise ValueError(
719
+ f"Expected a string path to an existing deepspeed config, or a dictionary or a valid base64. Received: {config}"
720
+ )
721
+ try:
722
+ self.global_rank = dist.get_rank()
723
+ if mpu is None:
724
+ self.world_size = dist.get_world_size()
725
+ else:
726
+ self.world_size = mpu.get_data_parallel_world_size()
727
+ except:
728
+ self.global_rank = 0
729
+ self.world_size = 1
730
+
731
+ # If elastic mode is enabled, compute the elastic config and update _param_dict
732
+ self.elasticity_enabled = elasticity_enabled(self._param_dict)
733
+ if self.elasticity_enabled:
734
+ logger.info("DeepSpeed elasticity support enabled")
735
+ final_batch_size, valid_gpus, micro_batch_size = compute_elastic_config(
736
+ ds_config=self._param_dict,
737
+ target_deepspeed_version=__version__,
738
+ world_size=self.world_size,
739
+ )
740
+
741
+ elastic_dict = self._param_dict[ELASTICITY]
742
+
743
+ # Ensure the resource scheduler saw the same elastic config we are using at runtime
744
+ ensure_immutable_elastic_config(runtime_elastic_config_dict=elastic_dict)
745
+
746
+ self.elastic_model_parallel_size = elastic_dict.get(MODEL_PARALLEL_SIZE, MODEL_PARALLEL_SIZE_DEFAULT)
747
+ if self.elastic_model_parallel_size < 1:
748
+ raise ElasticityConfigError("Model-Parallel size cannot be less than 1, "
749
+ f"given model-parallel size: {self.elastic_model_parallel_size}")
750
+
751
+ self.num_gpus_per_node = elastic_dict.get(NUM_GPUS_PER_NODE, NUM_GPUS_PER_NODE_DEFAULT)
752
+ if self.num_gpus_per_node < 1:
753
+ raise ElasticityConfigError("NUmber of GPUs per node cannot be less than 1, "
754
+ f"given number of GPUs per node: {self.num_gpus_per_node}")
755
+
756
+ ignore_non_elastic_batch_info = elastic_dict.get(IGNORE_NON_ELASTIC_BATCH_INFO,
757
+ IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT)
758
+
759
+ if not ignore_non_elastic_batch_info:
760
+ batch_params = [
761
+ TRAIN_BATCH_SIZE,
762
+ TRAIN_MICRO_BATCH_SIZE_PER_GPU,
763
+ GRADIENT_ACCUMULATION_STEPS,
764
+ ]
765
+ if any(map(lambda t: t in self._param_dict, batch_params)):
766
+ raise ElasticityConfigError("One or more batch related parameters were found in your " \
767
+ f"ds_config ({TRAIN_BATCH_SIZE}, {TRAIN_MICRO_BATCH_SIZE_PER_GPU}, and/or " \
768
+ f"{GRADIENT_ACCUMULATION_STEPS}). These parameters *will not be used* since " \
769
+ "elastic training is enabled, which takes control of these parameters. " \
770
+ "If you want to suppress this error (the parameters will be silently ignored) " \
771
+ f"please set {IGNORE_NON_ELASTIC_BATCH_INFO}':true in your elasticity config.")
772
+
773
+ # micro_bsz * world_size * gas = total_batch_size
774
+ # gas = total_batch_size // (micro_bsz * world_size)
775
+ gradient_accu_steps = final_batch_size // (micro_batch_size * self.world_size)
776
+
777
+ if TRAIN_BATCH_SIZE in self._param_dict:
778
+ logger.warning("[Elasticity] overriding training_batch_size: "
779
+ f"{self._param_dict[TRAIN_BATCH_SIZE]} -> {final_batch_size}")
780
+ if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self._param_dict:
781
+ logger.warning("[Elasticity] overriding train_micro_batch_size_per_gpu: "
782
+ f"{self._param_dict[TRAIN_MICRO_BATCH_SIZE_PER_GPU]} -> {micro_batch_size}")
783
+ if GRADIENT_ACCUMULATION_STEPS in self._param_dict:
784
+ logger.warning("[Elasticity] overriding gradient_accumulation_steps: "
785
+ f"{self._param_dict[GRADIENT_ACCUMULATION_STEPS]} -> {gradient_accu_steps}")
786
+
787
+ logger.info(f"[Elasticity] valid GPU counts: {valid_gpus}")
788
+
789
+ self._param_dict[TRAIN_BATCH_SIZE] = final_batch_size
790
+ self._param_dict[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = micro_batch_size
791
+ self._param_dict[GRADIENT_ACCUMULATION_STEPS] = gradient_accu_steps
792
+
793
+ # Pass a copy so that user json is unmodified, e.g. for logging
794
+ self._initialize_params(copy.copy(self._param_dict))
795
+ self._configure_train_batch_size()
796
+ self._do_sanity_check()
797
+
798
+ def _initialize_params(self, param_dict):
799
+ self.train_batch_size = get_train_batch_size(param_dict)
800
+ #print(f"beginning get_train_batch_size = {get_train_batch_size}")
801
+ self.train_micro_batch_size_per_gpu = get_train_micro_batch_size_per_gpu(param_dict)
802
+ self.gradient_accumulation_steps = get_gradient_accumulation_steps(param_dict)
803
+ self.steps_per_print = get_steps_per_print(param_dict)
804
+ self.dump_state = get_dump_state(param_dict)
805
+
806
+ self.disable_allgather = get_disable_allgather(param_dict)
807
+ self.communication_data_type = get_communication_data_type(param_dict)
808
+ self.seq_parallel_communication_data_type = get_communication_data_type(
809
+ param_dict, SEQ_PARALLEL_COMMUNICATION_DATA_TYPE, SEQ_PARALLEL_COMMUNICATION_DATA_TYPE_DEFAULT)
810
+ self.prescale_gradients = get_prescale_gradients(param_dict)
811
+ self.gradient_predivide_factor = get_gradient_predivide_factor(param_dict)
812
+ self.sparse_gradients_enabled = get_sparse_gradients_enabled(param_dict)
813
+
814
+ self.zero_config = get_zero_config(param_dict)
815
+ self.mics_shard_size = self.zero_config.mics_shard_size
816
+ self.mics_hierarchial_params_gather = self.zero_config.mics_hierarchical_params_gather
817
+ self.zero_optimization_stage = self.zero_config.stage
818
+ self.zero_enabled = self.zero_optimization_stage > 0
819
+
820
+ self.activation_checkpointing_config = DeepSpeedActivationCheckpointingConfig(param_dict)
821
+
822
+ self.comms_config = DeepSpeedCommsConfig(param_dict)
823
+ self.monitor_config = get_monitor_config(param_dict)
824
+
825
+ self.gradient_clipping = get_gradient_clipping(param_dict)
826
+ self.fp16_enabled = get_fp16_enabled(param_dict)
827
+ self.fp16_auto_cast = get_fp16_auto_cast(param_dict)
828
+ self.bfloat16_enabled = get_bfloat16_enabled(param_dict)
829
+ self.bfloat16_immediate_grad_update = get_bfloat16_immediate_grad_update(param_dict)
830
+ assert not (self.fp16_enabled
831
+ and self.bfloat16_enabled), 'bfloat16 and fp16 modes cannot be simultaneously enabled'
832
+ self.fp16_master_weights_and_gradients = get_fp16_master_weights_and_grads_enabled(param_dict)
833
+ self.amp_enabled = get_amp_enabled(param_dict)
834
+ self.amp_params = get_amp_params(param_dict)
835
+ self.loss_scale = get_loss_scale(param_dict)
836
+ self.initial_dynamic_scale = get_initial_dynamic_scale(param_dict)
837
+ self.dynamic_loss_scale_args = get_dynamic_loss_scale_args(param_dict)
838
+
839
+ self.compression_config = get_compression_config(param_dict)
840
+ self.graph_harvesting = get_graph_harvesting(param_dict)
841
+
842
+ self.optimizer_name = get_optimizer_name(param_dict)
843
+ if (self.optimizer_name is not None and self.optimizer_name.lower() in DEEPSPEED_OPTIMIZERS):
844
+ self.optimizer_name = self.optimizer_name.lower()
845
+
846
+ self.optimizer_params = get_optimizer_params(param_dict)
847
+ self.optimizer_legacy_fusion = get_optimizer_legacy_fusion(param_dict)
848
+
849
+ self.zero_allow_untested_optimizer = get_zero_allow_untested_optimizer(param_dict)
850
+
851
+ self.zero_force_ds_cpu_optimizer = get_zero_force_ds_cpu_optimizer(param_dict)
852
+
853
+ self.scheduler_name = get_scheduler_name(param_dict)
854
+ self.scheduler_params = get_scheduler_params(param_dict)
855
+
856
+ self.flops_profiler_config = DeepSpeedFlopsProfilerConfig(param_dict)
857
+ self.wall_clock_breakdown = (get_wall_clock_breakdown(param_dict) | self.flops_profiler_config.enabled)
858
+ self.memory_breakdown = get_memory_breakdown(param_dict)
859
+ self.autotuning_config = DeepSpeedAutotuningConfig(param_dict)
860
+
861
+ (
862
+ self.eigenvalue_enabled,
863
+ self.eigenvalue_verbose,
864
+ self.eigenvalue_max_iter,
865
+ self.eigenvalue_tol,
866
+ self.eigenvalue_stability,
867
+ self.eigenvalue_gas_boundary_resolution,
868
+ self.eigenvalue_layer_name,
869
+ self.eigenvalue_layer_num,
870
+ ) = get_eigenvalue_config(param_dict)
871
+
872
+ self.use_data_before_expert_parallel_ = get_expert_data_topo_config(param_dict)
873
+ self.hybrid_engine = get_hybrid_engine_config(param_dict)
874
+
875
+ self.sparse_attention = get_sparse_attention(param_dict)
876
+ self.pipeline = get_pipeline_config(param_dict)
877
+
878
+ self.pld_enabled = get_pld_enabled(param_dict)
879
+ self.pld_params = get_pld_params(param_dict)
880
+
881
+ self.curriculum_enabled_legacy = get_curriculum_enabled_legacy(param_dict)
882
+ self.curriculum_params_legacy = get_curriculum_params_legacy(param_dict)
883
+
884
+ self.data_efficiency_enabled = get_data_efficiency_enabled(param_dict)
885
+ self.data_efficiency_config = get_data_efficiency_config(param_dict)
886
+
887
+ checkpoint_params = get_checkpoint_params(param_dict)
888
+ validation_mode = get_checkpoint_tag_validation_mode(checkpoint_params)
889
+ self.checkpoint_tag_validation_enabled = (validation_mode != ValidationMode.IGNORE)
890
+ self.checkpoint_tag_validation_fail = validation_mode == ValidationMode.FAIL
891
+ self.load_universal_checkpoint = checkpoint_params.get(LOAD_UNIVERSAL_CHECKPOINT,
892
+ LOAD_UNIVERSAL_CHECKPOINT_DEFAULT)
893
+
894
+ self.use_node_local_storage = checkpoint_params.get(USE_NODE_LOCAL_STORAGE_CHECKPOINT,
895
+ USE_NODE_LOCAL_STORAGE_CHECKPOINT_DEFAULT)
896
+
897
+ data_types_params = get_data_types_params(param_dict)
898
+ self.grad_accum_dtype = data_types_params.get(GRAD_ACCUM_DTYPE, GRAD_ACCUM_DTYPE_DEFAULT)
899
+
900
+ par_write_pipe = get_checkpoint_parallel_write_pipeline(checkpoint_params)
901
+ self.checkpoint_parallel_write_pipeline = par_write_pipe
902
+
903
+ self.aio_config = get_aio_config(param_dict)
904
+
905
+ self.dataloader_drop_last = get_dataloader_drop_last(param_dict)
906
+
907
+ self.nebula_config = DeepSpeedNebulaConfig(param_dict)
908
+
909
+ self.weight_quantization_config = WeightQuantConfig(
910
+ **param_dict['weight_quantization']) if 'weight_quantization' in param_dict else None
911
+
912
+ self.compile_config = get_compile_config(param_dict)
913
+
914
+ def _batch_assertion(self):
915
+
916
+ train_batch = self.train_batch_size
917
+ micro_batch = self.train_micro_batch_size_per_gpu
918
+ grad_acc = self.gradient_accumulation_steps
919
+
920
+ assert (train_batch > 0), f"Train batch size: {train_batch} has to be greater than 0"
921
+
922
+ assert (micro_batch > 0), f"Micro batch size per gpu: {micro_batch} has to be greater than 0"
923
+
924
+ assert (grad_acc > 0), f"Gradient accumulation steps: {grad_acc} has to be greater than 0"
925
+
926
+ assert train_batch == micro_batch * grad_acc * self.world_size, (
927
+ f"Check batch related parameters. train_batch_size is not equal "
928
+ "to micro_batch_per_gpu * gradient_acc_step * world_size "
929
+ f"{train_batch} != {micro_batch} * {grad_acc} * {self.world_size}")
930
+
931
+ def _set_batch_related_parameters(self):
932
+
933
+ train_batch = self.train_batch_size
934
+ micro_batch = self.train_micro_batch_size_per_gpu
935
+ grad_acc = self.gradient_accumulation_steps
936
+
937
+ #print(f"train_batch = {train_batch}, micro_batch={micro_batch}")
938
+
939
+ # all values are provided nothing needs to be set
940
+ if train_batch is not None and micro_batch is not None and grad_acc is not None:
941
+ return
942
+
943
+ # global_accumulation_steps needs to be set
944
+ elif train_batch is not None and micro_batch is not None:
945
+ grad_acc = train_batch // micro_batch
946
+ grad_acc //= self.world_size
947
+ self.gradient_accumulation_steps = grad_acc
948
+
949
+ # micro_batch_per_gpu needs to be set
950
+ elif train_batch is not None and grad_acc is not None:
951
+ micro_batch = train_batch // self.world_size
952
+ micro_batch //= grad_acc
953
+ self.train_micro_batch_size_per_gpu = micro_batch
954
+
955
+ # train_batch_size needs to be set
956
+ elif micro_batch is not None and grad_acc is not None:
957
+ train_batch_size = micro_batch * grad_acc
958
+ train_batch_size *= self.world_size
959
+ self.train_batch_size = train_batch_size
960
+
961
+ # gradient_accumulation_steps and micro_batch_per_gpus is set
962
+ elif train_batch is not None:
963
+ self.gradient_accumulation_steps = 1
964
+ self.train_micro_batch_size_per_gpu = train_batch // self.world_size
965
+
966
+ # train_batch_size and gradient_accumulation_step is set
967
+ elif micro_batch is not None:
968
+ self.train_batch_size = micro_batch * self.world_size
969
+ self.gradient_accumulation_steps = 1
970
+
971
+ # either none of the three parameters are provided or just gradient_accumulation_step is provided
972
+ else:
973
+ assert False, \
974
+ 'Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided'
975
+
976
+ def _configure_train_batch_size(self):
977
+ self._set_batch_related_parameters()
978
+ self._batch_assertion()
979
+
980
+ def _do_sanity_check(self):
981
+ self._do_error_check()
982
+
983
+ self._do_warning_check()
984
+
985
+ def print_user_config(self):
986
+ logger.info(" json = {}".format(
987
+ json.dumps(
988
+ self._param_dict,
989
+ sort_keys=True,
990
+ indent=4,
991
+ cls=ScientificNotationEncoder,
992
+ separators=(",", ":"),
993
+ )))
994
+
995
+ def print(self, name):
996
+ logger.info("{}:".format(name))
997
+ for arg in sorted(vars(self)):
998
+ if arg != "_param_dict":
999
+ dots = "." * (29 - len(arg))
1000
+ logger.info(" {} {} {}".format(arg, dots, getattr(self, arg)))
1001
+
1002
+ self.print_user_config()
1003
+
1004
+ def _do_error_check(self):
1005
+ assert (self.train_micro_batch_size_per_gpu
1006
+ ), "DeepSpeedConfig: {} is not defined".format(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
1007
+
1008
+ assert (
1009
+ self.gradient_accumulation_steps), "DeepSpeedConfig: {} is not defined".format(GRADIENT_ACCUMULATION_STEPS)
1010
+
1011
+ if self.zero_enabled:
1012
+ assert (self.zero_optimization_stage <=
1013
+ ZeroStageEnum.max_stage), "DeepSpeedConfig: Maximum supported ZeRO stage is {}".format(
1014
+ ZeroStageEnum.max_stage)
1015
+
1016
+ if self.fp16_master_weights_and_gradients:
1017
+ assert self.zero_enabled and self.zero_optimization_stage == ZeroStageEnum.gradients, "Fp16_master_weights_and_grads is only supported with ZeRO Stage 2 for now."
1018
+
1019
+ def _do_warning_check(self):
1020
+ fp16_enabled = self.fp16_enabled
1021
+
1022
+ vocabulary_size = self._param_dict.get(VOCABULARY_SIZE, VOCABULARY_SIZE_DEFAULT)
1023
+ if vocabulary_size and vocabulary_size % TENSOR_CORE_ALIGN_SIZE != 0:
1024
+ logger.warning(
1025
+ "DeepSpeedConfig: vocabulary size {} is not aligned to {}, may import tensor core utilization.".format(
1026
+ vocabulary_size, TENSOR_CORE_ALIGN_SIZE))
1027
+
1028
+ if (self.optimizer_params is not None and MAX_GRAD_NORM in self.optimizer_params.keys()
1029
+ and self.optimizer_params[MAX_GRAD_NORM] > 0):
1030
+ if fp16_enabled:
1031
+ if self.global_rank == 0:
1032
+ logger.warning("DeepSpeedConfig: In FP16 mode, DeepSpeed will pass {}:{} to FP16 wrapper".format(
1033
+ MAX_GRAD_NORM, self.optimizer_params[MAX_GRAD_NORM]))
1034
+ else:
1035
+ if self.global_rank == 0:
1036
+ logger.warning(
1037
+ "DeepSpeedConfig: In FP32 mode, DeepSpeed does not permit MAX_GRAD_NORM ({}) > 0, setting to zero"
1038
+ .format(self.optimizer_params[MAX_GRAD_NORM]))
1039
+ self.optimizer_params[MAX_GRAD_NORM] = 0.0
venv/lib/python3.10/site-packages/deepspeed/runtime/constants.py ADDED
@@ -0,0 +1,457 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ #############################################
7
+ # Routes
8
+ #############################################
9
+ ROUTE_TRAIN = "train"
10
+ ROUTE_EVAL = "eval"
11
+ ROUTE_PREDICT = "predict"
12
+ ROUTE_ENCODE = "encode"
13
+
14
+ #############################################
15
+ # Batch size
16
+ #############################################
17
+ TRAIN_BATCH_SIZE = "train_batch_size"
18
+ TRAIN_BATCH_SIZE_DEFAULT = None
19
+
20
+ #############################################
21
+ # Sparse attention
22
+ #############################################
23
+ SPARSE_ATTENTION = "sparse_attention"
24
+ SPARSE_DENSE_MODE = "dense"
25
+ SPARSE_FIXED_MODE = "fixed"
26
+ SPARSE_VARIABLE_MODE = "variable"
27
+ SPARSE_BIGBIRD_MODE = "bigbird"
28
+ SPARSE_BSLONGFORMER_MODE = "bslongformer"
29
+ SPARSE_MODE = "mode"
30
+ SPARSE_MODE_DEFAULT = SPARSE_FIXED_MODE
31
+ SPARSE_BLOCK = "block"
32
+ SPARSE_BLOCK_DEFAULT = 16
33
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD = "different_layout_per_head"
34
+ SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT = False
35
+ SPARSE_NUM_LOCAL_BLOCKS = "num_local_blocks"
36
+ SPARSE_NUM_LOCAL_BLOCKS_DEFAULT = 4
37
+ SPARSE_NUM_GLOBAL_BLOCKS = "num_global_blocks"
38
+ SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT = 1
39
+ SPARSE_ATTENTION_TYPE = "attention"
40
+ SPARSE_ATTENTION_TYPE_DEFAULT = "bidirectional"
41
+ SPARSE_HORIZONTAL_GLOBAL_ATTENTION = "horizontal_global_attention"
42
+ SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT = False
43
+ SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS = "num_different_global_patterns"
44
+ SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS_DEFAULT = 1
45
+ SPARSE_NUM_RANDOM_BLOCKS = "num_random_blocks"
46
+ SPARSE_NUM_RANDOM_BLOCKS_DEFAULT = 0
47
+ SPARSE_LOCAL_WINDOW_BLOCKS = "local_window_blocks"
48
+ SPARSE_LOCAL_WINDOW_BLOCKS_DEFAULT = [4]
49
+ SPARSE_GLOBAL_BLOCK_INDICES = "global_block_indices"
50
+ SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT = [0]
51
+ SPARSE_GLOBAL_BLOCK_END_INDICES = "global_block_end_indices"
52
+ SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT = None
53
+ SPARSE_NUM_SLIDING_WINDOW_BLOCKS = "num_sliding_window_blocks"
54
+ SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT = 3
55
+
56
+ #############################################
57
+ # Optimizer and lr scheduler
58
+ #############################################
59
+ OPTIMIZER = "optimizer"
60
+ OPTIMIZER_TYPE_DEFAULT = None
61
+ OPTIMIZER_PARAMS = "params"
62
+ TYPE = "type"
63
+ LEGACY_FUSION = "legacy_fusion"
64
+ LEGACY_FUSION_DEFAULT = False
65
+ SCHEDULER = "scheduler"
66
+ SCHEDULER_TYPE_DEFAULT = None
67
+ SCHEDULER_PARAMS = "params"
68
+ MAX_GRAD_NORM = 'max_grad_norm'
69
+
70
+ #############################################
71
+ # ZeRO optimizer compatibility flags
72
+ #############################################
73
+ ZERO_ALLOW_UNTESTED_OPTIMIZER = "zero_allow_untested_optimizer"
74
+ ZERO_ALLOW_UNTESTED_OPTIMIZER_DEFAULT = False
75
+ ZERO_FORCE_DS_CPU_OPTIMIZER = "zero_force_ds_cpu_optimizer"
76
+ ZERO_FORCE_DS_CPU_OPTIMIZER_DEFAULT = True
77
+
78
+ # Steps
79
+ STEPS_PER_PRINT = "steps_per_print"
80
+ STEPS_PER_PRINT_DEFAULT = 10
81
+
82
+ #########################################
83
+ # Training micro batch size per GPU
84
+ #########################################
85
+ # Batch size for one training step. This is used when the
86
+ # TRAIN_BATCH_SIZE cannot fit in GPU memory to determine
87
+ # the number of gradient accumulation steps. By default, this
88
+ # is set to None. Users can configure in ds_config.json as below example:
89
+ TRAIN_MICRO_BATCH_SIZE_PER_GPU_FORMAT = '''
90
+ TRAIN_MICRO_BATCH_SIZE_PER_GPU is defined in this format:
91
+ "train_micro_batch_size_per_gpu": 1
92
+ '''
93
+ TRAIN_MICRO_BATCH_SIZE_PER_GPU = "train_micro_batch_size_per_gpu"
94
+ TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = None
95
+
96
+ #########################################
97
+ # Gradient Accumulation
98
+ #########################################
99
+ # Gradient accumulation feature. By default, this feature is not enabled.
100
+ # Users can configure in ds_config.json as below example:
101
+ GRADIENT_ACCUMULATION_FORMAT = '''
102
+ Gradient Accumulation should be of the format:
103
+ "gradient_accumulation_steps": 1
104
+ '''
105
+ GRADIENT_ACCUMULATION_STEPS = "gradient_accumulation_steps"
106
+ GRADIENT_ACCUMULATION_STEPS_DEFAULT = None
107
+
108
+ # DeepSpeed CSR gradient sparsity
109
+ SPARSE_GRADIENTS = "sparse_gradients"
110
+ SPARSE_GRADIENTS_DEFAULT = False
111
+
112
+ #########################################
113
+ # BFLOAT16 support
114
+ #########################################
115
+ # BFLOAT16 feature. By default, this feature is not enabled.
116
+ # Users can configure in ds_config.json as below example:
117
+ BFLOAT16_FORMAT = '''
118
+ BFLOAT16 parameters should be of the format:
119
+ "bf16": {
120
+ "enabled": true
121
+ }
122
+ '''
123
+ BFLOAT16 = "bf16"
124
+ BFLOAT16_OLD = "bfloat16" # keeping for backwards compatibility
125
+
126
+ BFLOAT16_ENABLED = "enabled"
127
+ BFLOAT16_ENABLED_DEFAULT = False
128
+
129
+ # BFLOAT16 optimizer immediate gradient update
130
+ BFLOAT16_IMMEDIATE_GRAD_UPDATE = "immediate_grad_update"
131
+ BFLOAT16_IMMEDIATE_GRAD_UPDATE_DEFAULT = False
132
+
133
+ #########################################
134
+ # FP16 support
135
+ #########################################
136
+ # FP16 feature. By default, this feature is not enabled.
137
+ # Users can configure in ds_config.json as below example:
138
+ FP16_FORMAT = '''
139
+ FP16 parameters should be of the format:
140
+ "fp16": {
141
+ "enabled": true,
142
+ "auto_cast": false,
143
+ "loss_scale": 0,
144
+ "initial_scale_power": 16,
145
+ "loss_scale_window": 1000,
146
+ "hysteresis": 2,
147
+ "consecutive_hysteresis": false,
148
+ "min_loss_scale": 1
149
+ }
150
+ '''
151
+ FP16 = "fp16"
152
+
153
+ FP16_ENABLED = "enabled"
154
+ FP16_ENABLED_DEFAULT = False
155
+
156
+ # FP16 loss scale, zero means using dynamic scaling
157
+ FP16_LOSS_SCALE = "loss_scale"
158
+ FP16_LOSS_SCALE_DEFAULT = 0
159
+
160
+ FP16_AUTO_CAST = "auto_cast"
161
+ FP16_AUTO_CAST_DEFAULT = False
162
+
163
+ # FP16 initial dynamic scale loss power
164
+ FP16_INITIAL_SCALE_POWER = "initial_scale_power"
165
+ FP16_INITIAL_SCALE_POWER_DEFAULT = 16
166
+
167
+ # FP16 loss scale window
168
+ FP16_LOSS_SCALE_WINDOW = "loss_scale_window"
169
+ FP16_LOSS_SCALE_WINDOW_DEFAULT = 1000
170
+
171
+ # FP16 hysteresis
172
+ FP16_HYSTERESIS = "hysteresis"
173
+ FP16_HYSTERESIS_DEFAULT = 2
174
+
175
+ # FP16 consecutive hysteresis
176
+ FP16_CONSECUTIVE_HYSTERESIS = "consecutive_hysteresis"
177
+ FP16_CONSECUTIVE_HYSTERESIS_DEFAULT = False
178
+
179
+ # FP16 min loss scale
180
+ FP16_MIN_LOSS_SCALE = "min_loss_scale"
181
+ FP16_MIN_LOSS_SCALE_DEFAULT = 1
182
+
183
+ # FP16 master and grads
184
+ FP16_MASTER_WEIGHTS_AND_GRADS = "fp16_master_weights_and_grads"
185
+ FP16_MASTER_WEIGHTS_AND_GRADS_DEFAULT = False
186
+
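Reading the defaults above as a worked example (an interpretation of the constants, not text from the source): a loss_scale of 0 selects dynamic scaling, whose starting value is derived from initial_scale_power:

    initial_loss_scale = 2 ** FP16_INITIAL_SCALE_POWER_DEFAULT  # 2 ** 16 = 65536

The scaler then raises the scale after loss_scale_window (1000) overflow-free steps, lowers it on overflow (delayed by hysteresis), and never drops below min_loss_scale (1).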
187
+ #########################################
188
+ # Apex AMP support
189
+ #########################################
190
+ # Use Apex AMP for mixed precision support; all parameters (other than 'enabled') will be passed to
191
+ # amp.initialize(model, optimizer, **amp_params)
192
+ # See apex documentation for supported parameters/features: https://nvidia.github.io/apex/amp.html#apex.amp.initialize
193
+ AMP_FORMAT = '''
194
+ "amp": {
195
+ "enabled": true,
196
+ "opt_level": "O1",
197
+ ...
198
+ }
199
+ '''
200
+ AMP = "amp"
201
+
202
+ AMP_ENABLED = "enabled"
203
+ AMP_ENABLED_DEFAULT = False
204
+
205
+ #########################################
206
+ # Gradient clipping
207
+ #########################################
208
+ # Gradient clipping. By default, this feature is not enabled.
209
+ # Users can configure in ds_config.json as below example:
210
+ GRADIENT_CLIPPING_FORMAT = '''
211
+ Gradient clipping should be enabled as:
212
+ "gradient_clipping": 1.0
213
+ '''
214
+ GRADIENT_CLIPPING = 'gradient_clipping'
215
+ GRADIENT_CLIPPING_DEFAULT = 0.
216
+
217
+ #########################################
218
+ # Capture graph for short kernel sequences
219
+ #########################################
220
+ # Graph harvesting. By default, this feature is not enabled.
221
+ # Users can configure in ds_config.json as below example:
222
+ GRAPH_HARVESTING_FORMAT = '''
223
+ Graph harvesting should be enabled as:
224
+ "graph_harvesting": true
225
+ '''
226
+ GRAPH_HARVESTING = 'graph_harvesting'
227
+ GRAPH_HARVESTING_DEFAULT = False
228
+
229
+ #########################################
230
+ # Communication data type
231
+ #########################################
232
+ # Supported types: ['none', 'fp16', 'fp32']
233
+ # By default, this feature is not enabled ('none' value)
234
+ # Users can configure in ds_config.json as below example:
235
+ COMMUNICATION_DATA_TYPE_FORMAT = '''
236
+ Communication data type should be set as:
237
+ "communication_data_type": "fp32"
238
+ '''
239
+ COMMUNICATION_DATA_TYPE = "communication_data_type"
240
+ COMMUNICATION_DATA_TYPE_DEFAULT = None
241
+
242
+ ###########################################################
243
+ # Gradient communication data type for sequence parallelism
244
+ ###########################################################
245
+ # Supported types: ['fp16', 'bf16','fp32']
246
+ # Default value is fp32
247
+ # Users can configure in ds_config.json as below example:
248
+ SEQ_PARALLEL_COMMUNICATION_DATA_TYPE_FORMAT = '''
249
+ Optional comm data type for seq parallelism should be set as:
250
+ "seq_parallel_communication_data_type": "fp32"
251
+ '''
252
+ SEQ_PARALLEL_COMMUNICATION_DATA_TYPE = "seq_parallel_comm_data_type"
253
+ SEQ_PARALLEL_COMMUNICATION_DATA_TYPE_DEFAULT = "fp32"
254
+
255
+ #########################################
256
+ # Scale/predivide gradients before allreduce
257
+ #########################################
258
+ # Prescale gradients. By default, this feature is not enabled.
259
+ # Users can configure in ds_config.json as below example:
260
+ PRESCALE_GRADIENTS_FORMAT = '''
261
+ Gradient prescaling should be enabled as:
262
+ "prescale_gradients": true
263
+ '''
264
+ PRESCALE_GRADIENTS = "prescale_gradients"
265
+ PRESCALE_GRADIENTS_DEFAULT = False
266
+
267
+ GRADIENT_PREDIVIDE_FACTOR_FORMAT = '''
268
+ Gradient predivide factor should be enabled as:
269
+ "gradient_predivide_factor": 1.0
270
+ '''
271
+ GRADIENT_PREDIVIDE_FACTOR = "gradient_predivide_factor"
272
+ GRADIENT_PREDIVIDE_FACTOR_DEFAULT = 1.0
273
+
274
+ #########################################
275
+ # Disable AllGather
276
+ #########################################
277
+ # Disable AllGather. By default, this feature is not enabled.
278
+ # Users can configure in ds_config.json as below example:
279
+ DISABLE_ALLGATHER_FORMAT = '''
280
+ Disable AllGather should be enabled as:
281
+ "disable_allgather": true
282
+ '''
283
+ DISABLE_ALLGATHER = "disable_allgather"
284
+ DISABLE_ALLGATHER_DEFAULT = False
285
+
286
+ #########################################
287
+ # Dump DeepSpeed state
288
+ #########################################
289
+ # Dump State. By default, this feature is not enabled.
290
+ # Users can configure in ds_config.json as below example:
291
+ DUMP_STATE_FORMAT = '''
292
+ Dump state should be enabled as:
293
+ "dump_state": true
294
+ '''
295
+ DUMP_STATE = 'dump_state'
296
+ DUMP_STATE_DEFAULT = False
297
+
298
+ #########################################
299
+ # Vocabulary size
300
+ #########################################
301
+ # Vocabulary size.
302
+ # Users can configure in ds_config.json as below example:
303
+ VOCABULARY_SIZE_FORMAT = '''
304
+ Vocabulary size can be specified as:
305
+ "vocabulary_size": 1024
306
+ '''
307
+ VOCABULARY_SIZE = 'vocabulary_size'
308
+ VOCABULARY_SIZE_DEFAULT = None
309
+
310
+ #########################################
311
+ # Wall clock breakdown
312
+ #########################################
313
+ # Wall clock breakdown. By default, this feature is not enabled.
314
+ # Users can configure in ds_config.json as below example:
315
+ WALL_CLOCK_BREAKDOWN_FORMAT = '''
316
+ Wall clock breakdown should be enabled as:
317
+ "wall_clock_breakdown": true
318
+ '''
319
+ WALL_CLOCK_BREAKDOWN = 'wall_clock_breakdown'
320
+ WALL_CLOCK_BREAKDOWN_DEFAULT = False
321
+
322
+ MEMORY_BREAKDOWN = 'memory_breakdown'
323
+ MEMORY_BREAKDOWN_DEFAULT = False
324
+
325
+ #########################################
326
+ # Eigenvalue
327
+ #########################################
328
+ # Eigenvalue computation. By default, this feature is not enabled.
329
+ # Users can configure in ds_config.json as below example:
330
+ EIGENVALUE_FORMAT = '''
331
+ Eigenvalue parameters can be specified as:
332
+ "eigenvalue": {
333
+ "enabled": true,
334
+ "verbose": true,
335
+ "max_iter": 100,
336
+ "tol": 1e-2,
337
+ "stability": 1e-6
338
+ }
339
+ '''
340
+ EIGENVALUE = "eigenvalue"
341
+
342
+ # Eigenvalue enable signal
343
+ EIGENVALUE_ENABLED = "enabled"
344
+ EIGENVALUE_ENABLED_DEFAULT = False
345
+
346
+ EIGENVALUE_VERBOSE = "verbose"
347
+ EIGENVALUE_VERBOSE_DEFAULT = False
348
+
349
+ EIGENVALUE_MAX_ITER = "max_iter"
350
+ EIGENVALUE_MAX_ITER_DEFAULT = 100
351
+
352
+ EIGENVALUE_TOL = "tol"
353
+ EIGENVALUE_TOL_DEFAULT = 1e-2
354
+
355
+ EIGENVALUE_STABILITY = "stability"
356
+ EIGENVALUE_STABILITY_DEFAULT = 1e-6
357
+
358
+ EIGENVALUE_GAS_BOUNDARY_RESOLUTION = "gas_boundary_resolution"
359
+ EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT = 1
360
+
361
+ EIGENVALUE_LAYER_NAME = "layer_name"
362
+ EIGENVALUE_LAYER_NAME_DEFAULT = "bert.encoder.layer"
363
+
364
+ EIGENVALUE_LAYER_NUM = "layer_num"
365
+ EIGENVALUE_LAYER_NUM_DEFAULT = 0
366
+
367
+ #########################################
368
+ # Progressive Layer Drop (PLD)
369
+ #########################################
370
+ PROGRESSIVE_LAYER_DROP = "progressive_layer_drop"
371
+
372
+ # PLD enable signal
373
+ PLD_ENABLED = "enabled"
374
+ PLD_ENABLED_DEFAULT = False
375
+
376
+ PLD_THETA = "theta"
377
+ PLD_THETA_DEFAULT = 1.0
378
+
379
+ PLD_GAMMA = "gamma"
380
+ PLD_GAMMA_DEFAULT = 0.001
381
+
382
+
383
+ #########################################
384
+ # Validation modes
385
+ #########################################
386
+ class ValidationMode:
387
+ WARN = "WARN"
388
+ IGNORE = "IGNORE"
389
+ FAIL = "FAIL"
390
+
391
+
392
+ #########################################
393
+ # Checkpoint config params
394
+ #########################################
395
+ # "checkpoint": {
396
+ # tag_validation=["Ignore"|"Warn"|"Fail"]
397
+ # load_universal=false
398
+ # use_node_local_storage=false
399
+ # parallel_write: {
400
+ # pipeline_stage: [True|False]
401
+ # }
402
+ # }
403
+ CHECKPOINT = "checkpoint"
404
+ CHECKPOINT_TAG_VALIDATION = "tag_validation"
405
+ CHECKPOINT_TAG_VALIDATION_DEFAULT = ValidationMode.WARN
406
+ CHECKPOINT_TAG_VALIDATION_MODES = [ValidationMode.WARN, ValidationMode.IGNORE, ValidationMode.FAIL]
407
+
408
+ LOAD_UNIVERSAL_CHECKPOINT = "load_universal"
409
+ LOAD_UNIVERSAL_CHECKPOINT_DEFAULT = False
410
+
411
+ USE_NODE_LOCAL_STORAGE_CHECKPOINT = "use_node_local_storage"
412
+ USE_NODE_LOCAL_STORAGE_CHECKPOINT_DEFAULT = False
413
+
414
+ CHECKPOINT_PARALLEL_WRITE = "parallel_write"
415
+ CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE = "pipeline_stage"
416
+ CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT = False
417
+
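A JSON rendering of the pseudo-config comment above, as a hedged sketch (values illustrative):

    "checkpoint": {
        "tag_validation": "Warn",
        "load_universal": false,
        "use_node_local_storage": false,
        "parallel_write": { "pipeline_stage": false }
    }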
418
+ #########################################
419
+ # Data types config params
420
+ #########################################
421
+ # "data_types": {
422
+ # grad_accum_dtype=["bf16"|"fp16"|"fp32"]
423
+ # }
425
+
426
+ DATA_TYPES = "data_types"
427
+ GRAD_ACCUM_DTYPE = "grad_accum_dtype"
428
+ GRAD_ACCUM_DTYPE_DEFAULT = None
429
+
430
+ #########################################
431
+ # Drop the last incomplete Batch
432
+ #########################################
433
+ # dataloader_drop_last. By default, this feature is not enabled.
434
+ # Users can configure in ds_config.json as below example:
435
+ DATALOADER_DROP_LAST_FORMAT = '''
436
+ The last incomplete batch can be dropped by setting:
437
+ "dataloader_drop_last": true
438
+ '''
439
+ DATALOADER_DROP_LAST = "dataloader_drop_last"
440
+ DATALOADER_DROP_LAST_DEFAULT = False
441
+
442
+ #########################################
443
+ # PIPELINE PARALLELISM
444
+ #########################################
445
+ PIPE_REPLICATED = 'ds_pipe_replicated'
446
+
447
+ #########################################
448
+ # DATA PARALLELISM
449
+ #########################################
450
+ DATA_PARALLEL_GROUP = "data_parallel_group"
451
+ GLOBAL_RANK = "global_rank"
452
+
453
+ #########################################
454
+ # EXPERT-DATA PARALLELISM TOPO Config
455
+ #########################################
456
+ USE_DATA_BEFORE_EXPERT_PARALLEL = "use_data_before_expert_parallelism"
457
+ USE_DATA_BEFORE_EXPERT_PARALLEL_DEFAULT = False
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ '''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/config.py ADDED
@@ -0,0 +1,168 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .constants import *
7
+ import copy
8
+ from ..config_utils import get_scalar_param
9
+
10
+
11
+ # TODO: Reduce config verbosity by returning None or {} when disabled.
12
+ # One challenge is that we still need to somehow include the default values,
13
+ # for example the *_ENABLED keys have a default of false.
14
+ def get_data_efficiency_config(param_dict):
15
+ output = {}
16
+ output[DATA_EFFICIENCY_ENABLED] = get_data_efficiency_enabled(param_dict)
17
+ output[DATA_EFFICIENCY_SEED] = get_data_efficiency_seed(param_dict)
18
+ if DATA_EFFICIENCY not in param_dict.keys():
19
+ param_dict[DATA_EFFICIENCY] = {}
20
+ sub_param_dict = param_dict[DATA_EFFICIENCY]
21
+ output[DATA_SAMPLING] = get_data_sampling(sub_param_dict)
22
+ output[DATA_ROUTING] = get_data_routing(sub_param_dict)
23
+
24
+ return output
25
+
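A minimal usage sketch of the parser above (the config values are hypothetical; the key strings come from data_pipeline/constants.py):

    ds_config = {
        "data_efficiency": {
            "enabled": True,
            "seed": 42,
            "data_sampling": {"enabled": True, "num_epochs": 2, "num_workers": 4},
            "data_routing": {"enabled": False},
        }
    }
    cfg = get_data_efficiency_config(ds_config)
    # cfg["enabled"] -> True, cfg["data_sampling"]["num_epochs"] -> 2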
26
+
27
+ def get_data_efficiency_enabled(param_dict):
28
+ if DATA_EFFICIENCY in param_dict.keys():
29
+ return get_scalar_param(param_dict[DATA_EFFICIENCY], DATA_EFFICIENCY_ENABLED, DATA_EFFICIENCY_ENABLED_DEFAULT)
30
+ else:
31
+ return False
32
+
33
+
34
+ def get_data_efficiency_seed(param_dict):
35
+ if DATA_EFFICIENCY in param_dict.keys():
36
+ return get_scalar_param(param_dict[DATA_EFFICIENCY], DATA_EFFICIENCY_SEED, DATA_EFFICIENCY_SEED_DEFAULT)
37
+ else:
38
+ return DATA_EFFICIENCY_SEED_DEFAULT
39
+
40
+
41
+ def get_data_sampling(param_dict):
42
+ output = {}
43
+ output[DATA_SAMPLING_ENABLED] = get_data_sampling_enabled(param_dict)
44
+ output[DATA_SAMPLING_NUM_EPOCHS] = get_data_sampling_num_epochs(param_dict)
45
+ output[DATA_SAMPLING_NUM_WORKERS] = get_data_sampling_num_workers(param_dict)
46
+ if DATA_SAMPLING not in param_dict.keys():
47
+ param_dict[DATA_SAMPLING] = {}
48
+ sub_param_dict = param_dict[DATA_SAMPLING]
49
+ output[CURRICULUM_LEARNING] = get_curriculum_learning(sub_param_dict)
50
+
51
+ return output
52
+
53
+
54
+ def get_data_sampling_enabled(param_dict):
55
+ if DATA_SAMPLING in param_dict.keys():
56
+ return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_ENABLED, DATA_SAMPLING_ENABLED_DEFAULT)
57
+ else:
58
+ return False
59
+
60
+
61
+ def get_data_sampling_num_epochs(param_dict):
62
+ if DATA_SAMPLING in param_dict.keys():
63
+ return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_NUM_EPOCHS, DATA_SAMPLING_NUM_EPOCHS_DEFAULT)
64
+ else:
65
+ return DATA_SAMPLING_NUM_EPOCHS_DEFAULT
66
+
67
+
68
+ def get_data_sampling_num_workers(param_dict):
69
+ if DATA_SAMPLING in param_dict.keys():
70
+ return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_NUM_WORKERS,
71
+ DATA_SAMPLING_NUM_WORKERS_DEFAULT)
72
+ else:
73
+ return DATA_SAMPLING_NUM_WORKERS_DEFAULT
74
+
75
+
76
+ def get_curriculum_learning(param_dict):
77
+ output = {}
78
+ output[CURRICULUM_LEARNING_ENABLED] = get_curriculum_learning_enabled(param_dict)
79
+ if CURRICULUM_LEARNING not in param_dict.keys():
80
+ param_dict[CURRICULUM_LEARNING] = {}
81
+ sub_param_dict = param_dict[CURRICULUM_LEARNING]
82
+ if output[CURRICULUM_LEARNING_ENABLED]:
83
+ assert CURRICULUM_LEARNING_METRICS in sub_param_dict.keys(
84
+ ), f"Curriculum learning is enabled, {CURRICULUM_LEARNING_METRICS} must be specified"
85
+ for key, val in get_curriculum_learning_params(param_dict).items():
86
+ output[key] = val
87
+ return output
88
+
89
+
90
+ def get_curriculum_learning_enabled(param_dict):
91
+ if CURRICULUM_LEARNING in param_dict.keys():
92
+ return get_scalar_param(param_dict[CURRICULUM_LEARNING], CURRICULUM_LEARNING_ENABLED,
93
+ CURRICULUM_LEARNING_ENABLED_DEFAULT)
94
+ else:
95
+ return False
96
+
97
+
98
+ def get_curriculum_learning_params(param_dict):
99
+ if CURRICULUM_LEARNING in param_dict.keys():
100
+ curriculum_learning_params = copy.copy(param_dict[CURRICULUM_LEARNING])
101
+ curriculum_learning_params.pop(CURRICULUM_LEARNING_ENABLED)
102
+ return curriculum_learning_params
103
+ else:
104
+ return {}
105
+
106
+
107
+ def get_curriculum_enabled_legacy(param_dict):
108
+ if CURRICULUM_LEARNING_LEGACY in param_dict.keys():
109
+ return get_scalar_param(param_dict[CURRICULUM_LEARNING_LEGACY], CURRICULUM_ENABLED_LEGACY,
110
+ CURRICULUM_ENABLED_DEFAULT_LEGACY)
111
+ else:
112
+ return False
113
+
114
+
115
+ def get_curriculum_params_legacy(param_dict):
116
+ if CURRICULUM_LEARNING_LEGACY in param_dict.keys():
117
+ curriculum_params = copy.copy(param_dict[CURRICULUM_LEARNING_LEGACY])
118
+ curriculum_params.pop(CURRICULUM_ENABLED_LEGACY)
119
+ return curriculum_params
120
+ else:
121
+ return False
122
+
123
+
124
+ def get_data_routing(param_dict):
125
+ output = {}
126
+ output[DATA_ROUTING_ENABLED] = get_data_routing_enabled(param_dict)
127
+ if DATA_ROUTING not in param_dict.keys():
128
+ param_dict[DATA_ROUTING] = {}
129
+ sub_param_dict = param_dict[DATA_ROUTING]
130
+ output[RANDOM_LTD] = get_random_ltd(sub_param_dict)
131
+
132
+ return output
133
+
134
+
135
+ def get_data_routing_enabled(param_dict):
136
+ if DATA_ROUTING in param_dict.keys():
137
+ return get_scalar_param(param_dict[DATA_ROUTING], DATA_ROUTING_ENABLED, DATA_ROUTING_ENABLED_DEFAULT)
138
+ else:
139
+ return False
140
+
141
+
142
+ def get_random_ltd(param_dict):
143
+ output = {}
144
+ output[RANDOM_LTD_ENABLED] = RANDOM_LTD_ENABLED_DEFAULT
145
+ output[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE] = {}
146
+ output[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][
147
+ RANDOM_LTD_LAYER_TOKEN_LR_ENABLED] = RANDOM_LTD_LAYER_TOKEN_LR_ENABLED_DEFAULT
148
+ if get_random_ltd_enabled(param_dict):
149
+ output[RANDOM_LTD_ENABLED] = get_random_ltd_enabled(param_dict)
150
+ for key, val in get_random_ltd_params(param_dict).items():
151
+ output[key] = val
152
+ return output
153
+
154
+
155
+ def get_random_ltd_enabled(param_dict):
156
+ if RANDOM_LTD in param_dict.keys():
157
+ return get_scalar_param(param_dict[RANDOM_LTD], RANDOM_LTD_ENABLED, RANDOM_LTD_ENABLED_DEFAULT)
158
+ else:
159
+ return False
160
+
161
+
162
+ def get_random_ltd_params(param_dict):
163
+ if RANDOM_LTD in param_dict.keys():
164
+ random_ltd_params = copy.copy(param_dict[RANDOM_LTD])
165
+ random_ltd_params.pop(RANDOM_LTD_ENABLED)
166
+ return random_ltd_params
167
+ else:
168
+ return {}
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/constants.py ADDED
@@ -0,0 +1,116 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Data efficiency library
7
+ See sample config at https://www.deepspeed.ai/docs/config-json/data-efficiency
8
+ """
9
+ DATA_EFFICIENCY = "data_efficiency"
10
+ DATA_EFFICIENCY_ENABLED = "enabled"
11
+ DATA_EFFICIENCY_ENABLED_DEFAULT = False
12
+ DATA_EFFICIENCY_SEED = "seed"
13
+ DATA_EFFICIENCY_SEED_DEFAULT = 1234
14
+
15
+ #########################################
16
+ # Data efficiency - Data Sampling
17
+ #########################################
18
+ DATA_SAMPLING = "data_sampling"
19
+ DATA_SAMPLING_ENABLED = "enabled"
20
+ DATA_SAMPLING_ENABLED_DEFAULT = False
21
+ DATA_SAMPLING_NUM_EPOCHS = "num_epochs"
22
+ DATA_SAMPLING_NUM_EPOCHS_DEFAULT = 1000
23
+ DATA_SAMPLING_NUM_WORKERS = "num_workers"
24
+ DATA_SAMPLING_NUM_WORKERS_DEFAULT = 0
25
+
26
+ #########################################
27
+ # Data efficiency - Data Sampling - Curriculum Learning
28
+ #########################################
29
+ CURRICULUM_LEARNING = "curriculum_learning"
30
+ CURRICULUM_LEARNING_ENABLED = "enabled"
31
+ CURRICULUM_LEARNING_ENABLED_DEFAULT = False
32
+ CURRICULUM_LEARNING_CLUSTER_PATH = "data_cluster_path"
33
+ CURRICULUM_LEARNING_METRICS = "curriculum_metrics"
34
+ CURRICULUM_LEARNING_SAMPLE_PATH = "index_to_sample_path"
35
+ CURRICULUM_LEARNING_METRIC_PATH = "index_to_metric_path"
36
+ CURRICULUM_LEARNING_CLUSTERING_TYPE = "clustering_type"
37
+ CURRICULUM_LEARNING_SINGLE_CLUSTER = "single_cluster"
38
+ CURRICULUM_LEARNING_CLUSTER_PREFIX = "cluster"
39
+ CURRICULUM_LEARNING_DIFFICULTY_TYPE = "difficulty_type"
40
+ CURRICULUM_LEARNING_VALUE_BASED = "value"
41
+ CURRICULUM_LEARNING_PERCENTILE_BASED = "percentile"
42
+ CURRICULUM_LEARNING_MIN_DIFFICULTY = "min_difficulty"
43
+ CURRICULUM_LEARNING_MAX_DIFFICULTY = "max_difficulty"
44
+ CURRICULUM_LEARNING_SCHEDULE_TYPE = "schedule_type"
45
+ CURRICULUM_LEARNING_SCHEDULE_CONFIG = "schedule_config"
46
+ CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY = "difficulty"
47
+ CURRICULUM_LEARNING_SCHEDULE_MAX_STEP = "max_step"
48
+ CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP = "total_curriculum_step"
49
+ CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP = "difficulty_step"
50
+ CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE = "root_degree"
51
+ CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE = "fixed_discrete"
52
+ CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT = "fixed_root"
53
+ CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR = "fixed_linear"
54
+ CURRICULUM_LEARNING_SCHEDULE_CUSTOM = "custom"
55
+ CURRICULUM_LEARNING_CURRENT_DIFFICULTY = "current_difficulty"
56
+
57
+ CURRICULUM_LEARNING_BATCH = "batch"
58
+ CURRICULUM_LEARNING_CONSUMED_SAMPLES = "consumed_samples"
59
+ CURRICULUM_LEARNING_STEP = "curriculum_step"
60
+ CURRICULUM_LEARNING_CURRENT_DIFFICULTIES = "current_difficulties"
61
+ CURRICULUM_LEARNING_DATA_CLUSTER_PATHS = "data_cluster_paths"
62
+ CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION = "data_cluster_current_position"
63
+ CURRICULUM_LEARNING_NP_RNG_STATE = "np_rng_state"
64
+
65
+ #########################################
66
+ # Curriculum Learning legacy implementation
67
+ #########################################
68
+ CURRICULUM_LEARNING_LEGACY = "curriculum_learning"
69
+
70
+ CURRICULUM_ENABLED_LEGACY = "enabled"
71
+ CURRICULUM_ENABLED_DEFAULT_LEGACY = False
72
+
73
+ #########################################
74
+ # Data efficiency - Data Routing
75
+ #########################################
76
+ DATA_ROUTING = "data_routing"
77
+ DATA_ROUTING_ENABLED = "enabled"
78
+ DATA_ROUTING_ENABLED_DEFAULT = False
79
+
80
+ #########################################
81
+ # Data efficiency - Data Routing - Random LTD
82
+ #########################################
83
+ RANDOM_LTD = "random_ltd"
84
+ RANDOM_LTD_ENABLED = "enabled"
85
+ RANDOM_LTD_ENABLED_DEFAULT = False
86
+
87
+ RANDOM_LTD_MODEL_MASK_NAME = "model_mask_name"
88
+ RANDOM_LTD_MODEL_TYPE = "model_type"
89
+ RANDOM_LTD_MICRO_BATCH_SIZE = "micro_batch_size"
90
+ RANDOM_LTD_GLOBAL_BATCH_SIZE = "global_batch_size"
91
+ RANDOM_LTD_SAMPLE_INDEX = "sample_idx"
92
+ RANDOM_LTD_ATTENTION_MASK = "attention_mask"
93
+ RANDOM_LTD_HIDDEN_STATE_ORDER = "hidden_state_order"
94
+ RANDOM_LTD_LAYER_NUM = "random_ltd_layer_num"
95
+ RANDOM_LTD_LAYER_ID = "random_ltd_layer_id"
96
+ RANDOM_LTD_TOTAL_LAYER_NUM = "total_layer_num"
97
+ RANDOM_LTD_CONSUMED_LAYER_TOKENS = "consumed_layer_tokens"
98
+
99
+ # scheduler
100
+ RANDOM_LTD_SCHEDULER = "random_ltd_schedule"
101
+ RANDOM_LTD_MAX_VALUE = "max_value"
102
+ RANDOM_LTD_MIN_VALUE = "min_value"
103
+ RANDOM_LTD_CURRENT_VALUE = "current_value"
104
+ RANDOM_LTD_SCHEDULE_CONFIG = "schedule_config"
105
+ RANDOM_LTD_INCREASE_STEP = "seq_per_step"
106
+ RANDOM_LTD_REQUIRE_STEP = "require_steps"
107
+ RANDOM_LTD_SCHEDULER_TYPE = "schedule_type"
108
+ RANDOM_LTD_CURR_STEP = "current_steps"
109
+
110
+ # learning rate schedulers
111
+ RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE = "layer_token_lr_schedule"
112
+ RANDOM_LTD_LAYER_TOKEN_LR_ENABLED = "enabled"
113
+ RANDOM_LTD_LAYER_TOKEN_LR_ENABLED_DEFAULT = False
114
+ RANDOM_LTD_TOTAL_LAYER_TOKENS = "total_layer_tokens"
115
+ RANDOM_LTD_WARMUP_TYPE = "warmup_type"
116
+ RANDOM_LTD_WARMUP_LAYER_TOKENS = "warmup_layer_tokens"
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/curriculum_scheduler.py ADDED
@@ -0,0 +1,158 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import math
7
+ from deepspeed.utils import logger
8
+ from .constants import *
9
+
10
+
11
+ class CurriculumScheduler(object):
12
+
13
+ def __init__(self, config):
14
+ super().__init__()
15
+ self.state = {}
16
+ assert CURRICULUM_LEARNING_MIN_DIFFICULTY in config, \
17
+ f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MIN_DIFFICULTY}'"
18
+ assert CURRICULUM_LEARNING_MAX_DIFFICULTY in config, \
19
+ f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MAX_DIFFICULTY}'"
20
+ assert CURRICULUM_LEARNING_SCHEDULE_TYPE in config, \
21
+ f"Curriculum learning requires the config '{CURRICULUM_LEARNING_SCHEDULE_TYPE}'"
22
+ self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY] = config[CURRICULUM_LEARNING_MIN_DIFFICULTY]
23
+ self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] = config[CURRICULUM_LEARNING_MAX_DIFFICULTY]
24
+ self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = config[CURRICULUM_LEARNING_MIN_DIFFICULTY]
25
+ self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] = config[CURRICULUM_LEARNING_SCHEDULE_TYPE]
26
+ self.first_step = True
27
+ if config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE:
28
+ """
29
+ The schedule_config is a list of difficulty values and a list of the max
30
+ step for each difficulty. Example json config:
31
+ "schedule_config": {
32
+ "difficulty": [1,2,3],
33
+ "max_step": [5,10]
34
+ }
35
+ The "max_step" has one less element than "difficulty", because
36
+ the last difficulty will be used for all following steps.
37
+ The self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] is a dictionary of
38
+ difficulty : [max step for this difficulty, next difficulty].
39
+ """
40
+ assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
41
+ f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY}'"
42
+ assert CURRICULUM_LEARNING_SCHEDULE_MAX_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
43
+ f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_MAX_STEP}'"
44
+ assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) > 0
45
+ assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) > 0
46
+ assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) == len(
47
+ config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) + 1
48
+ self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
49
+ elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT:
50
+ """
51
+ The schedule_config includes:
52
+ total_curriculum_step: how many steps the curriculum learning takes to go
53
+ from min difficulty to max difficulty.
54
+ difficulty_step: the difficulty level determined every time must
55
+ be a multiple of this difficulty_step. This is used to determine
56
+ the step of difficulty increase, and to ensure the use of NVIDIA
57
+ Tensor Core acceleration (requires multiple of 8 (FP16) or
58
+ 16 (INT8)).
59
+ root_degree: the degree of the root function. Degree of 2 means
60
+ square root and degree of 3 means cube root. Degree of 1 is
61
+ equivalent to linear.
62
+ "schedule_config": {
63
+ "total_curriculum_step": 30000,
64
+ "difficulty_step": 8,
65
+ "root_degree": 2
66
+ }
67
+ """
68
+ assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
69
+ f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'"
70
+ assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
71
+ f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'"
72
+ assert CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
73
+ f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE}'"
74
+ if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0:
75
+ logger.warning(
76
+ f'When using seqlen metric, the difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your metric/hardware.'
77
+ )
78
+ self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
79
+ elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR:
80
+ """
81
+ The schedule_config is the same as CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT but without the
82
+ root_degree.
83
+ "schedule_config": {
84
+ "total_curriculum_step": 30000,
85
+ "difficulty_step": 8
86
+ }
87
+ """
88
+ assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
89
+ f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'"
90
+ assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
91
+ f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'"
92
+ if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0:
93
+ logger.warning(
94
+ f'When using seqlen metric, the difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your metric/hardware.'
95
+ )
96
+ self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
97
+ elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM:
98
+ """
99
+ Fully customized schedule. The user needs to provide a custom schedule
100
+ function by using the set_custom_curriculum_learning_schedule API
101
+ in deepspeed/runtime/engine.py
102
+ """
103
+ self.custom_get_difficulty = None
104
+ else:
105
+ raise RuntimeError('Unsupported curriculum schedule type')
106
+
107
+ def get_current_difficulty(self):
108
+ return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY]
109
+
110
+ def set_current_difficulty(self, difficulty):
111
+ self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = difficulty
112
+
113
+ def set_custom_get_difficulty(self, schedule_function):
114
+ self.custom_get_difficulty = schedule_function
115
+
116
+ def get_state(self):
117
+ return self.state
118
+
119
+ def set_state(self, state):
120
+ self.state = state
121
+
122
+ def __fixed_discrete_get_difficulty(self, global_steps):
123
+ s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
124
+ if global_steps > s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][-1]:
125
+ return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][-1]
126
+ for i in range(len(s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP])):
127
+ if global_steps <= s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][i]:
128
+ return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][i]
129
+
130
+ def __fixed_root_get_difficulty(self, global_steps, root_degree=None):
131
+ s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
132
+ if root_degree is None:
133
+ root_degree = s_state[CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE]
134
+ next_difficulty = (float(global_steps) / s_state[CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP])**(1.0 / root_degree)
135
+ next_difficulty = math.floor(
136
+ next_difficulty *
137
+ (self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] - self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY]) +
138
+ self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY])
139
+ next_difficulty -= (next_difficulty % s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP])
140
+ next_difficulty = min(next_difficulty, self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY])
141
+ return next_difficulty
142
+
143
+ def get_difficulty(self, global_steps):
144
+ if self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE:
145
+ return self.__fixed_discrete_get_difficulty(global_steps)
146
+ elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR:
147
+ return self.__fixed_root_get_difficulty(global_steps, 1)
148
+ elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT:
149
+ return self.__fixed_root_get_difficulty(global_steps)
150
+ elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM:
151
+ return self.custom_get_difficulty(global_steps)
152
+ else:
153
+ raise RuntimeError('Unsupported curriculum schedule type')
154
+
155
+ def update_difficulty(self, global_steps):
156
+ if self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] < self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY]:
157
+ self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = self.get_difficulty(global_steps)
158
+ return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY]
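A minimal usage sketch of the scheduler above (config values hypothetical; the key strings map to the constants imported at the top of the file):

    config = {
        "min_difficulty": 8,
        "max_difficulty": 1024,
        "schedule_type": "fixed_linear",
        "schedule_config": {"total_curriculum_step": 10000, "difficulty_step": 8},
    }
    scheduler = CurriculumScheduler(config)
    for step in (1, 2500, 5000, 10000):
        print(step, scheduler.update_difficulty(step))
    # under the fixed_linear formula this prints difficulties 8, 256, 512 and 1024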
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ '''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py ADDED
@@ -0,0 +1,880 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import sys
8
+ from collections import defaultdict
9
+ import csv
10
+ import time
11
+ from multiprocessing import Process, Manager
12
+ import numpy as np
13
+ import torch
14
+ from torch.utils.data import BatchSampler, SequentialSampler, DataLoader, Subset
15
+
16
+ import deepspeed.comm as dist
17
+ from deepspeed.utils import logger
18
+ from deepspeed.runtime.data_pipeline.data_sampling.indexed_dataset import MMapIndexedDataset, valid_dtypes
19
+ from deepspeed.runtime.data_pipeline.data_sampling.utils import split_dataset, split_index, create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype
20
+
21
+
22
+ class DataAnalyzer(object):
23
+
24
+ def __init__(self,
25
+ dataset,
26
+ num_workers=1,
27
+ worker_id=0,
28
+ num_threads=1,
29
+ num_threads_reduce=1,
30
+ specific_threads=[],
31
+ batch_size=1,
32
+ metric_names=[],
33
+ metric_functions=[],
34
+ metric_types=[],
35
+ metric_dtypes=[],
36
+ save_path="./",
37
+ collate_fn=None,
38
+ custom_map_init=None,
39
+ custom_map_update=None,
40
+ custom_map_finalize=None,
41
+ custom_reduce=None,
42
+ sample_indices=None):
43
+ super().__init__()
44
+ self.dataset = dataset
45
+ self.num_workers = num_workers
46
+ self.worker_id = worker_id
47
+ self.num_threads = num_threads
48
+ self.num_threads_reduce = num_threads_reduce
49
+ self.specific_threads = specific_threads
50
+ self.batch_size = batch_size
51
+ self.metric_names = metric_names
52
+ self.metric_functions = metric_functions
53
+ self.metric_types = metric_types
54
+ self.metric_dtypes = metric_dtypes
55
+ self.save_path = save_path
56
+ self.collate_fn = collate_fn
57
+ self.custom_map_init = custom_map_init
58
+ self.custom_map_update = custom_map_update
59
+ self.custom_map_finalize = custom_map_finalize
60
+ self.custom_reduce = custom_reduce
61
+ self.sample_indices = sample_indices
62
+
63
+ def init_metric_results(self, thread_id, metric_names, metric_types, metric_dtypes, save_path, worker_id):
64
+ metric_results = []
65
+ for m_idx in range(len(metric_names)):
66
+ metric_name, metric_type, metric_dtype = metric_names[m_idx], \
67
+ metric_types[m_idx], metric_dtypes[m_idx]
68
+ assert metric_dtype in valid_dtypes, f"metric_dtype {metric_dtype} not supported. Supported dtypes {valid_dtypes}"
69
+ metric_save_path = f"{save_path}/{metric_name}/worker{worker_id}_thread{thread_id}/"
70
+ os.makedirs(metric_save_path, exist_ok=True)
71
+ if metric_type == 'single_value_per_sample':
72
+ sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
73
+ sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_dtype)
74
+ metric_to_sample_fname = f"{metric_save_path}/{metric_name}_metric_to_sample"
75
+ os.system(f"rm -rf {metric_to_sample_fname}*")
76
+ metric_to_sample_dict = defaultdict(list)
77
+ metric_results.append({
78
+ "sample_to_metric_fname": sample_to_metric_fname,
79
+ "sample_to_metric_builder": sample_to_metric_builder,
80
+ "metric_to_sample_fname": metric_to_sample_fname,
81
+ "metric_to_sample_dict": metric_to_sample_dict
82
+ })
83
+ elif metric_type == 'accumulate_value_over_samples':
84
+ metric_value = None
85
+ metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
86
+ metric_results.append({"metric_value": metric_value, "metric_value_fname": metric_value_fname})
87
+ return metric_results
88
+
89
+ def update_metric_results(self,
90
+ data,
91
+ metric_types,
92
+ metric_dtypes,
93
+ metric_functions,
94
+ metric_results,
95
+ batch_start_idx=0):
96
+ for m_idx in range(len(metric_types)):
97
+ metric_type, metric_dtype, metric_function, metric_result = metric_types[m_idx], \
98
+ metric_dtypes[m_idx], metric_functions[m_idx], metric_results[m_idx]
99
+ metric_values = metric_function(data)
100
+
101
+ assert torch.is_tensor(metric_values) or isinstance(metric_values, np.ndarray), \
102
+ "metric_function must return a tensor or array"
103
+ assert metric_values.dtype == metric_dtype, \
104
+ f"metric_function result dtype {metric_values.dtype} does not match metric_dtype {metric_dtype}"
105
+ if isinstance(metric_values, np.ndarray):
106
+ metric_values = torch.from_numpy(metric_values)
107
+
108
+ if metric_type == 'single_value_per_sample':
109
+ for row in range(metric_values.size()[0]):
110
+ sample_idx = batch_start_idx + row # sample idx following dataset iteration order
111
+ if isinstance(data, dict) and 'index' in data: # Megatron use case, idx provided in 'index' field
112
+ sample_idx = data['index'][row][0].item()
113
+ elif self.sample_indices is not None: # user defined shuffling of indices
114
+ sample_idx = self.sample_indices[sample_idx]
115
+ metric_result["sample_to_metric_builder"].add_item(metric_values[row].reshape(-1))
116
+ metric_result["metric_to_sample_dict"][metric_values[row].item()].append(sample_idx)
117
+ for m_value in metric_result["metric_to_sample_dict"]:
118
+ if len(metric_result["metric_to_sample_dict"][m_value]) > 100:
119
+ metric_fname = metric_result["metric_to_sample_fname"]
120
+ with open(f"{metric_fname}_{m_value}.csv", 'a') as f:
121
+ writer = csv.writer(f)
122
+ writer.writerows([metric_result["metric_to_sample_dict"][m_value]])
123
+ metric_result["metric_to_sample_dict"][m_value] = []
124
+ elif metric_type == 'accumulate_value_over_samples':
125
+ if metric_result["metric_value"] is None:
126
+ metric_result["metric_value"] = metric_values
127
+ else:
128
+ metric_result["metric_value"].add_(metric_values)
129
+
130
+ def finalize_metric_results(self, metric_types, metric_dtypes, metric_results):
131
+ for m_idx in range(len(metric_types)):
132
+ metric_type, metric_dtype, metric_result = metric_types[m_idx], \
133
+ metric_dtypes[m_idx], metric_results[m_idx]
134
+ if metric_type == 'single_value_per_sample':
135
+ metric_fname = metric_result["sample_to_metric_fname"]
136
+ close_mmap_dataset_builder(metric_result["sample_to_metric_builder"], metric_fname)
137
+ for m_value in metric_result["metric_to_sample_dict"]:
138
+ if len(metric_result["metric_to_sample_dict"][m_value]) > 0:
139
+ metric_fname = metric_result["metric_to_sample_fname"]
140
+ with open(f"{metric_fname}_{m_value}.csv", 'a') as f:
141
+ writer = csv.writer(f)
142
+ writer.writerows([metric_result["metric_to_sample_dict"][m_value]])
143
+ metric_result["metric_to_sample_dict"][m_value] = []
144
+ elif metric_type == 'accumulate_value_over_samples':
145
+ if metric_result["metric_value"] is not None:
146
+ metric_value_builder = create_mmap_dataset_builder(metric_result["metric_value_fname"],
147
+ metric_dtype)
148
+ metric_value_builder.add_item(metric_result["metric_value"].reshape(-1))
149
+ close_mmap_dataset_builder(metric_value_builder, metric_result["metric_value_fname"])
150
+
151
+ def run_map_helper(self, thread_id):
152
+ start_idx, end_idx = self.thread_splits[thread_id][0], \
153
+ self.thread_splits[thread_id][1]
154
+ logger.info(f"worker {self.worker_id} thread {thread_id}: start working " \
155
+ f"on data subset {start_idx} to {end_idx}")
156
+ thread_dataset = Subset(self.dataset, list(range(start_idx, end_idx)))
157
+ sampler = BatchSampler(SequentialSampler(thread_dataset), batch_size=self.batch_size, drop_last=False)
158
+ iterator = iter(
159
+ DataLoader(thread_dataset,
160
+ batch_sampler=sampler,
161
+ num_workers=0,
162
+ collate_fn=self.collate_fn,
163
+ pin_memory=False))
164
+ if self.custom_map_init is None:
165
+ metric_results = self.init_metric_results(thread_id, self.metric_names, self.metric_types,
166
+ self.metric_dtypes, self.save_path, self.worker_id)
167
+ else:
168
+ metric_results = self.custom_map_init(thread_id, self.metric_names, self.metric_types, self.metric_dtypes,
169
+ self.save_path, self.worker_id)
170
+ total_sample = len(thread_dataset)
171
+ processed_sample = 0
172
+ start = time.time()
173
+ while True:
174
+ try:
175
+ data = next(iterator)
176
+ batch_start_idx = start_idx + processed_sample
177
+ if self.custom_map_update is None:
178
+ self.update_metric_results(data, self.metric_types, self.metric_dtypes, self.metric_functions,
179
+ metric_results, batch_start_idx)
180
+ else:
181
+ self.custom_map_update(data, self.metric_types, self.metric_dtypes, self.metric_functions,
182
+ metric_results, batch_start_idx)
183
+ processed_sample += len(data)
184
+ duration = (time.time() - start) / 3600.0
185
+ remain_duration = duration * total_sample / processed_sample - duration
186
+ logger.info(
187
+ f"worker {self.worker_id} thread {thread_id}: {processed_sample} " \
188
+ f"out of {total_sample} processed in {duration:.2f} hr, " \
189
+ f"estimated to finish in {remain_duration:.2f} hr")
190
+ except StopIteration:
191
+ logger.info(f"worker {self.worker_id} thread {thread_id}: reach end of file")
192
+ break
193
+ if self.custom_map_finalize is None:
194
+ self.finalize_metric_results(self.metric_types, self.metric_dtypes, metric_results)
195
+ else:
196
+ self.custom_map_finalize(self.metric_types, self.metric_dtypes, metric_results)
197
+ logger.info(f"worker {self.worker_id} thread {thread_id}: finished")
198
+
199
+ def run_map(self):
200
+ self.worker_splits, self.thread_splits = split_dataset(self.dataset, self.num_workers, self.worker_id,
201
+ self.num_threads)
202
+ if len(self.specific_threads) > 0:
203
+ threads_to_run = self.specific_threads
204
+ else:
205
+ threads_to_run = list(range(self.num_threads))
206
+ if self.num_threads > 1:
207
+ p = []
208
+ for thread in threads_to_run:
209
+ p.append(Process(target=self.run_map_helper, args=(thread, )))
210
+ p[thread].start()
211
+
212
+ for thread in threads_to_run:
213
+ p[thread].join()
214
+ else:
215
+ assert self.num_threads == 1
216
+ self.run_map_helper(0)
217
+
218
+ def get_metric_value_percentiles(self, metric_name, num_sample_per_value, total_num_samples):
219
+ logger.info(f"Checking the value percentiles of metric {metric_name}...")
220
+ processed_samples = 0
221
+ current_percentile = 5
222
+ for key in sorted(num_sample_per_value.keys()):
223
+ processed_samples += num_sample_per_value[key]
224
+ if processed_samples >= total_num_samples * current_percentile / 100.0:
225
+ logger.info(f"Metric {metric_name} {current_percentile}th percentile: {key}")
226
+ current_percentile += 5
227
+
228
+ def merge_gather_map_stats(self, num_workers, num_threads, num_threads_reduce, t_idx_reduce, metric_save_path,
229
+ metric_name, return_dict):
230
+ results = []
231
+ for w_idx in range(num_workers):
232
+ for t_idx in range(num_threads):
233
+ if (w_idx * num_threads + t_idx) % num_threads_reduce == t_idx_reduce:
234
+ w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
235
+ w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric"
236
+ w_sample_to_metric = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True)
237
+ unique_v = list(np.unique(w_sample_to_metric))
238
+ sample_to_metric_count = len(w_sample_to_metric)
239
+ logger.info(f"Finished gathering map stats from worker {w_idx} thread {t_idx}.")
240
+ results.append([unique_v, sample_to_metric_count])
241
+ return_dict[t_idx_reduce] = results
242
+
243
+ def merge_sample_to_metric(self, t_idx_reduce, metric_save_path, metric_name, metric_value_dtype,
244
+ map_worker_thread):
245
+ sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
246
+ sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype)
247
+ for w_t in map_worker_thread:
248
+ w_metric_save_path = f"{metric_save_path}/worker{w_t[0]}_thread{w_t[1]}/"
249
+ w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric"
250
+ w_data = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True)
251
+ for row in range(len(w_data)):
252
+ sample_to_metric_builder.add_item(torch.tensor(w_data[row].astype(np.int64), dtype=torch.long))
253
+ logger.info(f"Finished merge_sample_to_metric from worker {w_t[0]} thread {w_t[1]}.")
254
+ close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname)
255
+
256
+ def merge_metric_to_sample(self, t_idx_reduce, metric_save_path, metric_name, sample_idx_dtype, metric_value_dtype,
257
+ unique_metric_values, num_workers, num_threads):
258
+ index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
259
+ index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype)
260
+ index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
261
+ index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype)
262
+ for unique_v in unique_metric_values:
263
+ samples = []
264
+ for w_idx in range(num_workers):
265
+ for t_idx in range(num_threads):
266
+ w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
267
+ w_metric_to_sample_fname = f"{w_metric_save_path}/{metric_name}_metric_to_sample_{unique_v}.csv"
268
+ if os.path.isfile(w_metric_to_sample_fname):
269
+ with open(w_metric_to_sample_fname, 'r') as f:
270
+ datareader = csv.reader(f)
271
+ for row in datareader:
272
+ samples += [int(x) for x in row]
273
+ index_to_sample_builder.add_item(torch.tensor(samples, dtype=torch.long))
274
+ index_to_metric_builder.add_item(torch.tensor([unique_v], dtype=torch.long))
275
+ logger.info(f"Finished reducing metric {metric_name} value {unique_v}.")
276
+ close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname)
277
+ close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname)
278
+
279
+ def merge_map_results(self, dataset, metric_names, metric_types, save_path, num_workers, num_threads,
280
+ num_threads_reduce):
281
+ total_num_samples = len(dataset)
282
+ sample_idx_dtype = find_fit_int_dtype(0, total_num_samples - 1)
283
+ logger.info(
284
+ f"Total number of data samples: {total_num_samples}. Will use {sample_idx_dtype} to store the sample indexes."
285
+ )
286
+ for m_idx in range(len(metric_names)):
287
+ metric_name, metric_type = metric_names[m_idx], metric_types[m_idx]
288
+ if metric_type == 'single_value_per_sample':
289
+ metric_save_path = f"{save_path}/{metric_name}/"
290
+ sample_to_metric_count = 0
291
+ unique_metric_values = set([])
292
+ manager = Manager()
293
+ return_dict = manager.dict()
294
+ p = []
295
+ for t_idx_reduce in range(num_threads_reduce):
296
+ p.append(
297
+ Process(target=self.merge_gather_map_stats,
298
+ args=(
299
+ num_workers,
300
+ num_threads,
301
+ num_threads_reduce,
302
+ t_idx_reduce,
303
+ metric_save_path,
304
+ metric_name,
305
+ return_dict,
306
+ )))
307
+ p[t_idx_reduce].start()
308
+ for t_idx_reduce in range(num_threads_reduce):
309
+ p[t_idx_reduce].join()
310
+ for t_idx_reduce in range(num_threads_reduce):
311
+ results = return_dict[t_idx_reduce]
312
+ for res in results:
313
+ unique_metric_values = unique_metric_values.union(set(res[0]))
314
+ sample_to_metric_count += res[1]
315
+ value_max = max(unique_metric_values)
316
+ value_min = min(unique_metric_values)
317
+ assert sample_to_metric_count == total_num_samples, "The number of samples in map result files is not correct. It's possible that some map worker didn't finish successfully."
318
+ metric_value_dtype = find_fit_int_dtype(value_min, value_max)
319
+ logger.info(
320
+ f"Metric {metric_name} has values between {value_min} and {value_max}. Will use {metric_value_dtype} to store the metric values."
321
+ )
322
+
323
+ # sample_to_metric
324
+ map_worker_thread = []
325
+ for w_idx in range(num_workers):
326
+ for t_idx in range(num_threads):
327
+ map_worker_thread.append([w_idx, t_idx])
328
+ thread_splits = split_index(0, len(map_worker_thread), num_threads_reduce)
329
+ p = []
330
+ for t_idx_reduce in range(num_threads_reduce):
331
+ start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1]
332
+ p.append(
333
+ Process(target=self.merge_sample_to_metric,
334
+ args=(
335
+ t_idx_reduce,
336
+ metric_save_path,
337
+ metric_name,
338
+ metric_value_dtype,
339
+ map_worker_thread[start_idx:end_idx],
340
+ )))
341
+ p[t_idx_reduce].start()
342
+ for t_idx_reduce in range(num_threads_reduce):
343
+ p[t_idx_reduce].join()
344
+
345
+ sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
346
+ sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype)
347
+ for t_idx_reduce in range(num_threads_reduce):
348
+ chunk_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
349
+ logger.info(f"Merging file {chunk_fname}")
350
+ sample_to_metric_builder.merge_file_(chunk_fname)
351
+ close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname)
352
+ sample_to_metric = MMapIndexedDataset(sample_to_metric_fname, skip_warmup=True)
353
+ assert len(sample_to_metric) == total_num_samples
354
+
355
+ # metric_to_sample
356
+ unique_metric_values = list(sorted(unique_metric_values))
357
+ thread_splits = split_index(0, len(unique_metric_values), num_threads_reduce)
358
+ p = []
359
+ for t_idx_reduce in range(num_threads_reduce):
360
+ start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1]
361
+ p.append(
362
+ Process(target=self.merge_metric_to_sample,
363
+ args=(
364
+ t_idx_reduce,
365
+ metric_save_path,
366
+ metric_name,
367
+ sample_idx_dtype,
368
+ metric_value_dtype,
369
+ unique_metric_values[start_idx:end_idx],
370
+ num_workers,
371
+ num_threads,
372
+ )))
373
+ p[t_idx_reduce].start()
374
+ for t_idx_reduce in range(num_threads_reduce):
375
+ p[t_idx_reduce].join()
376
+ index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample"
377
+ index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype)
378
+ index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric"
379
+ index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype)
380
+ for t_idx_reduce in range(num_threads_reduce):
381
+ chunk_is_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
382
+ logger.info(f"Merging file {chunk_is_fname}")
383
+ index_to_sample_builder.merge_file_(chunk_is_fname)
384
+ chunk_im_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
385
+ logger.info(f"Merging file {chunk_im_fname}")
386
+ index_to_metric_builder.merge_file_(chunk_im_fname)
387
+ close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname)
388
+ close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname)
389
+
390
+ num_sample_per_value = DataAnalyzer.output_index_to_sample_percentile(
391
+ index_to_sample_fname, index_to_metric_fname, metric_name, metric_save_path, total_num_samples,
392
+ sample_idx_dtype)
393
+ self.get_metric_value_percentiles(metric_name, num_sample_per_value, total_num_samples)
394
+ elif metric_type == 'accumulate_value_over_samples':
395
+ metric_save_path = f"{save_path}/{metric_name}/"
396
+ metric_value = None
397
+ for w_idx in range(num_workers):
398
+ for t_idx in range(num_threads):
399
+ w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
400
+ w_metric_value_fname = f"{w_metric_save_path}/{metric_name}_metric_value"
401
+ w_metric_value = MMapIndexedDataset(w_metric_value_fname, skip_warmup=True)
402
+ if metric_value is None:
403
+ metric_value = np.copy(w_metric_value[0])
404
+ else:
405
+ metric_value += np.copy(w_metric_value[0])
406
+ value_max = int(max(metric_value))
407
+ value_min = int(min(metric_value))
408
+ metric_value_dtype = find_fit_int_dtype(value_min, value_max)
409
+ metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
410
+ metric_value_builder = create_mmap_dataset_builder(metric_value_fname, metric_value_dtype)
411
+ metric_value_builder.add_item(torch.tensor(metric_value.astype(np.int64), dtype=torch.long))
412
+ close_mmap_dataset_builder(metric_value_builder, metric_value_fname)
413
+
414
+ @staticmethod
415
+ def output_index_to_sample_percentile(index_to_sample_fname, index_to_metric_fname, metric_name, metric_save_path,
416
+ total_num_samples, sample_idx_dtype):
417
+ """ read index_to_metric and index_to_sample files and write distribution to index_to_sample_percentage_merged """
418
+ num_sample_per_value = {}
419
+ index_to_sample = MMapIndexedDataset(index_to_sample_fname, skip_warmup=True)
420
+ index_to_metric = MMapIndexedDataset(index_to_metric_fname, skip_warmup=True)
421
+ index_to_sample_merged_fname = f"{metric_save_path}/{metric_name}_index_to_sample_percentile_merged"
422
+ index_to_sample_merged_builder = create_mmap_dataset_builder(index_to_sample_merged_fname, sample_idx_dtype)
423
+ for v_idx in range(len(index_to_sample)):
424
+ if v_idx > 0:
425
+ assert index_to_metric[v_idx] > index_to_metric[v_idx - 1]
426
+ num_sample_per_value[index_to_metric[v_idx][0]] = len(index_to_sample[v_idx])
427
+ assert sum(list(num_sample_per_value.values())) == total_num_samples
428
+ merge_step = max(1, len(index_to_sample) // 100)
429
+ for v_idx in range(0, len(index_to_sample), merge_step):
430
+ merged_samples = np.copy(
431
+ np.concatenate(index_to_sample[v_idx:min(len(index_to_sample), (v_idx + merge_step))], axis=None))
432
+ index_to_sample_merged_builder.add_item(torch.tensor(merged_samples.astype(np.int64), dtype=torch.long))
433
+ logger.info(f"Finished merging index_to_sample {v_idx} to {v_idx+merge_step}.")
434
+ close_mmap_dataset_builder(index_to_sample_merged_builder, index_to_sample_merged_fname)
435
+ return num_sample_per_value
436
+
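The percentile merge above walks the distinct metric values in roughly 1% chunks and concatenates each chunk's sample ids into one row of the merged file. A minimal sketch of that bucketing step on hypothetical in-memory data (plain numpy arrays instead of the mmap datasets and builders):

import numpy as np

# hypothetical rows: one array of sample ids per distinct metric value
index_to_sample = [np.array([0, 3]), np.array([1]), np.array([2, 4, 5]), np.array([6])]
total_num_samples = sum(len(r) for r in index_to_sample)

merge_step = max(1, len(index_to_sample) // 100)  # about 1% of the metric values per merged row
merged_rows = []
for v_idx in range(0, len(index_to_sample), merge_step):
    chunk = index_to_sample[v_idx:v_idx + merge_step]
    merged_rows.append(np.concatenate(chunk, axis=None))

assert sum(len(r) for r in merged_rows) == total_num_samples  # no sample lost in the merge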
437
+ def run_reduce(self):
438
+ if self.custom_reduce is None:
439
+ self.merge_map_results(self.dataset, self.metric_names, self.metric_types, self.save_path,
440
+ self.num_workers, self.num_threads, self.num_threads_reduce)
441
+ else:
442
+ self.custom_reduce(self.dataset, self.metric_names, self.metric_types, self.save_path, self.num_workers,
443
+ self.num_threads, self.num_threads_reduce)
444
+
445
+ def run_map_reduce(self, comm_group=None):
446
+ self.run_map()
447
+ # wait for the mapping operation, where all nodes output their own (partial) result files
448
+ dist.barrier(group=comm_group)
449
+ if self.worker_id == 0:
450
+ self.run_reduce()
451
+ # wait for the reduce, where rank 0 merges all (partial) files. Dataset can then be used by all nodes.
452
+ dist.barrier(group=comm_group)
453
+
454
+
455
+ class DistributedDataAnalyzer(object):
456
+
457
+ def __init__(
458
+ self,
459
+ dataset,
460
+ num_workers=1,
461
+ num_threads=1,
462
+ worker_id=0,
463
+ batch_size=1,
464
+ metric_names=[],
465
+ metric_functions=[],
466
+ metric_types=[],
467
+ save_path="./",
468
+ collate_fn=None,
469
+ device='cuda',
470
+ comm_group=None,
471
+ sample_indices=None,
472
+ ) -> None:
473
+ self.dataset = dataset
474
+ self.batch_size = batch_size
475
+ self.metric_names = metric_names
476
+ self.metric_functions = metric_functions
477
+ self.metric_types = metric_types
478
+ self.save_path = save_path
479
+ self.collate_fn = collate_fn
480
+ self.device = device
481
+ self.sample_indices = sample_indices
482
+ self.num_threads = num_threads
483
+ self.worker_id = worker_id
484
+
485
+ if not dist.is_initialized():
486
+ dist.init_distributed()
487
+
488
+ # comm_group and worker_id+num_workers are mutually exclusive
489
+ self.comm_group = comm_group
490
+ if self.comm_group is None:
491
+ # self.comm_group = deepspeed.utils.groups._clone_world_group()
492
+ self.num_workers = num_workers
493
+ self.worker_id = worker_id
494
+ else:
495
+ self.num_workers = self.comm_group.size()
496
+ self.worker_id = self.comm_group.rank()
497
+
498
+ if self.worker_id == 0:
499
+ logger.info(f"Distributed data analyzer initialized with {self.num_workers} workers.")
500
+
501
+ def run_map_helper(self, thread_id=0, metric_queues=None):
502
+ thread_start_idx, thread_end_idx = self.thread_splits[thread_id][0], self.thread_splits[thread_id][1]
503
+ worker_dataset = Subset(self.dataset, list(range(thread_start_idx, thread_end_idx)))
504
+ sampler = BatchSampler(SequentialSampler(worker_dataset), batch_size=self.batch_size, drop_last=False)
505
+ dataloader = DataLoader(dataset=worker_dataset,
506
+ batch_sampler=sampler,
507
+ num_workers=0,
508
+ collate_fn=self.collate_fn,
509
+ pin_memory=False)
510
+
511
+ # set initial results list
512
+ metric_results = []
513
+ for metric_type in self.metric_types:
514
+ assert metric_type in ['single_value_per_sample', 'accumulate_value_over_samples'], \
515
+ f"metric_type {metric_type} not implemented."
516
+ metric_results.append([] if metric_type == 'single_value_per_sample' else None)
517
+
518
+ # iterate dataloader and store metric results
519
+ batch_start_idx = thread_start_idx
520
+ for data in dataloader:
521
+ for m_idx in range(len(self.metric_names)):
522
+ metric_type, metric_function = self.metric_types[m_idx], self.metric_functions[m_idx]
523
+ metric_values = metric_function(data)
524
+ assert torch.is_tensor(metric_values) or isinstance(metric_values, np.ndarray), \
525
+ "metric_function must return a tensor or array"
526
+ if isinstance(metric_values, np.ndarray):
527
+ metric_values = torch.from_numpy(metric_values)
528
+ assert metric_values.dtype in valid_dtypes, \
529
+ f"metric_function result dtype {metric_values.dtype} not supported. Supported dtypes {valid_dtypes}"
530
+
531
+ if metric_type == 'single_value_per_sample':
532
+ for row in range(metric_values.size()[0]):
533
+ value = metric_values[row].item()
534
+ sample_idx = batch_start_idx + row # sample idx following dataset iteration order
535
+ if isinstance(data, dict) and 'index' in data: # Megatron use case
536
+ sample_idx = data['index'][row][0].item()
537
+ elif self.sample_indices is not None: # user defined shuffling of indices
538
+ sample_idx = self.sample_indices[sample_idx]
539
+ metric_results[m_idx].append((value, sample_idx))
540
+ elif metric_type == 'accumulate_value_over_samples':
541
+ if metric_results[m_idx] is None:
542
+ metric_results[m_idx] = metric_values
543
+ else:
544
+ metric_results[m_idx].add_(metric_values)
545
+ batch_start_idx += len(data)
546
+
547
+ if self.num_threads == 1:
548
+ return metric_results
549
+
550
+ # copy metric_results to the shared queue
551
+ assert metric_queues
552
+ for m_idx in range(len(self.metric_names)):
553
+ results = metric_results[m_idx]
554
+ if torch.is_tensor(results):
555
+ results = results.item() if results.dim() == 0 else results.tolist()
556
+ try:
557
+ metric_queues[m_idx].put((thread_id, results))
558
+ except Exception as e:
559
+ logger.error(f"Error putting metric results to queue: {e}")
560
+ sys.exit(1)
561
+
562
+ def run_map_reduce(self):
563
+
564
+ # setup individual dataloaders
565
+ self.worker_splits, self.thread_splits = split_dataset(self.dataset,
566
+ self.num_workers,
567
+ self.worker_id,
568
+ num_threads=self.num_threads)
569
+ node_start_idx, node_end_idx = self.worker_splits[self.worker_id]
570
+ logger.info(f"worker {self.worker_id} working on data subset {node_start_idx} to {node_end_idx}.")
571
+
572
+ if self.num_threads in [0, 1, None]:
573
+ metric_results = self.run_map_helper()
574
+ metric_results = [torch.tensor(m).to(self.device) for m in metric_results]
575
+ else:
576
+
577
+ # create a shared queue of results per metric to be populated by individual threads
578
+ with Manager() as manager:
579
+ metric_queues = [manager.Queue() for _ in self.metric_names]
580
+ threads = [
581
+ Process(target=self.run_map_helper, args=(t, metric_queues)) for t in range(self.num_threads)
582
+ ]
583
+ for thread in threads:
584
+ thread.start()
585
+ for thread in threads:
586
+ thread.join()
587
+
588
+ # gather results from shared queues into metric_results
589
+ metric_results = [None for _ in self.metric_names]
590
+ for m_idx, (queue, metric_type) in enumerate(zip(metric_queues, self.metric_types)):
591
+ while not queue.empty():
592
+ t_idx, t_results = queue.get()
593
+ t_start_idx, t_end_idx = self.thread_splits[t_idx]
594
+ if t_start_idx >= t_end_idx: # no results from this thread
595
+ continue #corner case for small datasets and high thread count
596
+ t_results = torch.tensor(t_results)
597
+ if metric_type == 'single_value_per_sample':
598
+ # add thread results to the metric_results list, ordered by thread idx
599
+ if metric_results[m_idx] is None: # initialize if needed
600
+ metric_results[m_idx] = torch.zeros(node_end_idx - node_start_idx,
601
+ t_results.size(1)).to(self.device)
602
+ metric_results[m_idx][t_start_idx - node_start_idx:t_end_idx - node_start_idx] = t_results
603
+ else:
604
+ if metric_results[m_idx] is None: # initialize if needed
605
+ metric_results[m_idx] = torch.zeros(t_results.size()).to(self.device)
606
+ metric_results[m_idx].add_(t_results)
607
+
608
+ # compute dtype for sample ids
609
+ total_num_samples = len(self.dataset)
610
+ sample_idx_dtype = find_fit_int_dtype(0, total_num_samples - 1)
611
+ logger.info(f"Total number of data samples: {total_num_samples}.")
612
+ logger.info(f"Will use {sample_idx_dtype} to store the sample indexes.")
613
+
614
+ for m_idx in range(len(self.metric_names)):
615
+ metric_values, metric_name, metric_type = \
616
+ metric_results[m_idx], self.metric_names[m_idx], self.metric_types[m_idx]
617
+ metric_save_path = f"{self.save_path}/{metric_name}/"
618
+ os.makedirs(metric_save_path, exist_ok=True)
619
+
620
+ if metric_type == 'single_value_per_sample':
621
+
622
+ # Compute sample and metric value dtypes based on range
623
+ values, samples = metric_values[:, 0], metric_values[:, 1]
624
+ value_min, value_max = Dist.min_max(values, self.comm_group)
625
+ sample_min, sample_max = Dist.min_max(samples, self.comm_group)
626
+ metric_value_dtype = find_fit_int_dtype(value_min, value_max)
627
+ sample_value_dtype = find_fit_int_dtype(sample_min, sample_max)
628
+
629
+ # sample_to_metric maps sample ids to metric values, as a list of metric values
630
+ sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
631
+ values = [torch.tensor([x]) for x in metric_values[:, 0]]
632
+ self.file_write_ordered(values, sample_to_metric_fname, metric_value_dtype)
633
+
634
+ # distributed sorting by values, gives an ordered disjoint subset of keys on nodes
635
+ metric_values = Dist.sample_sort(metric_values, self.comm_group, self.num_workers)
636
+ metric_to_samples_dict = {}
637
+ if len(metric_values) > 0:
638
+ for value, sample in metric_values:
639
+ if value.item() not in metric_to_samples_dict:
640
+ metric_to_samples_dict[value.item()] = []
641
+ metric_to_samples_dict[value.item()].append(sample.item())
642
+
643
+ # index_to_metric and index_to_sample serialize a dictionary from metric to samples
644
+ # index_to_metric stores a key per row, index_to_sample stores the values per row
645
+ values = [torch.tensor([x]) for x in metric_to_samples_dict.keys()]
646
+ samples = [torch.tensor(metric_to_samples_dict[x]) for x in metric_to_samples_dict.keys()]
647
+ index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric" #dict keys
648
+ index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample" #dict values
649
+ self.file_write_ordered(values, index_to_metric_fname, metric_value_dtype)
650
+ self.file_write_ordered(samples, index_to_sample_fname, sample_value_dtype)
651
+
652
+ if self.worker_id == 0:
653
+ DataAnalyzer.output_index_to_sample_percentile(index_to_sample_fname, index_to_metric_fname,
654
+ metric_name, metric_save_path, total_num_samples,
655
+ sample_idx_dtype)
656
+ dist.barrier(self.comm_group)
657
+
658
+ elif metric_type == 'accumulate_value_over_samples':
659
+ metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
660
+ dist.reduce(metric_values, dst=0, op=dist.ReduceOp.SUM, group=self.comm_group)
661
+ metric_value_dtype = find_fit_int_dtype(metric_values.min(), metric_values.max())
662
+
663
+ if self.worker_id == 0:
664
+ builder = create_mmap_dataset_builder(metric_value_fname, metric_value_dtype)
665
+ builder.add_item(metric_values.cpu())
666
+ close_mmap_dataset_builder(builder, metric_value_fname)
667
+ dist.barrier(self.comm_group)
668
+
669
+ def file_write_ordered(self, tensor_list, fname, numpy_dtype):
670
+ """ MPI_file_write_ordered extended to write a list of tensors, by one rank, iteratively """
671
+
672
+ # each node has a list of rows (tensors) to be written to the file.
673
+ # we will serialize it in order to communicate it in one comm step.
674
+
675
+ tkwargs = dict(dtype=torch.int64, device=self.device)
676
+
677
+ # 1. gather on rank 0 the number of rows to be sent/recv
678
+ row_count = torch.tensor([len(tensor_list)], **tkwargs)
679
+ row_counts = torch.zeros(self.num_workers, **tkwargs)
680
+ dist.all_gather_into_tensor(row_counts, row_count, group=self.comm_group)
681
+ assert row_counts[self.worker_id] == row_count == len(tensor_list), "all_gather failed"
682
+
683
+ # 2. gather on rank 0 the sizes of the rows to be sent/recv
684
+ row_len = torch.tensor([len(l) for l in tensor_list], **tkwargs)
685
+ row_lens = Dist.gather_v(row_len, 0, self.comm_group, self.num_workers, self.worker_id)
686
+
687
+ # 3. gather on rank 0 the total size (sum of all row lengths) to be received
688
+ size = torch.tensor([sum(row_len).item()], **tkwargs)
689
+ sizes = torch.zeros(self.num_workers, **tkwargs)
690
+ dist.all_gather_into_tensor(sizes, size, group=self.comm_group)
691
+ assert sizes[self.worker_id] == size.item(), "all_gather did not return the same sizes" #sanity check
692
+
693
+ # 4. helper to deserialize a buffer into rows of different lengths and write them to file
694
+ def write_buffer_to_file(buff, src, builder):
695
+ assert self.worker_id == 0, "only rank 0 can write to file"
696
+
697
+ # collect all buffers and write them at once
698
+ buff = buff.cpu().detach().numpy()
699
+ row_offsets = np.cumsum([0] + row_lens[src].tolist())
700
+ arr_list = []
701
+ for i in range(len(row_lens[src])):
702
+ arr_list.append(buff[row_offsets[i]:row_offsets[i + 1]])
703
+ builder.add_items(arr_list)
704
+
705
+ # 5. rank 0 prepares output folder and file
706
+ if self.worker_id == 0:
707
+ os.makedirs(os.path.dirname(fname), exist_ok=True)
708
+ builder = create_mmap_dataset_builder(fname, numpy_dtype)
709
+
710
+ # iterate through ranks that have data to be sent/recv/written
711
+ for src in [rank for rank, count in enumerate(row_counts) if count > 0]:
712
+
713
+ dist.barrier(group=self.comm_group)
714
+ if self.worker_id == 0 and src == 0: # rank 0 writes its own data
715
+ buffer = torch.cat(tensor_list, dim=0).to(self.device)
716
+ write_buffer_to_file(buffer, 0, builder)
717
+ elif self.worker_id == 0 and src > 0: # rank 0 receives other rank's data and writes it
718
+ buffer = torch.empty(sizes[src].item(), dtype=buffer.dtype, device=buffer.device)
719
+ err = dist.recv(buffer, src=src, group=self.comm_group, tag=src)
720
+ assert err == src and len(buffer) > 0, "recv failed"
721
+ write_buffer_to_file(buffer, src, builder)
722
+ elif self.worker_id == src: # current rank sends data to rank 0
723
+ buffer = torch.cat(tensor_list, dim=0).to(self.device)
724
+ dist.send(buffer, 0, group=self.comm_group, tag=src)
725
+
726
+ # rank 0 closes the file
727
+ if self.worker_id == 0:
728
+ close_mmap_dataset_builder(builder, fname) # close file
729
+ dist.barrier(self.comm_group)
730
+
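Inside write_buffer_to_file each received buffer is the flat concatenation of one rank's rows, so the row boundaries have to be recovered from the row lengths gathered earlier. A single-process sketch of that offset arithmetic, using hypothetical row lengths and no deepspeed.comm calls:

import numpy as np
import torch

# hypothetical rows a rank would send, and the flat buffer built from them
tensor_list = [torch.tensor([1, 2, 3]), torch.tensor([4]), torch.tensor([5, 6])]
row_lens = [len(t) for t in tensor_list]
buffer = torch.cat(tensor_list, dim=0).numpy()

# cumulative offsets delimit each row inside the flat buffer
row_offsets = np.cumsum([0] + row_lens)
rows = [buffer[row_offsets[i]:row_offsets[i + 1]] for i in range(len(row_lens))]

assert all((r == t.numpy()).all() for r, t in zip(rows, tensor_list))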
731
+
732
+ class Dist:
733
+ """ auxiliary class to perform distributed operations on tensors"""
734
+
735
+ @staticmethod
736
+ def min_max(tensor, comm_group):
737
+ """ given a distributed tensor, return the min/max values across all ranks"""
738
+
739
+ value_min, value_max = tensor.min(), tensor.max()
740
+ dist.reduce(value_min, 0, op=dist.ReduceOp.MIN, group=comm_group)
741
+ dist.reduce(value_max, 0, op=dist.ReduceOp.MAX, group=comm_group)
742
+ return value_min.item(), value_max.item()
743
+
744
+ @staticmethod
745
+ def gather_v(tensor, dst, comm_group, num_workers, worker_id):
746
+ """ MPI_Gatherv. gather tensors of variable sizes in a single rank """
747
+
748
+ # gather the number of rows to be sent/recv
749
+ size = torch.tensor([len(tensor)], dtype=torch.int64, device=tensor.device)
750
+ sizes = torch.zeros(num_workers, dtype=torch.int64, device=tensor.device)
751
+ dist.all_gather_into_tensor(sizes, size, group=comm_group)
752
+ assert sizes[worker_id] == size, "all_gather failed"
753
+
754
+ # all_gather requires all tensors to be of same size so we need to pad them
755
+ max_size = max(sizes).item()
756
+ buffer = torch.empty(max_size, dtype=tensor.dtype, device=tensor.device)
757
+ buffer[0:size] = tensor.data
758
+ buffer_list = None
759
+ if worker_id == 0: # create padded recv buffers
760
+ buffer_list = [torch.empty(max_size, dtype=tensor.dtype, device=tensor.device) for _ in range(num_workers)]
761
+ dist.gather(buffer, buffer_list, dst=dst, group=comm_group)
762
+
763
+ # revert padding and return value
764
+ if worker_id == 0:
765
+ buffer_list = [r[:s.item()] for r, s in zip(buffer_list, sizes)]
766
+ return buffer_list
767
+
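Because gather and all_gather require equally shaped tensors, gather_v pads every contribution to the largest length and trims the padding off again on the destination rank. A hedged single-process illustration of the pad-then-trim round trip, with hypothetical per-rank tensors and the collective calls left out:

import torch

per_rank = [torch.tensor([1, 2]), torch.tensor([3, 4, 5, 6]), torch.tensor([7])]  # hypothetical
sizes = torch.tensor([len(t) for t in per_rank])
max_size = int(sizes.max())

# what each rank would send: its data copied into the front of a max_size buffer
padded = []
for t in per_rank:
    buf = torch.zeros(max_size, dtype=t.dtype)
    buf[:len(t)] = t
    padded.append(buf)

# what rank 0 recovers after the gather: each buffer trimmed back to its true size
gathered = [buf[:s.item()] for buf, s in zip(padded, sizes)]
assert all((g == t).all() for g, t in zip(gathered, per_rank))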
768
+ @staticmethod
769
+ def sample_sort(tensor, comm_group, num_workers, n_samples=100):
770
+ """ perform a distributed random sort of a tensor, and returns the sorted partial tensor"""
771
+ device, dims = tensor.device, tensor.size()[1]
772
+
773
+ # 1 - sort rows by first column, then second column, then third, etc...
774
+ tensor = torch.tensor(sorted(tensor.tolist()), dtype=tensor.dtype, device=tensor.device)
775
+
776
+ # 2 - collect a few evenly-spaced samples per rank
777
+ idx = torch.round(torch.linspace(0, len(tensor) - 1, n_samples)).to(int)
778
+ samples = tensor[idx][:, 0].contiguous().to(device) #only the first (sort key) column
779
+
780
+ #     all-gather these samples across ranks
781
+ all_samples = [torch.zeros(n_samples, dtype=samples.dtype, device=device) for _ in range(num_workers)]
782
+ dist.all_gather(all_samples, samples, group=comm_group)
783
+ all_samples = torch.cat(all_samples, dim=0).to(device)
784
+
785
+ # 3 - Sort all samples and collect the ranges of each rank as equidistant
786
+ all_samples = all_samples.sort()[0]
787
+ idx = torch.round(torch.linspace(0, len(all_samples) - 1, num_workers + 1)).to(int)
788
+ ranges = all_samples[idx] # range of each rank r as ranges[r] <= x < ranges[r+1]
789
+ ranges[-1] += 1 # increase upper limit of last rank so that x < ranges[r+1].
790
+
791
+ # 4 - collect elements to send to each rank, based on the rank ranges
792
+ send = []
793
+ for rank in range(num_workers):
794
+ mask = (tensor[:, 0] >= ranges[rank]) & (tensor[:, 0] < ranges[rank + 1])
795
+ send.append(tensor[mask])
796
+
797
+ # 5. all to all to communicate the sizes to be sent/recv
798
+ send_count = [torch.tensor([len(s) * dims], dtype=torch.int64, device=device) for s in send]
799
+ recv_count = list(torch.empty([num_workers], dtype=torch.int64, device=device).chunk(num_workers))
800
+ dist.all_to_all(recv_count, send_count, group=comm_group)
801
+
802
+ # 6. all-to-all-v to communicate the elements to be sent/recv as a single tensor
803
+ send = torch.cat(send, dim=0).flatten().to(device)
804
+ recv = torch.zeros(sum(recv_count), dtype=send.dtype).to(device)
805
+ send_count = [s.item() for s in send_count] # convert to list of ints
806
+ recv_count = [r.item() for r in recv_count]
807
+ dist.all_to_all_single(recv, send, recv_count, send_count, group=comm_group)
808
+ del send
809
+
810
+ # 7. the received tensor is the 1D disjoint subset of the distributed tensor.
811
+ # We will recover the original dimensionality and sort it by columns again.
812
+ recv = recv.view(-1, dims)
813
+ recv = torch.tensor(sorted(recv.tolist()), dtype=recv.dtype, device=recv.device)
814
+ return recv
815
+
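sample_sort picks n_samples evenly spaced keys per rank, all-gathers them, and uses the sorted pool of keys to derive one half-open key range per rank; every locally sorted row is then routed to the rank owning its range. A compact single-process sketch of the splitter and routing logic on hypothetical keys, with the collectives omitted:

import torch

num_workers = 2
# hypothetical, already gathered and pooled sort keys from both ranks
all_samples = torch.tensor([3, 9, 14, 20, 1, 7, 11, 18]).sort()[0]

# equidistant splitters define the range [ranges[r], ranges[r+1]) owned by rank r
idx = torch.round(torch.linspace(0, len(all_samples) - 1, num_workers + 1)).to(int)
ranges = all_samples[idx]
ranges[-1] += 1  # make the last range include the maximum key

# route each local key to its destination rank
local_keys = torch.tensor([1, 7, 9, 20])
send = [local_keys[(local_keys >= ranges[r]) & (local_keys < ranges[r + 1])] for r in range(num_workers)]
assert [s.tolist() for s in send] == [[1, 7, 9], [20]]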
816
+
817
+ def test_compare_both_data_analyzers(dataset):
818
+ """ given a dataset, compare file and memory based data analyser"""
819
+
820
+ id = lambda t: t.to(torch.int64) # identity
821
+ batch_sum = lambda t: id(t).sum() #sum batch
822
+ num_threads = 4
823
+ kwargs = dict(
824
+ dataset=dataset,
825
+ batch_size=2**10,
826
+ worker_id=int(os.environ['RANK']),
827
+ num_workers=int(os.environ['WORLD_SIZE']),
828
+ metric_names=["mod", "batch_sum"],
829
+ metric_functions=[id, batch_sum],
830
+ metric_types=['single_value_per_sample', 'accumulate_value_over_samples'],
831
+ num_threads=num_threads,
832
+ )
833
+
834
+ dda = DistributedDataAnalyzer(
835
+ save_path="./output_dist",
836
+ device=f"cuda:{int(os.environ['LOCAL_RANK'])}",
837
+ **kwargs,
838
+ )
839
+ start_time = time.time()
840
+ dda.run_map_reduce()
841
+ if dda.worker_id == 0:
842
+ print("DistributedDataAnalyzer runtime: %s seconds " % (time.time() - start_time))
843
+
844
+ da = DataAnalyzer(num_threads_reduce=num_threads,
845
+ save_path="./output_disk",
846
+ metric_dtypes=[torch.int64, torch.int64],
847
+ **kwargs)
848
+ start_time = time.time()
849
+ da.run_map_reduce()
850
+ if da.worker_id == 0:
851
+ print("DataAnalyzer runtime: %s seconds " % (time.time() - start_time))
852
+
853
+ output_paths = [
854
+ "batch_sum/batch_sum_metric_value.bin", "batch_sum/batch_sum_metric_value.idx", \
855
+ "mod/mod_index_to_metric.bin", "mod/mod_index_to_metric.idx", \
856
+ "mod/mod_index_to_sample.bin", "mod/mod_index_to_sample.idx", \
857
+ "mod/mod_index_to_sample_percentile_merged.bin", "mod/mod_index_to_sample_percentile_merged.idx", \
858
+ "mod/mod_sample_to_metric.bin", "mod/mod_sample_to_metric.idx"
859
+ ]
860
+
861
+ if dda.worker_id == 0:
862
+ for path in output_paths:
863
+ with open(os.path.join(da.save_path, path), 'rb') as f1, \
864
+ open(os.path.join(dda.save_path, path), 'rb') as f2:
865
+ if f1.read() != f2.read():
866
+ print(f"files {path} are not identical.")
867
+
868
+
869
+ if __name__ == "__main__":
870
+
871
+ class TestDataset(torch.utils.data.Dataset):
872
+
873
+ def __init__(self, size=10_000_000):
874
+ self.values = [(x + 7) % 10_000 for x in range(size)]
875
+ self.size = size
876
+
877
+ __len__ = lambda self: self.size
878
+ __getitem__ = lambda self, idx: self.values[idx]
879
+
880
+ test_compare_both_data_analyzers(TestDataset())
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_sampler.py ADDED
@@ -0,0 +1,349 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ coding=utf-8
7
+ Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
8
+
9
+ Licensed under the Apache License, Version 2.0 (the "License");
10
+ you may not use this file except in compliance with the License.
11
+ You may obtain a copy of the License at
12
+
13
+ http://www.apache.org/licenses/LICENSE-2.0
14
+
15
+ Unless required by applicable law or agreed to in writing, software
16
+ distributed under the License is distributed on an "AS IS" BASIS,
17
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ See the License for the specific language governing permissions and
19
+ limitations under the License.
20
+ Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/data_samplers.py
21
+ """
22
+
23
+ import torch
24
+ import os
25
+ import numpy as np
26
+
27
+ import deepspeed.comm as dist
28
+ from deepspeed.utils import logger
29
+ from deepspeed.accelerator import get_accelerator
30
+ from ..constants import *
31
+ from ..curriculum_scheduler import CurriculumScheduler
32
+ from .indexed_dataset import MMapIndexedDataset
33
+ from .utils import create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype
34
+
35
+
36
+ class DeepSpeedDataSampler(object):
37
+
38
+ def __init__(self,
39
+ data_efficiency_config,
40
+ one_epoch_total_samples,
41
+ micro_batch_size,
42
+ data_parallel_rank,
43
+ data_parallel_size,
44
+ data_parallel_group,
45
+ gradient_accumulation_steps,
46
+ global_rank,
47
+ drop_last=True):
48
+ # Keep a copy of input params for later use.
49
+ self.data_efficiency_config = data_efficiency_config
50
+ self.one_epoch_total_samples = one_epoch_total_samples
51
+ self.index_dtype = find_fit_int_dtype(0, one_epoch_total_samples)
52
+ self.total_samples = one_epoch_total_samples * self.data_efficiency_config[DATA_SAMPLING][
53
+ DATA_SAMPLING_NUM_EPOCHS]
54
+ self.micro_batch_size = micro_batch_size
55
+ self.data_parallel_rank = data_parallel_rank
56
+ self.data_parallel_group = data_parallel_group
57
+ self.micro_batch_times_data_parallel_size = \
58
+ self.micro_batch_size * data_parallel_size
59
+ self.gradient_accumulation_steps = gradient_accumulation_steps
60
+ self.global_batch_size = self.micro_batch_times_data_parallel_size * \
61
+ self.gradient_accumulation_steps
62
+ self.global_rank = global_rank
63
+ self.drop_last = drop_last
64
+ self.np_rng = np.random.default_rng(self.data_efficiency_config[DATA_EFFICIENCY_SEED])
65
+ self.state = {}
66
+ self.batch = []
67
+ self.consumed_samples = 0
68
+ if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]:
69
+ self.curriculum_step = 0
70
+ self.current_difficulties = {}
71
+ self.data_cluster_paths = []
72
+ self.data_cluster_current_position = []
73
+ self.curriculum_schedulers = {}
74
+ self.curriculum_index_to_sample = {}
75
+ self.curriculum_index_to_metric = {}
76
+ self.difficulty_type = {}
77
+ self.clustering_type = {}
78
+ self.data_1epoch_size = None
79
+ if self.global_rank == 0:
80
+ self.data_clusters = []
81
+ self.data_cluster_sizes = []
82
+ cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
83
+ CURRICULUM_LEARNING_CLUSTER_PATH]
84
+ if not os.path.exists(cluster_path):
85
+ os.makedirs(cluster_path)
86
+ for metric in self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]:
87
+ self.curriculum_schedulers[metric] = CurriculumScheduler(
88
+ data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][metric])
89
+ self.difficulty_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
90
+ CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_DIFFICULTY_TYPE]
91
+ self.clustering_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
92
+ CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_CLUSTERING_TYPE]
93
+ if self.global_rank == 0:
94
+ if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
95
+ self.curriculum_index_to_sample[metric] = MMapIndexedDataset(
96
+ data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]
97
+ [metric][CURRICULUM_LEARNING_SAMPLE_PATH],
98
+ skip_warmup=True)
99
+ if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
100
+ self.curriculum_index_to_metric[metric] = MMapIndexedDataset(
101
+ data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]
102
+ [metric][CURRICULUM_LEARNING_METRIC_PATH],
103
+ skip_warmup=True)
104
+
105
+ # Sanity checks.
106
+ assert self.total_samples > 0, \
107
+ 'no sample to consume: {}'.format(self.total_samples)
108
+ assert self.micro_batch_size > 0
109
+ assert data_parallel_size > 0
110
+ assert self.data_parallel_rank < data_parallel_size, \
111
+ 'data_parallel_rank should be smaller than data size: {}, ' \
112
+ '{}'.format(self.data_parallel_rank, data_parallel_size)
113
+
114
+ def __len__(self):
115
+ return self.total_samples
116
+
117
+ def set_custom_curriculum_learning_schedule(self, schedule_func_dict):
118
+ for metric in self.curriculum_schedulers:
119
+ if metric in schedule_func_dict:
120
+ self.curriculum_schedulers[metric].set_custom_get_difficulty(schedule_func_dict[metric])
121
+
122
+ def get_start_end_idx(self, batch_len=None):
123
+ """
124
+ given the length of a minibatch (defaults to micro-batch size * data_parallel_size),
125
+ return the start and end indices of the current data parallel rank
126
+ """
127
+ batch_len = batch_len or self.micro_batch_times_data_parallel_size
128
+ start_idx_fn = lambda r: round(r * batch_len / self.data_parallel_group.size())
129
+ start_idx = start_idx_fn(self.data_parallel_rank)
130
+ end_idx = start_idx_fn(self.data_parallel_rank + 1)
131
+ return start_idx, end_idx
132
+
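get_start_end_idx splits one micro_batch_size * data_parallel_size slice evenly across the data-parallel ranks, rounding the boundaries so that an incomplete final batch is still covered without gaps or overlap. A tiny standalone sketch of the same arithmetic with hypothetical values:

def get_start_end_idx(rank, batch_len, dp_size):
    start_idx_fn = lambda r: round(r * batch_len / dp_size)
    return start_idx_fn(rank), start_idx_fn(rank + 1)

# e.g. an incomplete final batch of 7 samples over 4 data-parallel ranks
print([get_start_end_idx(r, 7, 4) for r in range(4)])  # [(0, 2), (2, 4), (4, 5), (5, 7)]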
133
+ def get_sample_based_on_metric_value(self, metric, value_start, value_end):
134
+ new_samples = None
135
+ for row in range(len(self.curriculum_index_to_sample[metric])):
136
+ if self.curriculum_index_to_metric[metric][row] <= value_end and self.curriculum_index_to_metric[metric][
137
+ row] > value_start:
138
+ row_samples = np.copy(self.curriculum_index_to_sample[metric][row])
139
+ new_samples = row_samples if new_samples is None else np.concatenate(
140
+ (new_samples, row_samples), axis=None)
141
+ return new_samples
142
+
143
+ def get_sample_based_on_metric_percentile(self, metric, percentile_start, percentile_end):
144
+ new_samples = None
145
+ if self.data_1epoch_size is None:
146
+ self.data_1epoch_size = sum(len(x) for x in self.curriculum_index_to_sample[metric])
147
+ max_percentile = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][
148
+ metric][CURRICULUM_LEARNING_MAX_DIFFICULTY]
149
+ sample_per_percentile = self.data_1epoch_size // max_percentile
150
+ start_count = sample_per_percentile * percentile_start
151
+ end_count = sample_per_percentile * percentile_end
152
+ if percentile_end == max_percentile:
153
+ end_count = self.data_1epoch_size
154
+ current_count = 0
155
+ for row in range(len(self.curriculum_index_to_sample[metric])):
156
+ row_size = len(self.curriculum_index_to_sample[metric][row])
157
+ if current_count + row_size > start_count:
158
+ row_start = max(0, start_count - current_count)
159
+ if current_count + row_size <= end_count:
160
+ row_end = row_size
161
+ else:
162
+ row_end = end_count - current_count
163
+ row_samples = np.copy(self.curriculum_index_to_sample[metric][row][row_start:row_end])
164
+ new_samples = row_samples if new_samples is None else np.concatenate(
165
+ (new_samples, row_samples), axis=None)
166
+ current_count += row_size
167
+ if current_count >= end_count:
168
+ break
169
+ return new_samples
170
+
171
+ def get_new_cluster(self, previous_difficulties):
172
+ cluster_fname = CURRICULUM_LEARNING_CLUSTER_PREFIX
173
+ for metric in self.curriculum_schedulers:
174
+ cluster_fname = f"{cluster_fname}_{metric}{self.current_difficulties[metric]}"
175
+ cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
176
+ CURRICULUM_LEARNING_CLUSTER_PATH]
177
+ cluster_path = f"{cluster_path}/{cluster_fname}"
178
+ if self.global_rank == 0:
179
+ new_cluster = None
180
+ need_clustering = 0
181
+ for metric in self.clustering_type:
182
+ if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
183
+ need_clustering += 1
184
+ if need_clustering > 1:
185
+ for metric in self.curriculum_schedulers:
186
+ if self.clustering_type[metric] == CURRICULUM_LEARNING_SINGLE_CLUSTER:
187
+ metric_cluster = np.arange(start=0,
188
+ stop=self.one_epoch_total_samples,
189
+ step=1,
190
+ dtype=self.index_dtype)
191
+ else:
192
+ if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
193
+ metric_cluster = self.get_sample_based_on_metric_value(metric, float('-inf'),
194
+ self.current_difficulties[metric])
195
+ elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
196
+ metric_cluster = self.get_sample_based_on_metric_percentile(
197
+ metric, 0, self.current_difficulties[metric])
198
+ new_cluster = metric_cluster if new_cluster is None else \
199
+ np.intersect1d(new_cluster, metric_cluster, assume_unique=True)
200
+ for cluster in self.data_clusters:
201
+ new_cluster = np.setdiff1d(new_cluster, cluster[0], assume_unique=True)
202
+ else:
203
+ if len(self.data_clusters) == 0:
204
+ new_cluster = np.arange(start=0, stop=self.one_epoch_total_samples, step=1, dtype=self.index_dtype)
205
+ for metric in self.curriculum_schedulers:
206
+ if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
207
+ if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
208
+ new_cluster = self.get_sample_based_on_metric_value(metric, previous_difficulties[metric],
209
+ self.current_difficulties[metric])
210
+ elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
211
+ new_cluster = self.get_sample_based_on_metric_percentile(
212
+ metric, previous_difficulties[metric], self.current_difficulties[metric])
213
+ if new_cluster is not None and len(new_cluster) > 0:
214
+ logger.info(
215
+ f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) with size {len(new_cluster)} generated."
216
+ )
217
+ self.np_rng.shuffle(new_cluster)
218
+ cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype)
219
+ cluster_builder.add_item_numpy(new_cluster)
220
+ close_mmap_dataset_builder(cluster_builder, cluster_path)
221
+ self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True))
222
+ self.data_cluster_sizes.append(len(self.data_clusters[-1][0]))
223
+ else:
224
+ logger.info(
225
+ f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) has no matched data thus skipped."
226
+ )
227
+ dist.barrier(group=self.data_parallel_group)
228
+ if os.path.isfile(f"{cluster_path}.bin"):
229
+ self.data_cluster_paths.append(cluster_fname)
230
+ self.data_cluster_current_position.append(0)
231
+
232
+ def sample_from_clusters(self):
233
+ num_clusters = len(self.data_clusters)
234
+ weight_sum = sum(self.data_cluster_sizes)
235
+ weights = [x / weight_sum for x in self.data_cluster_sizes]
236
+ samples = self.np_rng.choice(num_clusters, self.global_batch_size, replace=True, p=weights)
237
+ samples = np.bincount(samples, minlength=num_clusters)
238
+ return samples
239
+
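sample_from_clusters draws the whole global batch from the available clusters with probability proportional to cluster size, then turns the draws into a per-cluster sample count via bincount. A small standalone sketch with hypothetical cluster sizes:

import numpy as np

rng = np.random.default_rng(1234)
data_cluster_sizes = [500, 1500, 3000]  # hypothetical cluster sizes
global_batch_size = 16

weights = [s / sum(data_cluster_sizes) for s in data_cluster_sizes]
draws = rng.choice(len(data_cluster_sizes), global_batch_size, replace=True, p=weights)
samples_per_cluster = np.bincount(draws, minlength=len(data_cluster_sizes))

assert samples_per_cluster.sum() == global_batch_size  # counts roughly follow the 1:3:6 size ratio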
240
+ def reshuffle_clusters(self, cidx):
241
+ cluster_fname = self.data_cluster_paths[cidx]
242
+ cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
243
+ CURRICULUM_LEARNING_CLUSTER_PATH]
244
+ cluster_path = f"{cluster_path}/{cluster_fname}"
245
+ cluster = np.copy(self.data_clusters[cidx][0])
246
+ self.np_rng.shuffle(cluster)
247
+ cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype)
248
+ cluster_builder.add_item_numpy(cluster)
249
+ close_mmap_dataset_builder(cluster_builder, cluster_path)
250
+ self.data_clusters[cidx] = MMapIndexedDataset(cluster_path, skip_warmup=True)
251
+
252
+ def get_sample_from_cluster(self, cidx, num_samples):
253
+ start_idx = self.data_cluster_current_position[cidx]
254
+ samples = list(np.copy(self.data_clusters[cidx][0][start_idx:(start_idx + num_samples)]))
255
+ self.data_cluster_current_position[cidx] += num_samples
256
+ if len(samples) < num_samples:
257
+ num_samples_remained = num_samples - len(samples)
258
+ logger.info(f"reshuffling cluster {cidx}.")
259
+ self.reshuffle_clusters(cidx)
260
+ samples += list(np.copy(self.data_clusters[cidx][0][:num_samples_remained]))
261
+ self.data_cluster_current_position[cidx] = num_samples_remained
262
+ return samples
263
+
264
+ def get_next_global_batch(self):
265
+ if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]:
266
+ self.curriculum_step += 1
267
+ new_cluster = False
268
+ previous_difficulties = {}
269
+ for metric in self.curriculum_schedulers:
270
+ next_difficulty = self.curriculum_schedulers[metric].update_difficulty(self.curriculum_step)
271
+ if metric not in self.current_difficulties or \
272
+ next_difficulty != self.current_difficulties[metric]:
273
+ new_cluster = True
274
+ if metric in self.current_difficulties:
275
+ previous_difficulties[metric] = self.current_difficulties[metric]
276
+ else:
277
+ if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
278
+ previous_difficulties[metric] = float('-inf')
279
+ elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
280
+ previous_difficulties[metric] = 0
281
+ self.current_difficulties[metric] = next_difficulty
282
+ if new_cluster:
283
+ self.get_new_cluster(previous_difficulties)
284
+ if self.global_rank == 0:
285
+ samples_per_cluster = self.sample_from_clusters()
286
+ batch = []
287
+ for cidx in range(len(samples_per_cluster)):
288
+ batch += self.get_sample_from_cluster(cidx, samples_per_cluster[cidx])
289
+ self.np_rng.shuffle(batch)
290
+
291
+ # the broadcast tensor must have the same shape on all participants, so we pad the batch with -1s when it is not full
292
+ assert len(batch) <= self.global_batch_size
293
+ batch += [-1] * (self.global_batch_size - len(batch))
294
+ batch = torch.tensor(batch, device=get_accelerator().current_device_name(), dtype=torch.long).view(-1)
295
+ else:
296
+ batch = torch.empty(self.global_batch_size,
297
+ device=get_accelerator().current_device_name(),
298
+ dtype=torch.long)
299
+ dist.broadcast(batch, 0, group=self.data_parallel_group)
300
+ batch = batch[batch != -1] # remove trailing -1s used to fill incomplete batch tensor
301
+ self.batch = batch.tolist()
302
+
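Since dist.broadcast needs a fixed-shape tensor, rank 0 pads the assembled batch up to global_batch_size with -1 and every rank strips the padding again after the broadcast (sample indices are never negative, so -1 is a safe filler). The pad/strip round trip in isolation, with hypothetical values and no communication:

import torch

global_batch_size = 8
batch = [11, 3, 7, 5]  # hypothetical incomplete global batch assembled on rank 0
padded = torch.tensor(batch + [-1] * (global_batch_size - len(batch)), dtype=torch.long)

recovered = padded[padded != -1].tolist()  # what each rank keeps after the broadcast
assert recovered == batch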
303
+ def __iter__(self):
304
+ while self.consumed_samples <= self.total_samples:
305
+ if len(self.batch) == 0:
306
+ self.get_next_global_batch()
307
+ current_batch = self.batch[:self.micro_batch_times_data_parallel_size]
308
+ self.batch = self.batch[self.micro_batch_times_data_parallel_size:]
309
+ if len(current_batch) == self.micro_batch_times_data_parallel_size or \
310
+ (len(current_batch) > 0 and not self.drop_last):
311
+ start_idx, end_idx = self.get_start_end_idx(len(current_batch))
312
+ yield current_batch[start_idx:end_idx]
313
+ self.consumed_samples += len(current_batch)
314
+ current_batch = []
315
+
316
+ def state_dict(self):
317
+ return {
318
+ CURRICULUM_LEARNING_BATCH: self.batch,
319
+ CURRICULUM_LEARNING_CONSUMED_SAMPLES: self.consumed_samples,
320
+ CURRICULUM_LEARNING_STEP: self.curriculum_step,
321
+ CURRICULUM_LEARNING_CURRENT_DIFFICULTIES: self.current_difficulties,
322
+ CURRICULUM_LEARNING_DATA_CLUSTER_PATHS: self.data_cluster_paths,
323
+ CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION: self.data_cluster_current_position,
324
+ CURRICULUM_LEARNING_NP_RNG_STATE: np.random.get_state()
325
+ }
326
+
327
+ def load_state_dict(self, state_dict):
328
+ self.batch = state_dict[CURRICULUM_LEARNING_BATCH]
329
+ self.consumed_samples = state_dict[CURRICULUM_LEARNING_CONSUMED_SAMPLES]
330
+ self.curriculum_step = state_dict[CURRICULUM_LEARNING_STEP]
331
+ self.current_difficulties = state_dict[CURRICULUM_LEARNING_CURRENT_DIFFICULTIES]
332
+ self.data_cluster_paths = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_PATHS]
333
+ self.data_cluster_current_position = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION]
334
+ np.random.set_state(state_dict[CURRICULUM_LEARNING_NP_RNG_STATE])
335
+ cluster_root_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
336
+ CURRICULUM_LEARNING_CLUSTER_PATH]
337
+ # Backward compatibility: data_cluster_paths used to store absolute
338
+ # paths. They now store just the file name, so that even if the user
339
+ # moves the cluster files, checkpoint loading still works as long as
340
+ # the user sets the correct new CURRICULUM_LEARNING_CLUSTER_PATH in
341
+ # the DeepSpeed JSON config.
342
+ for idx in range(len(self.data_cluster_paths)):
343
+ if '/' in self.data_cluster_paths[idx]:
344
+ self.data_cluster_paths[idx] = self.data_cluster_paths[idx].split('/')[-1]
345
+ if self.global_rank == 0:
346
+ for cluster_fname in self.data_cluster_paths:
347
+ cluster_path = f"{cluster_root_path}/{cluster_fname}"
348
+ self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True))
349
+ self.data_cluster_sizes.append(len(self.data_clusters[-1][0]))
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/indexed_dataset.py ADDED
@@ -0,0 +1,627 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/indexed_dataset.py
7
+ """
8
+
9
+ # Copyright (c) Facebook, Inc. and its affiliates.
10
+ #
11
+ # This source code is licensed under the MIT license found in the
12
+ # LICENSE file in the root directory of this source tree.
13
+
14
+ # copied from fairseq/fairseq/data/indexed_dataset.py
15
+ # Removed IndexedRawTextDataset since it relied on Fairseq dictionary
16
+ # other slight modifications to remove fairseq dependencies
17
+ # Added document index to index file and made it accessible.
18
+ # An empty sentence no longer separates documents.
19
+
20
+ # Some of the fixes/improvements are adopted from
21
+ # https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/main/megatron/data/indexed_dataset.py
22
+
23
+ from functools import lru_cache
24
+ import os
25
+ import shutil
26
+ import struct
27
+ from itertools import accumulate
28
+
29
+ import numpy as np
30
+ import torch
31
+
32
+
33
+ def __best_fitting_dtype(vocab_size=None):
34
+ if vocab_size is not None and vocab_size < 65500:
35
+ return np.uint16
36
+ else:
37
+ return np.int32
38
+
39
+
40
+ def get_available_dataset_impl():
41
+ return ['lazy', 'cached', 'mmap']
42
+
43
+
44
+ def infer_dataset_impl(path):
45
+ if IndexedDataset.exists(path):
46
+ with open(index_file_path(path), 'rb') as f:
47
+ magic = f.read(8)
48
+ if magic == IndexedDataset._HDR_MAGIC:
49
+ return 'cached'
50
+ elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
51
+ return 'mmap'
52
+ else:
53
+ return None
54
+ else:
55
+ print(f"Dataset does not exist: {path}")
56
+ print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
57
+ return None
58
+
59
+
60
+ def make_builder(out_file, impl, vocab_size=None):
61
+ if impl == 'mmap':
62
+ return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
63
+ else:
64
+ return IndexedDatasetBuilder(out_file)
65
+
66
+
67
+ def make_dataset(path, impl, skip_warmup=False):
68
+ if not IndexedDataset.exists(path):
69
+ print(f"Dataset does not exist: {path}")
70
+ print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
71
+ return None
72
+ if impl == 'infer':
73
+ impl = infer_dataset_impl(path)
74
+ if impl == 'lazy' and IndexedDataset.exists(path):
75
+ return IndexedDataset(path)
76
+ elif impl == 'cached' and IndexedDataset.exists(path):
77
+ return IndexedCachedDataset(path)
78
+ elif impl == 'mmap' and MMapIndexedDataset.exists(path):
79
+ return MMapIndexedDataset(path, skip_warmup)
80
+ print(f"Unknown dataset implementation: {impl}")
81
+ return None
82
+
83
+
84
+ def dataset_exists(path, impl):
85
+ if impl == 'mmap':
86
+ return MMapIndexedDataset.exists(path)
87
+ else:
88
+ return IndexedDataset.exists(path)
89
+
90
+
91
+ def read_longs(f, n):
92
+ a = np.empty(n, dtype=np.int64)
93
+ f.readinto(a)
94
+ return a
95
+
96
+
97
+ def write_longs(f, a):
98
+ f.write(np.array(a, dtype=np.int64))
99
+
100
+
101
+ # valid metric_dtypes as numpy and torch types
102
+ dtypes = {
103
+ 1: (np.uint8, torch.uint8),
104
+ 2: (np.int8, torch.int8),
105
+ 3: (np.int16, torch.int16),
106
+ 4: (np.int32, torch.int32),
107
+ 5: (np.int64, torch.int64),
108
+ 6: (np.uint16, None),
109
+ 7: (np.uint32, None),
110
+ 8: (np.uint64, None),
111
+ }
112
+
113
+ valid_dtypes = set([dt[0] for dt in dtypes.values()] + [dt[1] for dt in dtypes.values() if dt[1] is not None])
114
+
115
+
116
+ def code(dtype):
117
+ for c, (np_dt, torch_dt) in dtypes.items():
118
+ if dtype in [np_dt, torch_dt]:
119
+ return c
120
+ raise ValueError(f"{dtype} not supported. Supported types: {valid_dtypes}")
121
+
122
+
123
+ def index_file_path(prefix_path):
124
+ return prefix_path + '.idx'
125
+
126
+
127
+ def data_file_path(prefix_path):
128
+ return prefix_path + '.bin'
129
+
130
+
131
+ def create_doc_idx(sizes):
132
+ doc_idx = [0]
133
+ for i, s in enumerate(sizes):
134
+ if s == 0:
135
+ doc_idx.append(i + 1)
136
+ return doc_idx
137
+
138
+
139
+ class IndexedDataset(torch.utils.data.Dataset):
140
+ """Loader for IndexedDataset"""
141
+ _HDR_MAGIC = b'TNTIDX\x00\x00'
142
+
143
+ def __init__(self, path):
144
+ super().__init__()
145
+ self.path = path
146
+ self.data_file = None
147
+ self.read_index(path)
148
+
149
+ def read_index(self, path):
150
+ with open(index_file_path(path), 'rb') as f:
151
+ magic = f.read(8)
152
+ assert magic == self._HDR_MAGIC, ('Index file doesn\'t match expected format. '
153
+ 'Make sure that --dataset-impl is configured properly.')
154
+ version = f.read(8)
155
+ assert struct.unpack('<Q', version) == (1, )
156
+ code, self.element_size = struct.unpack('<QQ', f.read(16))
157
+ self.dtype = dtypes[code][0] #numpy type
158
+ self._len, self.s = struct.unpack('<QQ', f.read(16))
159
+ self.doc_count = struct.unpack('<Q', f.read(8))
160
+ self.dim_offsets = read_longs(f, self._len + 1)
161
+ self.data_offsets = read_longs(f, self._len + 1)
162
+ self.sizes = read_longs(f, self.s)
163
+ self.doc_idx = read_longs(f, self.doc_count)
164
+
165
+ def read_data(self, path):
166
+ self.data_file = open(data_file_path(path), 'rb', buffering=0)
167
+
168
+ def check_index(self, i):
169
+ if i < 0 or i >= self._len:
170
+ raise IndexError('index out of range')
171
+
172
+ def __del__(self):
173
+ if self.data_file:
174
+ self.data_file.close()
175
+
176
+ # @lru_cache(maxsize=8)
177
+ def __getitem__(self, idx):
178
+ if not self.data_file:
179
+ self.read_data(self.path)
180
+ if isinstance(idx, int):
181
+ i = idx
182
+ self.check_index(i)
183
+ tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
184
+ a = np.empty(tensor_size, dtype=self.dtype)
185
+ self.data_file.seek(self.data_offsets[i] * self.element_size)
186
+ self.data_file.readinto(a)
187
+ return a
188
+ elif isinstance(idx, slice):
189
+ start, stop, step = idx.indices(len(self))
190
+ if step != 1:
191
+ raise ValueError("Slices into indexed_dataset must be contiguous")
192
+ sizes = self.sizes[self.dim_offsets[start]:self.dim_offsets[stop]]
193
+ size = sum(sizes)
194
+ a = np.empty(size, dtype=self.dtype)
195
+ self.data_file.seek(self.data_offsets[start] * self.element_size)
196
+ self.data_file.readinto(a)
197
+ offsets = list(accumulate(sizes))
198
+ sents = np.split(a, offsets[:-1])
199
+ return sents
200
+
201
+ def __len__(self):
202
+ return self._len
203
+
204
+ def num_tokens(self, index):
205
+ return self.sizes[index]
206
+
207
+ def size(self, index):
208
+ return self.sizes[index]
209
+
210
+ @staticmethod
211
+ def exists(path):
212
+ return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)))
213
+
214
+ @property
215
+ def supports_prefetch(self):
216
+ return False # avoid prefetching to save memory
217
+
218
+
219
+ class IndexedCachedDataset(IndexedDataset):
220
+
221
+ def __init__(self, path):
222
+ super().__init__(path)
223
+ self.cache = None
224
+ self.cache_index = {}
225
+
226
+ @property
227
+ def supports_prefetch(self):
228
+ return True
229
+
230
+ def prefetch(self, indices):
231
+ if all(i in self.cache_index for i in indices):
232
+ return
233
+ if not self.data_file:
234
+ self.read_data(self.path)
235
+ indices = sorted(set(indices))
236
+ total_size = 0
237
+ for i in indices:
238
+ total_size += self.data_offsets[i + 1] - self.data_offsets[i]
239
+ self.cache = np.empty(total_size, dtype=self.dtype)
240
+ ptx = 0
241
+ self.cache_index.clear()
242
+ for i in indices:
243
+ self.cache_index[i] = ptx
244
+ size = self.data_offsets[i + 1] - self.data_offsets[i]
245
+ a = self.cache[ptx:ptx + size]
246
+ self.data_file.seek(self.data_offsets[i] * self.element_size)
247
+ self.data_file.readinto(a)
248
+ ptx += size
249
+ if self.data_file:
250
+ # close and delete data file after prefetch so we can pickle
251
+ self.data_file.close()
252
+ self.data_file = None
253
+
254
+ # @lru_cache(maxsize=8)
255
+ def __getitem__(self, idx):
256
+ if isinstance(idx, int):
257
+ i = idx
258
+ self.check_index(i)
259
+ tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
260
+ a = np.empty(tensor_size, dtype=self.dtype)
261
+ ptx = self.cache_index[i]
262
+ np.copyto(a, self.cache[ptx:ptx + a.size])
263
+ return a
264
+ elif isinstance(idx, slice):
265
+ # Hack just to make this work, can optimize later if necessary
266
+ sents = []
267
+ for i in range(*idx.indices(len(self))):
268
+ sents.append(self[i])
269
+ return sents
270
+
271
+
272
+ class IndexedDatasetBuilder(object):
273
+
274
+ def __init__(self, out_file, dtype=np.int32):
275
+ self.out_file = open(out_file, 'wb')
276
+ self.dtype = dtype
277
+ self.data_offsets = [0]
278
+ self.dim_offsets = [0]
279
+ self.sizes = []
280
+ self.element_size = self.dtype().itemsize
281
+ self.doc_idx = [0]
282
+
283
+ def add_item(self, tensor):
284
+ bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
285
+ self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
286
+ for s in tensor.size():
287
+ self.sizes.append(s)
288
+ self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
289
+
290
+ def end_document(self):
291
+ self.doc_idx.append(len(self.sizes))
292
+
293
+ def merge_file_(self, another_file):
294
+ index = IndexedDataset(another_file)
295
+ assert index.dtype == self.dtype
296
+
297
+ doc_offset = len(self.sizes)
298
+
299
+ begin = self.data_offsets[-1]
300
+ for data_offset in index.data_offsets[1:]:
301
+ self.data_offsets.append(begin + data_offset)
302
+ self.sizes.extend(index.sizes)
303
+ begin = self.dim_offsets[-1]
304
+ for dim_offset in index.dim_offsets[1:]:
305
+ self.dim_offsets.append(begin + dim_offset)
306
+ self.doc_idx.extend((doc_offset + index.doc_idx)[1:])
307
+
308
+ with open(data_file_path(another_file), 'rb') as f:
309
+ while True:
310
+ data = f.read(1024)
311
+ if data:
312
+ self.out_file.write(data)
313
+ else:
314
+ break
315
+
316
+ def finalize(self, index_file):
317
+ self.out_file.close()
318
+ index = open(index_file, 'wb')
319
+ index.write(b'TNTIDX\x00\x00')
320
+ index.write(struct.pack('<Q', 1))
321
+ index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
322
+ index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
323
+ index.write(struct.pack('<Q', len(self.doc_idx)))
324
+ write_longs(index, self.dim_offsets)
325
+ write_longs(index, self.data_offsets)
326
+ write_longs(index, self.sizes)
327
+ write_longs(index, self.doc_idx)
328
+ index.close()
329
+
330
+
331
+ def _warmup_mmap_file(path):
332
+ with open(path, 'rb') as stream:
333
+ while stream.read(100 * 1024 * 1024):
334
+ pass
335
+
336
+
337
+ def exscan_from_cumsum_(arr):
338
+ # given an array holding the result of an inclusive scan (cumsum),
339
+ # convert to an exclusive scan (shift to the right)
340
+ # [10, 30, 35, 50] --> [0, 10, 30, 35]
341
+ if arr.size > 1:
342
+ arr[1:] = arr[:-1]
343
+ if arr.size > 0:
344
+ arr[0] = 0
345
+
346
+
347
+ def get_pointers_with_total(sizes, elemsize, dtype):
348
+ """Return a numpy array of type np.dtype giving the byte offsets.
349
+
350
+ Multiplies values in the sizes array by elemsize (bytes),
351
+ and then computes an exclusive scan to get byte offsets.
352
+ Returns the total number of bytes as second item in a tuple.
353
+ """
354
+
355
+ # scale values in sizes array by elemsize to get sizes in bytes
356
+ pointers = np.array(sizes, dtype=dtype)
357
+ pointers *= elemsize
358
+ np.cumsum(pointers, axis=0, out=pointers)
359
+
360
+ # get total number of bytes from all sizes (last element)
361
+ bytes_last = pointers[-1] if len(sizes) > 0 else 0
362
+
363
+ # convert to byte offsets
364
+ exscan_from_cumsum_(pointers)
365
+
366
+ return pointers, bytes_last
367
+
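get_pointers_with_total converts per-item element counts into byte offsets: scale by the element size, take an inclusive cumulative sum, remember the total, then shift to an exclusive scan. A worked numpy example with hypothetical sizes:

import numpy as np

sizes, elemsize = [3, 2, 4], 4  # hypothetical item sizes (in elements) and element size (in bytes)

pointers = np.array(sizes, dtype=np.int64) * elemsize  # bytes per item: [12, 8, 16]
np.cumsum(pointers, out=pointers)                      # inclusive scan:  [12, 20, 36]
total_bytes = int(pointers[-1])                        # 36
pointers[1:] = pointers[:-1]                           # shift right ...
pointers[0] = 0                                        # ... exclusive scan: [0, 12, 20]

assert total_bytes == 36 and pointers.tolist() == [0, 12, 20]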
368
+
369
+ class MMapIndexedDataset(torch.utils.data.Dataset):
370
+
371
+ class Index(object):
372
+ _HDR_MAGIC = b'MMIDIDX\x00\x00'
373
+
374
+ @classmethod
375
+ def writer(cls, path, dtype):
376
+
377
+ class _Writer(object):
378
+
379
+ def __enter__(self):
380
+ self._file = open(path, 'wb')
381
+
382
+ self._file.write(cls._HDR_MAGIC)
383
+ self._file.write(struct.pack('<Q', 1))
384
+ self._file.write(struct.pack('<B', code(dtype)))
385
+
386
+ return self
387
+
388
+ @staticmethod
389
+ def _get_pointers(sizes, npdtype):
390
+ """Return a numpy array of byte offsets given a list of sizes.
391
+
392
+ Multiplies values in the sizes array by dtype size (bytes),
393
+ and then computes an exclusive scan to get byte offsets.
394
+ """
395
+
396
+ # compute element sizes in bytes
397
+ pointers, _ = get_pointers_with_total(sizes, dtype().itemsize, npdtype)
398
+ return pointers
399
+
400
+ def write(self, sizes, doc_idx):
401
+ self._file.write(struct.pack('<Q', len(sizes)))
402
+ self._file.write(struct.pack('<Q', len(doc_idx)))
403
+
404
+ sizes32 = np.array(sizes, dtype=np.int32)
405
+ self._file.write(sizes32.tobytes(order='C'))
406
+ del sizes32
407
+
408
+ pointers = self._get_pointers(sizes, np.int64)
409
+ del sizes
410
+ self._file.write(pointers.tobytes(order='C'))
411
+ del pointers
412
+
413
+ doc_idx = np.array(doc_idx, dtype=np.int64)
414
+ self._file.write(doc_idx.tobytes(order='C'))
415
+
416
+ def __exit__(self, exc_type, exc_val, exc_tb):
417
+ self._file.close()
418
+
419
+ return _Writer()
420
+
421
+ def __init__(self, path, skip_warmup=False):
422
+ with open(path, 'rb') as stream:
423
+ magic_test = stream.read(9)
424
+ assert self._HDR_MAGIC == magic_test, ('Index file doesn\'t match expected format. '
425
+ 'Make sure that --dataset-impl is configured properly.')
426
+ version = struct.unpack('<Q', stream.read(8))
427
+ assert (1, ) == version
428
+
429
+ dtype_code, = struct.unpack('<B', stream.read(1))
430
+ self._dtype = dtypes[dtype_code][0] #numpy type
431
+ self._dtype_size = self._dtype().itemsize
432
+
433
+ self._len = struct.unpack('<Q', stream.read(8))[0]
434
+ self._doc_count = struct.unpack('<Q', stream.read(8))[0]
435
+ offset = stream.tell()
436
+
437
+ if not skip_warmup:
438
+ print(" warming up index mmap file...")
439
+ _warmup_mmap_file(path)
440
+
441
+ self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
442
+ self._bin_buffer = memoryview(self._bin_buffer_mmap)
443
+ print(" reading sizes...")
444
+ self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
445
+ print(" reading pointers...")
446
+ self._pointers = np.frombuffer(self._bin_buffer,
447
+ dtype=np.int64,
448
+ count=self._len,
449
+ offset=offset + self._sizes.nbytes)
450
+ print(" reading document index...")
451
+ self._doc_idx = np.frombuffer(self._bin_buffer,
452
+ dtype=np.int64,
453
+ count=self._doc_count,
454
+ offset=offset + self._sizes.nbytes + self._pointers.nbytes)
455
+
456
+ def __del__(self):
457
+ self._bin_buffer_mmap._mmap.close()
458
+ del self._bin_buffer_mmap
459
+
460
+ @property
461
+ def dtype(self):
462
+ return self._dtype
463
+
464
+ @property
465
+ def sizes(self):
466
+ return self._sizes
467
+
468
+ @property
469
+ def doc_idx(self):
470
+ return self._doc_idx
471
+
472
+ @lru_cache(maxsize=8)
473
+ def __getitem__(self, i):
474
+ return self._pointers[i], self._sizes[i]
475
+
476
+ def __len__(self):
477
+ return self._len
478
+
479
+ def __init__(self, path, skip_warmup=False):
480
+ super().__init__()
481
+
482
+ self._path = None
483
+ self._index = None
484
+ self._bin_buffer = None
485
+
486
+ self._do_init(path, skip_warmup)
487
+
488
+ def __getstate__(self):
489
+ return self._path
490
+
491
+ def __setstate__(self, state):
492
+ self._do_init(state, skip_warmup=True)  # _do_init requires skip_warmup; assume True when restoring from pickle
493
+
494
+ def _do_init(self, path, skip_warmup):
495
+ self._path = path
496
+ self._index = self.Index(index_file_path(self._path), skip_warmup)
497
+
498
+ if not skip_warmup:
499
+ print(" warming up data mmap file...")
500
+ _warmup_mmap_file(data_file_path(self._path))
501
+ print(" creating numpy buffer of mmap...")
502
+ self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
503
+ print(" creating memory view of numpy buffer...")
504
+ self._bin_buffer = memoryview(self._bin_buffer_mmap)
505
+
506
+ def __del__(self):
507
+ self._bin_buffer_mmap._mmap.close()
508
+ del self._bin_buffer_mmap
509
+ del self._index
510
+
511
+ def __len__(self):
512
+ return len(self._index)
513
+
514
+ # @lru_cache(maxsize=8)
515
+ def __getitem__(self, idx):
516
+ if isinstance(idx, int):
517
+ ptr, size = self._index[idx]
518
+ np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
519
+ return np_array
520
+ elif isinstance(idx, slice):
521
+ start, stop, step = idx.indices(len(self))
522
+ if step != 1:
523
+ raise ValueError("Slices into indexed_dataset must be contiguous")
524
+ ptr = self._index._pointers[start]
525
+ sizes = self._index._sizes[idx]
526
+ offsets = list(accumulate(sizes))
527
+ total_size = sum(sizes)
528
+ np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr)
529
+ sents = np.split(np_array, offsets[:-1])
530
+ return sents
531
+
532
+ def get(self, idx, offset=0, length=None):
533
+ """ Retrieves a single item from the dataset with the option to only
534
+ return a portion of the item.
535
+
536
+ get(idx) is the same as [idx] but get() does not support slicing.
537
+ """
538
+ ptr, size = self._index[idx]
539
+ if length is None:
540
+ length = size - offset
541
+ ptr += offset * np.dtype(self._index.dtype).itemsize
542
+ np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr)
543
+ return np_array
544
+
545
+ @property
546
+ def sizes(self):
547
+ return self._index.sizes
548
+
549
+ def size(self, index):
550
+ return self._index.sizes[index]
551
+
552
+ @property
553
+ def doc_idx(self):
554
+ return self._index.doc_idx
555
+
556
+ def get_doc_idx(self):
557
+ return self._index._doc_idx
558
+
559
+ def set_doc_idx(self, doc_idx_):
560
+ self._index._doc_idx = doc_idx_
561
+
562
+ @property
563
+ def supports_prefetch(self):
564
+ return False
565
+
566
+ @staticmethod
567
+ def exists(path):
568
+ return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)))
569
+
570
+ @property
571
+ def dtype(self):
572
+ return self._index.dtype
573
+
574
+
575
+ class MMapIndexedDatasetBuilder(object):
576
+
577
+ def __init__(self, out_file, dtype=np.int64):
578
+ self._data_file = open(out_file, 'wb')
579
+ self._dtype = [np_dt for np_dt, torch_dt in dtypes.values() if dtype in [np_dt, torch_dt]][0]
580
+ self._sizes = []
581
+ self._doc_idx = [0]
582
+
583
+ def add_item(self, tensor):
584
+ """ write the tensor to the file and update its size in the index"""
585
+ np_array = np.array(tensor.numpy(), dtype=self._dtype)
586
+ self._data_file.write(np_array.tobytes(order='C'))
587
+ self._sizes.append(np_array.size)
588
+
589
+ def add_items(self, arr_list):
590
+ """ write a list of arrays to the file and update their sizes in the index"""
591
+ np_arrays = [arr.astype(self._dtype) for arr in arr_list]
592
+ self._data_file.writelines([arr.tobytes(order='C') for arr in np_arrays])
593
+ for arr in np_arrays:
594
+ self._sizes.append(arr.size)
595
+
596
+ def add_item_numpy(self, np_array):
597
+ if np_array.dtype != self._dtype:
598
+ np_array = np_array.astype(self._dtype)
599
+ self._data_file.write(np_array.tobytes(order='C'))
600
+ self._sizes.append(np_array.size)
601
+
602
+ def end_document(self):
603
+ self._doc_idx.append(len(self._sizes))
604
+
605
+ def merge_file_(self, another_file):
606
+ # Concatenate index
607
+ index = MMapIndexedDataset.Index(index_file_path(another_file))
608
+ assert index.dtype == self._dtype
609
+
610
+ total_len = len(index.sizes) + len(self._sizes)
611
+ print(f" concat {another_file} size={len(index.sizes)} for a total size of {total_len}")
612
+
613
+ offset = len(self._sizes)
614
+ self._sizes.extend(index.sizes)
615
+ self._doc_idx.extend((offset + index.doc_idx)[1:])
616
+
617
+ # Concatenate data
618
+ with open(data_file_path(another_file), 'rb') as f:
619
+ shutil.copyfileobj(f, self._data_file)
620
+ self._data_file.flush()
621
+ assert os.stat(self._data_file.name).st_size != 0, f"Zero-sized file: {self._data_file.name}"
622
+
623
+ def finalize(self, index_file):
624
+ self._data_file.close()
625
+
626
+ with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
627
+ index.write(self._sizes, self._doc_idx)
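The Index writer and reader shown above fully determine the on-disk layout of the `.idx` file: a 9-byte magic string (`_HDR_MAGIC`, defined earlier in this file), a little-endian uint64 version (always 1), a uint8 dtype code, two uint64 counts (number of sizes, number of document boundaries), and then three packed arrays: int32 sizes, int64 byte pointers (the exclusive scan of `sizes * itemsize` produced by `_get_pointers`), and the int64 document index. As a rough, non-authoritative sketch of reading that header outside DeepSpeed — `inspect_idx` and its path argument are illustrative, and only the layout visible in the code above is assumed:

import struct
import numpy as np

def inspect_idx(path):
    # Parse the header written by MMapIndexedDataset.Index.writer (layout as in the code above).
    with open(path, 'rb') as stream:
        magic = stream.read(9)                          # _HDR_MAGIC, 9 bytes
        version, = struct.unpack('<Q', stream.read(8))  # always 1
        dtype_code, = struct.unpack('<B', stream.read(1))
        num_sizes, = struct.unpack('<Q', stream.read(8))
        num_docs, = struct.unpack('<Q', stream.read(8))
        offset = stream.tell()
    buf = memoryview(np.memmap(path, mode='r', order='C'))
    sizes = np.frombuffer(buf, dtype=np.int32, count=num_sizes, offset=offset)
    pointers = np.frombuffer(buf, dtype=np.int64, count=num_sizes, offset=offset + sizes.nbytes)
    doc_idx = np.frombuffer(buf, dtype=np.int64, count=num_docs,
                            offset=offset + sizes.nbytes + pointers.nbytes)
    return magic, version, dtype_code, sizes, pointers, doc_idx

Because the pointers are an exclusive scan, `pointers[i]` is the byte offset of sequence `i` inside the companion `.bin` file, which is what `__getitem__` and `get()` rely on.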
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/utils.py ADDED
@@ -0,0 +1,52 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import numpy as np
7
+
8
+ from deepspeed.utils import logger
9
+ from .indexed_dataset import MMapIndexedDatasetBuilder
10
+
11
+
12
+ def find_fit_int_dtype(min_value, max_value):
13
+ if min_value >= 0:
14
+ if max_value <= 255:
15
+ return np.uint8
16
+ elif max_value <= 65535:
17
+ return np.uint16
18
+ elif max_value <= 4294967295:
19
+ return np.uint32
20
+ else:
21
+ return np.uint64
22
+ else:
23
+ if max_value <= 127 and min_value >= -128:
24
+ return np.int8
25
+ elif max_value <= 32767 and min_value >= -32768:
26
+ return np.int16
27
+ elif max_value <= 2147483647 and min_value >= -2147483648:
28
+ return np.int32
29
+ else:
30
+ return np.int64
31
+
32
+
33
+ def split_index(start_idx, end_idx, num_partitions):
34
+ partition_boundaries = np.linspace(start_idx, end_idx, dtype=int, num=num_partitions + 1)
35
+ return [(partition_boundaries[i], partition_boundaries[i + 1]) for i in range(num_partitions)]
36
+
37
+
38
+ def split_dataset(dataset, num_workers, worker_id, num_threads):
39
+ worker_splits = split_index(0, len(dataset), num_workers)
40
+ thread_splits = split_index(worker_splits[worker_id][0], worker_splits[worker_id][1], num_threads)
41
+ return worker_splits, thread_splits
42
+
43
+
44
+ def create_mmap_dataset_builder(fname, dtype):
45
+ logger.info(f"Creating mmap dataset builder at {fname}.")
46
+ return MMapIndexedDatasetBuilder(f"{fname}.bin", dtype=dtype)
47
+
48
+
49
+ def close_mmap_dataset_builder(builder, fname):
50
+ builder.end_document()
51
+ builder.finalize(f"{fname}.idx")
52
+ logger.info(f"Finalized mmap dataset builder at {fname}.")
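The helpers above compose into a small build loop: pick the narrowest integer dtype that can hold the values, create a builder (which opens `<fname>.bin`), append numpy arrays, then finalize the matching `<fname>.idx`. A minimal sketch, assuming illustrative token-id sequences and output path, and assuming the dtype returned by `find_fit_int_dtype` is one of the codes registered in the indexed_dataset dtypes table (defined earlier in that file):

import numpy as np
from deepspeed.runtime.data_pipeline.data_sampling.utils import (
    find_fit_int_dtype, create_mmap_dataset_builder, close_mmap_dataset_builder)

documents = [[5, 17, 9000], [3, 3, 42]]        # illustrative token-id sequences
vocab_size = 32000                             # assumed bound on the values
dtype = find_fit_int_dtype(0, vocab_size - 1)  # -> np.uint16 for this range

builder = create_mmap_dataset_builder("/tmp/out_prefix", dtype)  # opens /tmp/out_prefix.bin
for doc in documents:
    builder.add_item_numpy(np.array(doc, dtype=dtype))
close_mmap_dataset_builder(builder, "/tmp/out_prefix")           # end_document() + /tmp/out_prefix.idx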
venv/lib/python3.10/site-packages/deepspeed/runtime/engine.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/deepspeed/runtime/lr_schedules.py ADDED
@@ -0,0 +1,878 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Implementation of learning rate schedules.
7
+
8
+ Taken and modified from PyTorch v1.0.1 source
9
+ https://github.com/pytorch/pytorch/blob/v1.1.0/torch/optim/lr_scheduler.py
10
+ """
11
+
12
+ import argparse
13
+ from torch.optim import Optimizer
14
+ import math
15
+ from deepspeed.utils import logger
16
+
17
+ LR_SCHEDULE = 'lr_schedule'
18
+ LR_RANGE_TEST = 'LRRangeTest'
19
+ ONE_CYCLE = 'OneCycle'
20
+ WARMUP_LR = 'WarmupLR'
21
+ WARMUP_DECAY_LR = 'WarmupDecayLR'
22
+ WARMUP_COSINE_LR = 'WarmupCosineLR'
23
+ VALID_LR_SCHEDULES = [LR_RANGE_TEST, ONE_CYCLE, WARMUP_LR, WARMUP_DECAY_LR, WARMUP_COSINE_LR]
24
+
25
+ LR_RANGE_TEST_MIN_LR = 'lr_range_test_min_lr'
26
+ LR_RANGE_TEST_STEP_RATE = 'lr_range_test_step_rate'
27
+ LR_RANGE_TEST_STEP_SIZE = 'lr_range_test_step_size'
28
+ LR_RANGE_TEST_STAIRCASE = 'lr_range_test_staircase'
29
+
30
+ EDGE_VALUE = 'edge_value'
31
+ MID_VALUE = 'mid_value'
32
+
33
+ CYCLE_FIRST_STEP_SIZE = 'cycle_first_step_size'
34
+ CYCLE_FIRST_STAIR_COUNT = 'cycle_first_stair_count'
35
+ CYCLE_SECOND_STEP_SIZE = 'cycle_second_step_size'
36
+ CYCLE_SECOND_STAIR_COUNT = 'cycle_second_stair_count'
37
+ DECAY_STEP_SIZE = 'decay_step_size'
38
+
39
+ CYCLE_MIN_LR = 'cycle_min_lr'
40
+ CYCLE_MAX_LR = 'cycle_max_lr'
41
+ DECAY_LR_RATE = 'decay_lr_rate'
42
+
43
+ CYCLE_MIN_MOM = 'cycle_min_mom'
44
+ CYCLE_MAX_MOM = 'cycle_max_mom'
45
+ DECAY_MOM_RATE = 'decay_mom_rate'
46
+
47
+ WARMUP_MIN_LR = 'warmup_min_lr'
48
+ WARMUP_MAX_LR = 'warmup_max_lr'
49
+ WARMUP_NUM_STEPS = 'warmup_num_steps'
50
+ WARMUP_TYPE = 'warmup_type'
51
+ WARMUP_LOG_RATE = 'log'
52
+ WARMUP_LINEAR_RATE = 'linear'
53
+
54
+ WARMUP_MIN_RATIO = 'warmup_min_ratio'
55
+ COS_MIN_RATIO = 'cos_min_ratio'
56
+
57
+ TOTAL_NUM_STEPS = 'total_num_steps'
58
+
59
+
60
+ def add_tuning_arguments(parser):
61
+ group = parser.add_argument_group('Convergence Tuning', 'Convergence tuning configurations')
62
+
63
+ # LR scheduler
64
+ group.add_argument('--lr_schedule', type=str, default=None, help='LR schedule for training.')
65
+
66
+ # Learning rate range test
67
+ group.add_argument("--lr_range_test_min_lr", type=float, default=0.001, help='Starting lr value.')
68
+ group.add_argument("--lr_range_test_step_rate", type=float, default=1.0, help='scaling rate for LR range test.')
69
+ group.add_argument("--lr_range_test_step_size", type=int, default=1000, help='training steps per LR change.')
70
+ group.add_argument("--lr_range_test_staircase",
71
+ type=bool,
72
+ default=False,
73
+ help='use staircase scaling for LR range test.')
74
+
75
+ # OneCycle schedule
76
+ group.add_argument("--cycle_first_step_size",
77
+ type=int,
78
+ default=1000,
79
+ help='size of first step of 1Cycle schedule (training steps).')
80
+ group.add_argument("--cycle_first_stair_count",
81
+ type=int,
82
+ default=-1,
83
+ help='first stair count for 1Cycle schedule.')
84
+ group.add_argument("--cycle_second_step_size",
85
+ type=int,
86
+ default=-1,
87
+ help='size of second step of 1Cycle schedule (default first_step_size).')
88
+ group.add_argument("--cycle_second_stair_count",
89
+ type=int,
90
+ default=-1,
91
+ help='second stair count for 1Cycle schedule.')
92
+ group.add_argument("--decay_step_size",
93
+ type=int,
94
+ default=1000,
95
+ help='size of intervals for applying post cycle decay (training steps).')
96
+
97
+ # 1Cycle LR
98
+ group.add_argument("--cycle_min_lr", type=float, default=0.01, help='1Cycle LR lower bound.')
99
+ group.add_argument("--cycle_max_lr", type=float, default=0.1, help='1Cycle LR upper bound.')
100
+ group.add_argument("--decay_lr_rate", type=float, default=0.0, help='post cycle LR decay rate.')
101
+
102
+ # 1Cycle Momentum
103
+ group.add_argument('--cycle_momentum', default=False, action='store_true', help='Enable 1Cycle momentum schedule.')
104
+ group.add_argument("--cycle_min_mom", type=float, default=0.8, help='1Cycle momentum lower bound.')
105
+ group.add_argument("--cycle_max_mom", type=float, default=0.9, help='1Cycle momentum upper bound.')
106
+ group.add_argument("--decay_mom_rate", type=float, default=0.0, help='post cycle momentum decay rate.')
107
+
108
+ # Warmup LR
109
+ group.add_argument('--warmup_min_lr', type=float, default=0, help='WarmupLR minimum/initial LR value')
110
+ group.add_argument('--warmup_max_lr', type=float, default=0.001, help='WarmupLR maximum LR value.')
111
+ group.add_argument('--warmup_num_steps', type=int, default=1000, help='WarmupLR step count for LR warmup.')
112
+ group.add_argument('--warmup_type',
113
+ type=str,
114
+ default=WARMUP_LOG_RATE,
115
+ help='WarmupLR increasing function during warmup')
116
+
117
+ # WarmUP cos LR
118
+ group.add_argument("--warmup_min_ratio", type=float, default=0.01, help='Cosine LR lower bound.')
119
+ group.add_argument("--cos_min_ratio", type=float, default=0.01, help='Cosine LR lower bound.')
120
+
121
+ return parser
122
+
123
+
124
+ def parse_arguments():
125
+ parser = argparse.ArgumentParser()
126
+ parser = add_tuning_arguments(parser)
127
+
128
+ lr_sched_args, unknown_args = parser.parse_known_args()
129
+ return lr_sched_args, unknown_args
130
+
131
+
132
+ def override_lr_range_test_params(args, params):
133
+ if hasattr(args, LR_RANGE_TEST_MIN_LR) and args.lr_range_test_min_lr is not None:
134
+ params[LR_RANGE_TEST_MIN_LR] = args.lr_range_test_min_lr
135
+
136
+ if hasattr(args, LR_RANGE_TEST_STEP_RATE) and args.lr_range_test_step_rate is not None:
137
+ params[LR_RANGE_TEST_STEP_RATE] = args.lr_range_test_step_rate
138
+
139
+ if hasattr(args, LR_RANGE_TEST_STEP_SIZE) and args.lr_range_test_step_size is not None:
140
+ params[LR_RANGE_TEST_STEP_SIZE] = args.lr_range_test_step_size
141
+
142
+ if hasattr(args, LR_RANGE_TEST_STAIRCASE) and args.lr_range_test_staircase is not None:
143
+ params[LR_RANGE_TEST_STAIRCASE] = args.lr_range_test_staircase
144
+
145
+
146
+ def override_1cycle_params(args, params):
147
+ if hasattr(args, CYCLE_FIRST_STEP_SIZE) and args.cycle_first_step_size is not None:
148
+ params[CYCLE_FIRST_STEP_SIZE] = args.cycle_first_step_size
149
+
150
+ if hasattr(args, CYCLE_FIRST_STAIR_COUNT) and args.cycle_first_stair_count is not None:
151
+ params[CYCLE_FIRST_STAIR_COUNT] = args.cycle_first_stair_count
152
+
153
+ if hasattr(args, CYCLE_SECOND_STEP_SIZE) and args.cycle_second_step_size is not None:
154
+ params[CYCLE_SECOND_STEP_SIZE] = args.cycle_second_step_size
155
+
156
+ if hasattr(args, CYCLE_SECOND_STAIR_COUNT) and args.cycle_second_stair_count is not None:
157
+ params[CYCLE_SECOND_STAIR_COUNT] = args.cycle_second_stair_count
158
+
159
+ if hasattr(args, DECAY_STEP_SIZE) and args.decay_step_size is not None:
160
+ params[DECAY_STEP_SIZE] = args.decay_step_size
161
+
162
+ # 1Cycle LR params
163
+ if hasattr(args, CYCLE_MIN_LR) and args.cycle_min_lr is not None:
164
+ params[CYCLE_MIN_LR] = args.cycle_min_lr
165
+
166
+ if hasattr(args, CYCLE_MAX_LR) and args.cycle_max_lr is not None:
167
+ params[CYCLE_MAX_LR] = args.cycle_max_lr
168
+
169
+ if hasattr(args, DECAY_LR_RATE) and args.decay_lr_rate is not None:
170
+ params[DECAY_LR_RATE] = args.decay_lr_rate
171
+
172
+ # 1Cycle MOM params
173
+ if hasattr(args, CYCLE_MIN_MOM) and args.cycle_min_mom is not None:
174
+ params[CYCLE_MIN_MOM] = args.cycle_min_mom
175
+
176
+ if hasattr(args, CYCLE_MAX_MOM) and args.cycle_max_mom is not None:
177
+ params[CYCLE_MAX_MOM] = args.cycle_max_mom
178
+
179
+ if hasattr(args, DECAY_MOM_RATE) and args.decay_mom_rate is not None:
180
+ params[DECAY_MOM_RATE] = args.decay_mom_rate
181
+
182
+
183
+ def override_warmupLR_params(args, params):
184
+ if hasattr(args, WARMUP_MIN_LR) and args.warmup_min_lr is not None:
185
+ params[WARMUP_MIN_LR] = args.warmup_min_lr
186
+
187
+ if hasattr(args, WARMUP_MAX_LR) and args.warmup_max_lr is not None:
188
+ params[WARMUP_MAX_LR] = args.warmup_max_lr
189
+
190
+ if hasattr(args, WARMUP_NUM_STEPS) and args.warmup_num_steps is not None:
191
+ params[WARMUP_NUM_STEPS] = args.warmup_num_steps
192
+
193
+ if hasattr(args, WARMUP_TYPE) and args.warmup_type is not None:
194
+ params[WARMUP_TYPE] = args.warmup_type
195
+
196
+
197
+ def override_params(args, params):
198
+ # LR range test params
199
+ override_lr_range_test_params(args, params)
200
+
201
+ # 1Cycle params
202
+ override_1cycle_params(args, params)
203
+
204
+ # WarmupLR params
205
+ override_warmupLR_params(args, params)
206
+
207
+
208
+ def get_config_from_args(args):
209
+ if not hasattr(args, LR_SCHEDULE) or args.lr_schedule is None:
210
+ return None, '--{} not specified on command line'.format(LR_SCHEDULE)
211
+
212
+ if not args.lr_schedule in VALID_LR_SCHEDULES:
213
+ return None, '{} is not supported LR schedule'.format(args.lr_schedule)
214
+
215
+ config = {}
216
+ config['type'] = args.lr_schedule
217
+ config['params'] = {}
218
+
219
+ if args.lr_schedule == LR_RANGE_TEST:
220
+ override_lr_range_test_params(args, config['params'])
221
+ elif args.lr_schedule == ONE_CYCLE:
222
+ override_1cycle_params(args, config['params'])
223
+ else:
224
+ override_warmupLR_params(args, config['params'])
225
+
226
+ return config, None
227
+
228
+
229
+ def get_lr_from_config(config):
230
+ if not 'type' in config:
231
+ return None, 'LR schedule type not defined in config'
232
+
233
+ if not 'params' in config:
234
+ return None, 'LR schedule params not defined in config'
235
+
236
+ lr_schedule = config['type']
237
+ lr_params = config['params']
238
+
239
+ if not lr_schedule in VALID_LR_SCHEDULES:
240
+ return None, '{} is not a valid LR schedule'.format(lr_schedule)
241
+
242
+ if lr_schedule == LR_RANGE_TEST:
243
+ return lr_params[LR_RANGE_TEST_MIN_LR], ''
244
+ if lr_schedule == ONE_CYCLE:
245
+ return lr_params[CYCLE_MAX_LR], ''
246
+ # Warmup LR
247
+ return lr_params[WARMUP_MAX_LR], ''
248
+
249
+
250
+ """
251
+ Only optimizers that are subclasses of torch.optim.Optimizer are supported. So check the passed optimizer and the wrapped
252
+ optimizer to see if the requirement is satisfied.
253
+ TODO: Looking under the hood to examine the wrapped optimizer is a hack that requires a better long-term fix.
254
+ """
255
+
256
+
257
+ def get_torch_optimizer(optimizer):
258
+ if isinstance(optimizer, Optimizer):
259
+ return optimizer
260
+
261
+ if hasattr(optimizer, 'optimizer') and isinstance(optimizer.optimizer, Optimizer):
262
+ return optimizer.optimizer
263
+
264
+ raise TypeError('{} is not a subclass of torch.optim.Optimizer'.format(type(optimizer).__name__))
265
+
266
+
267
+ class LRRangeTest(object):
268
+ """Sets the learning rate of each parameter group according to
269
+ learning rate range test (LRRT) policy. The policy increases learning
270
+ rate starting from a base value with a constant frequency, as detailed in
271
+ the paper `A disciplined approach to neural network hyper-parameters: Part1`_.
272
+
273
+ LRRT policy is used for finding maximum LR that trains a model without divergence, and can be used to
274
+ configure the LR boundaries for Cyclic LR schedules.
275
+
276
+ LRRT changes the learning rate after every batch.
277
+ `step` should be called after a batch has been used for training.
278
+
279
+ Args:
280
+ optimizer (Optimizer): Wrapped optimizer.
281
+ lr_range_test_min_lr (float or list): Initial learning rate which is the
282
+ lower boundary in the range test for each parameter group.
283
+ lr_range_test_step_size (int): Interval of training steps to increase learning rate. Default: 2000
284
+ lr_range_test_step_rate (float): Scaling rate for range test. Default: 1.0
285
+ lr_range_test_staircase (bool): Scale in staircase fashion, rather than continuous. Default: False.
286
+ last_batch_iteration (int): The index of the last batch. This parameter is used when
287
+ resuming a training job. Since `step()` should be invoked after each
288
+ batch instead of after each epoch, this number represents the total
289
+ number of *batches* computed, not the total number of epochs computed.
290
+ When last_batch_iteration=-1, the schedule is started from the beginning.
291
+ Default: -1
292
+
293
+ Example:
294
+ >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
295
+ >>> scheduler = LRRangeTest(optimizer)
296
+ >>> data_loader = torch.utils.data.DataLoader(...)
297
+ >>> for epoch in range(10):
298
+ >>> for batch in data_loader:
299
+ >>> train_batch(...)
300
+ >>> scheduler.step()
301
+
302
+ .. _A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay:
303
+ https://arxiv.org/abs/1803.09820
304
+ """
305
+
306
+ def __init__(self,
307
+ optimizer: Optimizer,
308
+ lr_range_test_min_lr: float = 1e-3,
309
+ lr_range_test_step_size: int = 2000,
310
+ lr_range_test_step_rate: float = 1.0,
311
+ lr_range_test_staircase: bool = False,
312
+ last_batch_iteration: int = -1):
313
+
314
+ self.optimizer = get_torch_optimizer(optimizer)
315
+
316
+ if isinstance(lr_range_test_min_lr, list) or isinstance(lr_range_test_min_lr, tuple):
317
+ if len(lr_range_test_min_lr) != len(self.optimizer.param_groups):
318
+ raise ValueError("expected {} lr_range_test_min_lr, got {}".format(len(self.optimizer.param_groups),
319
+ len(lr_range_test_min_lr)))
320
+ self.min_lr = list(lr_range_test_min_lr)
321
+ else:
322
+ self.min_lr = [lr_range_test_min_lr] * len(self.optimizer.param_groups)
323
+
324
+ self.step_size = lr_range_test_step_size
325
+ self.step_rate = lr_range_test_step_rate
326
+ self.last_batch_iteration = last_batch_iteration
327
+ self.staircase = lr_range_test_staircase
328
+ self.interval_fn = self._staircase_interval if lr_range_test_staircase else self._continuous_interval
329
+
330
+ if last_batch_iteration == -1:
331
+ self._update_optimizer(self.min_lr)
332
+
333
+ def _staircase_interval(self):
334
+ return math.floor(float(self.last_batch_iteration + 1) / self.step_size)
335
+
336
+ def _continuous_interval(self):
337
+ return float(self.last_batch_iteration + 1) / self.step_size
338
+
339
+ def _get_increase(self):
340
+ return (1 + self.step_rate * self.interval_fn())
341
+
342
+ def get_lr(self):
343
+ lr_increase = self._get_increase()
344
+ return [lr_range_test_min_lr * lr_increase for lr_range_test_min_lr in self.min_lr]
345
+
346
+ def get_last_lr(self):
347
+ """ Return last computed learning rate by current scheduler.
348
+ """
349
+ assert getattr(self, '_last_lr', None) is not None, "need to call step() first"
350
+ return self._last_lr
351
+
352
+ def _update_optimizer(self, group_lrs):
353
+ for param_group, lr in zip(self.optimizer.param_groups, group_lrs):
354
+ param_group['lr'] = lr
355
+
356
+ def step(self, batch_iteration=None):
357
+ if batch_iteration is None:
358
+ batch_iteration = self.last_batch_iteration + 1
359
+ self.last_batch_iteration = batch_iteration
360
+ self._update_optimizer(self.get_lr())
361
+ self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
362
+
363
+ def state_dict(self):
364
+ return {'last_batch_iteration': self.last_batch_iteration}
365
+
366
+ def load_state_dict(self, sd):
367
+ self.last_batch_iteration = sd['last_batch_iteration']
368
+
369
+
370
+ class OneCycle(object):
371
+ """Sets the learning rate of each parameter group according to
372
+ 1Cycle learning rate policy (1CLR). 1CLR is a variation of the
373
+ Cyclical Learning Rate (CLR) policy that involves one cycle followed by
374
+ decay. The policy simultaneously cycles the learning rate (and momentum)
375
+ between two boundaries with a constant frequency, as detailed in
376
+ the paper `A disciplined approach to neural network hyper-parameters`_.
377
+
378
+ 1CLR policy changes the learning rate after every batch.
379
+ `step` should be called after a batch has been used for training.
380
+
381
+ This implementation was adapted from the github repo: `pytorch/pytorch`_
382
+
383
+ Args:
384
+ optimizer (Optimizer): Wrapped optimizer.
385
+ cycle_min_lr (float or list): Initial learning rate which is the
386
+ lower boundary in the cycle for each parameter group.
387
+ cycle_max_lr (float or list): Upper learning rate boundaries in the cycle
388
+ for each parameter group. Functionally,
389
+ it defines the cycle amplitude (cycle_max_lr - cycle_min_lr).
390
+ The lr at any cycle is the sum of cycle_min_lr
391
+ and some scaling of the amplitude; therefore
392
+ cycle_max_lr may not actually be reached depending on
393
+ scaling function.
394
+ decay_lr_rate(float): Decay rate for learning rate. Default: 0.
395
+ cycle_first_step_size (int): Number of training iterations in the
396
+ increasing half of a cycle. Default: 2000
397
+ cycle_second_step_size (int): Number of training iterations in the
398
+ decreasing half of a cycle. If cycle_second_step_size is None,
399
+ it is set to cycle_first_step_size. Default: None
400
+ cycle_first_stair_count(int): Number of stairs in first half of cycle phase. This means
401
+ lr/mom are changed in staircase fashion. Default 0, means staircase disabled.
402
+ cycle_second_stair_count(int): Number of stairs in second half of cycle phase. This means
403
+ lr/mom are changed in staircase fashion. Default 0, means staircase disabled.
404
+ decay_step_size (int): Intervals for applying decay in decay phase. Default: 0, means no decay.
405
+ cycle_momentum (bool): If ``True``, momentum is cycled inversely
406
+ to learning rate between 'cycle_min_mom' and 'cycle_max_mom'.
407
+ Default: True
408
+ cycle_min_mom (float or list): Initial momentum which is the
409
+ lower boundary in the cycle for each parameter group.
410
+ Default: 0.8
411
+ cycle_max_mom (float or list): Upper momentum boundaries in the cycle
412
+ for each parameter group. Functionally,
413
+ it defines the cycle amplitude (cycle_max_mom - cycle_min_mom).
414
+ The momentum at any cycle is the difference of cycle_max_mom
415
+ and some scaling of the amplitude; therefore
416
+ cycle_min_mom may not actually be reached depending on
417
+ scaling function. Default: 0.9
418
+ decay_mom_rate (float): Decay rate for momentum. Default: 0.
419
+ last_batch_iteration (int): The index of the last batch. This parameter is used when
420
+ resuming a training job. Since `step()` should be invoked after each
421
+ batch instead of after each epoch, this number represents the total
422
+ number of *batches* computed, not the total number of epochs computed.
423
+ When last_batch_iteration=-1, the schedule is started from the beginning.
424
+ Default: -1
425
+
426
+ Example:
427
+ >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
428
+ >>> scheduler = OneCycle(optimizer, 0.0001, 0.0010)
429
+ >>> data_loader = torch.utils.data.DataLoader(...)
430
+ >>> for epoch in range(10):
431
+ >>> for batch in data_loader:
432
+ >>> train_batch(...)
433
+ >>> scheduler.step()
434
+
435
+
436
+ .. _A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay: https://arxiv.org/abs/1803.09820
437
+ """
438
+
439
+ def __init__(self,
440
+ optimizer,
441
+ cycle_min_lr,
442
+ cycle_max_lr,
443
+ decay_lr_rate=0.,
444
+ cycle_first_step_size=2000,
445
+ cycle_second_step_size=None,
446
+ cycle_first_stair_count=0,
447
+ cycle_second_stair_count=None,
448
+ decay_step_size=0,
449
+ cycle_momentum=True,
450
+ cycle_min_mom=0.8,
451
+ cycle_max_mom=0.9,
452
+ decay_mom_rate=0.,
453
+ last_batch_iteration=-1):
454
+
455
+ self.optimizer = get_torch_optimizer(optimizer)
456
+
457
+ # Initialize cycle shape
458
+ self._initialize_cycle(cycle_first_step_size, cycle_second_step_size, cycle_first_stair_count,
459
+ cycle_second_stair_count, decay_step_size)
460
+
461
+ # Initialize cycle lr
462
+ self._initialize_lr(self.optimizer, cycle_min_lr, cycle_max_lr, decay_lr_rate, last_batch_iteration)
463
+
464
+ # Initialize cyclic momentum
465
+ self.cycle_momentum = cycle_momentum
466
+ if cycle_momentum:
467
+ self._initialize_momentum(self.optimizer, cycle_min_mom, cycle_max_mom, decay_mom_rate,
468
+ last_batch_iteration)
469
+ # Initialize batch iteration tracker
470
+ self.last_batch_iteration = last_batch_iteration
471
+
472
+ # Configure cycle shape
473
+
474
+ def _initialize_cycle(self, cycle_first_step_size, cycle_second_step_size, cycle_first_stair_count,
475
+ cycle_second_stair_count, decay_step_size):
476
+ cycle_first_step_size = float(cycle_first_step_size)
477
+ cycle_second_step_size = float(
478
+ cycle_second_step_size) if cycle_second_step_size is not None else cycle_first_step_size
479
+
480
+ self.total_size = cycle_first_step_size + cycle_second_step_size
481
+ self.step_ratio = cycle_first_step_size / self.total_size
482
+ self.first_stair_count = cycle_first_stair_count
483
+ self.second_stair_count = cycle_first_stair_count if cycle_second_stair_count is None else cycle_second_stair_count
484
+ self.decay_step_size = decay_step_size
485
+
486
+ if math.isclose(self.decay_step_size, 0):
487
+ self.skip_lr_decay = True
488
+ self.skip_mom_decay = True
489
+ else:
490
+ self.skip_lr_decay = False
491
+ self.skip_mom_decay = False
492
+
493
+ # Configure lr schedule
494
+ def _initialize_lr(self, optimizer, cycle_min_lr, cycle_max_lr, decay_lr_rate, last_batch_iteration):
495
+ self.min_lrs = [cycle_min_lr] * len(optimizer.param_groups)
496
+ if last_batch_iteration == -1:
497
+ for lr, group in zip(self.min_lrs, optimizer.param_groups):
498
+ group['lr'] = lr
499
+
500
+ self.max_lrs = [cycle_max_lr] * len(optimizer.param_groups)
501
+ self.decay_lr_rate = decay_lr_rate
502
+
503
+ if math.isclose(self.decay_lr_rate, 0):
504
+ self.skip_lr_decay = True
505
+
506
+ # Configure momentum schedule
507
+ def _initialize_momentum(self, optimizer, cycle_min_mom, cycle_max_mom, decay_mom_rate, last_batch_iteration):
508
+ if 'betas' not in optimizer.defaults:
509
+ optimizer_name = type(optimizer).__name__
510
+ logger.warn(
511
+ f"cycle_momentum is disabled because optimizer {optimizer_name} does not support momentum, no betas attribute in defaults"
512
+ )
513
+ self.cycle_momentum = False
514
+ return
515
+
516
+ self.decay_mom_rate = decay_mom_rate
517
+ self.min_moms = [(cycle_min_mom, 0.99)] * len(optimizer.param_groups)
518
+ self.max_moms = [(cycle_max_mom, 0.99)] * len(optimizer.param_groups)
519
+
520
+ if last_batch_iteration == -1:
521
+ for momentum, group in zip(self.min_moms, optimizer.param_groups):
522
+ group['betas'] = momentum
523
+
524
+ if math.isclose(self.decay_mom_rate, 0):
525
+ self.skip_mom_decay = True
526
+
527
+ def _get_scale_factor(self):
528
+ batch_iteration = (self.last_batch_iteration + 1)
529
+ cycle = math.floor(1 + batch_iteration / self.total_size)
530
+ x = 1. + batch_iteration / self.total_size - cycle
531
+ if x <= self.step_ratio:
532
+ scale_factor = x / self.step_ratio
533
+ else:
534
+ scale_factor = (x - 1) / (self.step_ratio - 1)
535
+
536
+ return scale_factor
537
+
538
+ def _get_cycle_mom(self):
539
+ scale_factor = self._get_scale_factor()
540
+ momentums = []
541
+ for base_betas, max_betas in zip(self.min_moms, self.max_moms):
542
+ cycle_min_mom = base_betas[0]
543
+ cycle_max_mom = max_betas[0]
544
+ base_height = (cycle_max_mom - cycle_min_mom) * scale_factor
545
+ momentum = cycle_max_mom - base_height
546
+ momentums.append((momentum, base_betas[1]))
547
+ return momentums
548
+
549
+ def _get_cycle_lr(self):
550
+ scale_factor = self._get_scale_factor()
551
+ lrs = []
552
+ for cycle_min_lr, cycle_max_lr in zip(self.min_lrs, self.max_lrs):
553
+ base_height = (cycle_max_lr - cycle_min_lr) * scale_factor
554
+ lr = cycle_min_lr + base_height
555
+ lrs.append(lr)
556
+
557
+ return lrs
558
+
559
+ def _get_decay_mom(self, decay_batch_iteration):
560
+ if self.skip_mom_decay:
561
+ return self.max_moms
562
+
563
+ decay_interval = decay_batch_iteration / self.decay_step_size
564
+ mom_decay_factor = (1 + self.decay_mom_rate * decay_interval)
565
+ momentums = [(beta0 * mom_decay_factor, beta1) for beta0, beta1 in self.max_moms]
566
+
567
+ return momentums
568
+
569
+ def _get_decay_lr(self, decay_batch_iteration):
570
+ """Calculates the learning rate at batch index. This function is used
571
+ after the cycle completes and post cycle decaying of lr/mom is enabled.
572
+ This function treats `self.last_batch_iteration` as the last batch index.
573
+ """
574
+ if self.skip_lr_decay:
575
+ return self.min_lrs
576
+
577
+ decay_interval = decay_batch_iteration / self.decay_step_size
578
+ lr_decay_factor = (1 + self.decay_lr_rate * decay_interval)
579
+ lrs = [cycle_min_lr / lr_decay_factor for cycle_min_lr in self.min_lrs]
580
+
581
+ return lrs
582
+
583
+ def get_lr(self):
584
+ """Calculates the learning rate at batch index. This function treats
585
+ `self.last_batch_iteration` as the last batch index.
586
+ """
587
+ if self.last_batch_iteration < self.total_size:
588
+ return self._get_cycle_lr()
589
+ return self._get_decay_lr(self.last_batch_iteration - self.total_size + 1)
590
+
591
+ def get_mom(self):
592
+ """Calculates the momentum at batch index. This function treats
593
+ `self.last_batch_iteration` as the last batch index.
594
+ """
595
+ if not self.cycle_momentum:
596
+ return None
597
+
598
+ if self.last_batch_iteration < self.total_size:
599
+ return self._get_cycle_mom()
600
+ return self._get_decay_mom(self.last_batch_iteration - self.total_size + 1)
601
+
602
+ def get_last_lr(self):
603
+ """ Return last computed learning rate by current scheduler.
604
+ """
605
+ assert getattr(self, '_last_lr', None) is not None, "need to call step() first"
606
+ return self._last_lr
607
+
608
+ def step(self, batch_iteration=None):
609
+ """ Updates the optimizer with the learning rate for the last batch index.
610
+ `self.last_batch_iteration` is treated as the last batch index.
611
+
612
+ If self.cycle_momentum is true, also updates optimizer momentum.
613
+ """
614
+ if batch_iteration is None:
615
+ batch_iteration = self.last_batch_iteration + 1
616
+
617
+ self.last_batch_iteration = batch_iteration
618
+ for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
619
+ param_group['lr'] = lr
620
+ self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
621
+
622
+ if self.cycle_momentum:
623
+ momentums = self.get_mom()
624
+ for param_group, momentum in zip(self.optimizer.param_groups, momentums):
625
+ param_group['betas'] = momentum
626
+
627
+ def state_dict(self):
628
+ return {'last_batch_iteration': self.last_batch_iteration}
629
+
630
+ def load_state_dict(self, sd):
631
+ self.last_batch_iteration = sd['last_batch_iteration']
632
+
633
+
634
+ class WarmupLR(object):
635
+ """Increase the learning rate of each parameter group from min lr to max lr
636
+ over warmup_num_steps steps, and then fix at max lr.
637
+
638
+ Args:
639
+ optimizer (Optimizer): Wrapped optimizer.
640
+ warmup_min_lr (float or list): minimum learning rate. Default: 0
641
+ warmup_max_lr (float or list): maximum learning rate. Default: 0.001
642
+ warmup_num_steps (int): number of steps to warm up from min_lr to max_lr. Default: 1000
643
+ warmup_type {‘log’, ‘linear’}: increasing function from min_lr to max_lr during warmup. Default: log
644
+ last_batch_iteration (int): The index of the last batch. Default: -1.
645
+ Example:
646
+ >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
647
+ >>> scheduler = WarmupLR(optimizer)
648
+ >>> data_loader = torch.utils.data.DataLoader(...)
649
+ >>> for epoch in range(10):
650
+ >>> for batch in data_loader:
651
+ >>> train_batch(...)
652
+ >>> scheduler.step()
653
+
654
+ """
655
+
656
+ def __init__(self,
657
+ optimizer: Optimizer,
658
+ warmup_min_lr: float = 0.0,
659
+ warmup_max_lr: float = 0.001,
660
+ warmup_num_steps: int = 1000,
661
+ warmup_type: str = WARMUP_LOG_RATE,
662
+ last_batch_iteration: int = -1):
663
+
664
+ self.optimizer = get_torch_optimizer(optimizer)
665
+
666
+ self.min_lrs = self._format_param(self.optimizer, warmup_min_lr, "min_lr")
667
+ self.max_lrs = self._format_param(self.optimizer, warmup_max_lr, "max_lr")
668
+ self.delta_lrs = [big - small for big, small in zip(self.max_lrs, self.min_lrs)]
669
+ self.warmup_num_steps = max(2, warmup_num_steps)
670
+ # Currently only support linear and log function
671
+ if warmup_type not in {WARMUP_LOG_RATE, WARMUP_LINEAR_RATE}:
672
+ logger.warning(f"Using unknown warmup_type: {warmup_type}. The increasing function "
673
+ f"is set to default (log)")
674
+ warmup_type = WARMUP_LOG_RATE
675
+ self.warmup_type = warmup_type
676
+ self.inverse_log_warm_up = 1.0 / math.log(self.warmup_num_steps)
677
+ self.last_batch_iteration = last_batch_iteration
678
+
679
+ def get_lr(self):
680
+ if self.last_batch_iteration < 0:
681
+ logger.warning("Attempting to get learning rate from scheduler before it has started")
682
+ return [0.0]
683
+ gamma = self._get_gamma()
684
+ return [min_lr + (delta_lr * gamma) for min_lr, delta_lr in zip(self.min_lrs, self.delta_lrs)]
685
+
686
+ def get_last_lr(self):
687
+ """ Return last computed learning rate by current scheduler.
688
+ """
689
+ assert getattr(self, '_last_lr', None) is not None, "need to call step() first"
690
+ return self._last_lr
691
+
692
+ def step(self, last_batch_iteration=None):
693
+ if last_batch_iteration is None:
694
+ last_batch_iteration = self.last_batch_iteration + 1
695
+ self.last_batch_iteration = last_batch_iteration
696
+ for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
697
+ param_group['lr'] = lr
698
+ self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
699
+
700
+ def state_dict(self):
701
+ return {'last_batch_iteration': self.last_batch_iteration}
702
+
703
+ def load_state_dict(self, sd):
704
+ self.last_batch_iteration = sd['last_batch_iteration']
705
+
706
+ def _get_gamma(self):
707
+ if self.last_batch_iteration < self.warmup_num_steps:
708
+ if self.warmup_type == WARMUP_LOG_RATE:
709
+ return self.inverse_log_warm_up * math.log(self.last_batch_iteration + 1)
710
+ elif self.warmup_type == WARMUP_LINEAR_RATE:
711
+ return self.last_batch_iteration / self.warmup_num_steps
712
+ return 1.0
713
+
714
+ def _format_param(self, optimizer, param_value, param_name):
715
+ if isinstance(param_value, list) or isinstance(param_value, tuple):
716
+ if len(param_value) != len(optimizer.param_groups):
717
+ raise ValueError("expected {} value for {}, got {}".format(len(optimizer.param_groups), param_name,
718
+ FileNotFoundError(param_value)))
719
+ return list(param_value)
720
+ return [param_value] * len(optimizer.param_groups)
721
+
722
+
723
+ class WarmupDecayLR(WarmupLR):
724
+ """Increase the learning rate of each parameter group from min lr to max lr
725
+ over warmup_num_steps steps, and then decay at linear rate over the remaining training steps.
726
+
727
+ Args:
728
+ optimizer (Optimizer): Wrapped optimizer.
729
+ total_num_steps (int): total number of training steps
730
+ warmup_min_lr (float or list): minimum learning rate. Default: 0
731
+ warmup_max_lr (float or list): maximum learning rate. Default: 0.001
732
+ warmup_num_steps (int): number of steps to warm up from min_lr to max_lr. Default: 1000
733
+ warmup_type {‘log’, ‘linear’}: increasing function from min_lr to max_lr during warmup. Default: log
734
+ last_batch_iteration (int): The index of the last batch. Default: -1.
735
+ Example:
736
+ >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
737
+ >>> scheduler = WarmupDecayLR(optimizer, 1000000)
738
+ >>> data_loader = torch.utils.data.DataLoader(...)
739
+ >>> for epoch in range(10):
740
+ >>> for batch in data_loader:
741
+ >>> train_batch(...)
742
+ >>> scheduler.step()
743
+
744
+ """
745
+
746
+ def __init__(self,
747
+ optimizer: Optimizer,
748
+ total_num_steps: int,
749
+ warmup_min_lr: float = 0.0,
750
+ warmup_max_lr: float = 0.001,
751
+ warmup_num_steps: int = 1000,
752
+ warmup_type: str = WARMUP_LOG_RATE,
753
+ last_batch_iteration: int = -1):
754
+
755
+ self.total_num_steps = total_num_steps
756
+ super(WarmupDecayLR, self).__init__(optimizer, warmup_min_lr, warmup_max_lr, warmup_num_steps, warmup_type,
757
+ last_batch_iteration)
758
+ if self.total_num_steps < self.warmup_num_steps:
759
+ logger.warning('total_num_steps {} is less than warmup_num_steps {}'.format(
760
+ total_num_steps, warmup_num_steps))
761
+
762
+ def _get_gamma(self):
763
+ if self.last_batch_iteration < self.warmup_num_steps:
764
+ if self.warmup_type == WARMUP_LOG_RATE:
765
+ return self.inverse_log_warm_up * math.log(self.last_batch_iteration + 1)
766
+ elif self.warmup_type == WARMUP_LINEAR_RATE:
767
+ return self.last_batch_iteration / self.warmup_num_steps
768
+ return max(
769
+ 0.0,
770
+ float(self.total_num_steps - self.last_batch_iteration) /
771
+ float(max(1.0, self.total_num_steps - self.warmup_num_steps)))
772
+
773
+
774
+ class WarmupCosineLR(object):
775
+ """Increase the learning rate of each parameter group from min lr ratio to max lr ratio
776
+ over warmup_num_steps steps, and then decay at cosine rate over the remaining training steps to min cosine ratio.
777
+
778
+ Args:
779
+ optimizer (Optimizer): Wrapped optimizer.
780
+ total_num_steps (int): total number of training steps
781
+ warmup_min_ratio (float or list): warmup start learning rate ratio. Default: 0
782
+ warmup_num_steps (int): number of steps to warm up from warmup_min_ratio to 1.0. Default: 1000
783
+ warmup_type {‘log’, ‘linear’}: increasing function from min_lr to max_lr during warmup. Default: log
784
+ cos_min_ratio (float): cosine end learning rate ratio. Default: 0.0001
785
+ last_batch_iteration (int): The index of the last batch. Default: -1.
786
+ Example:
787
+ >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
788
+ >>> scheduler = WarmupCosineLR(optimizer, 1000000)
789
+ >>> data_loader = torch.utils.data.DataLoader(...)
790
+ >>> for epoch in range(10):
791
+ >>> for batch in data_loader:
792
+ >>> train_batch(...)
793
+ >>> scheduler.step()
794
+
795
+ """
796
+
797
+ def __init__(self,
798
+ optimizer: Optimizer,
799
+ total_num_steps: int,
800
+ warmup_min_ratio: float = 0.0,
801
+ warmup_num_steps: int = 1000,
802
+ cos_min_ratio: float = 0.0001,
803
+ warmup_type: str = WARMUP_LOG_RATE,
804
+ last_batch_iteration: int = -1):
805
+
806
+ self.optimizer = get_torch_optimizer(optimizer)
807
+
808
+ self.total_num_steps = total_num_steps
809
+ self.last_batch_iteration = last_batch_iteration
810
+ self.cos_min_ratio = cos_min_ratio
811
+
812
+ self.warmup_type = warmup_type
813
+ self.warmup_min_ratio = warmup_min_ratio
814
+ self.warmup_num_steps = max(2, warmup_num_steps)
815
+ self.inverse_log_warm_up = 1.0 / math.log(self.warmup_num_steps)
816
+
817
+ if self.total_num_steps < self.warmup_num_steps:
818
+ logger.warning('total_num_steps {} is less than warmup_num_steps {}'.format(
819
+ total_num_steps, warmup_num_steps))
820
+ self.org_lrs = [group['lr'] for group in self.optimizer.param_groups]
821
+
822
+ def get_lr_ratio(self):
823
+ if self.last_batch_iteration < 0:
824
+ logger.warning("Attempting to get learning rate from scheduler before it has started")
825
+ return [0.0]
826
+
827
+ if self.last_batch_iteration < self.warmup_num_steps:
828
+ if self.warmup_type == WARMUP_LOG_RATE:
829
+ ratio = self.inverse_log_warm_up * math.log(self.last_batch_iteration + 1)
830
+ elif self.warmup_type == WARMUP_LINEAR_RATE:
831
+ ratio = self.last_batch_iteration / self.warmup_num_steps
832
+ ratio_delta = 1. - self.warmup_min_ratio
833
+ ratio = self.warmup_min_ratio + ratio * ratio_delta
834
+ return ratio
835
+
836
+ real_last_step = self.last_batch_iteration - self.warmup_num_steps + 1
837
+ real_total_steps = self.total_num_steps - self.warmup_num_steps
838
+ ratio_delta = 1. - self.cos_min_ratio
839
+ ratio = (1 + math.cos(math.pi * real_last_step / real_total_steps)) / 2
840
+ ratio = max(0.0, self.cos_min_ratio + ratio_delta * ratio)
841
+ return ratio
842
+
843
+ def step(self, last_batch_iteration=None):
844
+ if last_batch_iteration is None:
845
+ last_batch_iteration = self.last_batch_iteration + 1
846
+ self.last_batch_iteration = last_batch_iteration
847
+
848
+ lrs = self.get_lr()
849
+ for param_group, lr in zip(self.optimizer.param_groups, lrs):
850
+ param_group['lr'] = lr
851
+ self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
852
+
853
+ def get_lr(self):
854
+ if self.last_batch_iteration < 0:
855
+ logger.warning("Attempting to get learning rate from scheduler before it has started")
856
+ return [0.0]
857
+ lr_ratio = self.get_lr_ratio()
858
+ return [org_lr * lr_ratio for org_lr in self.org_lrs]
859
+
860
+ def get_last_lr(self):
861
+ """ Return last computed learning rate by current scheduler.
862
+ """
863
+ assert getattr(self, '_last_lr', None) is not None, "need to call step() first"
864
+ return self._last_lr
865
+
866
+ def state_dict(self):
867
+ return {'last_batch_iteration': self.last_batch_iteration}
868
+
869
+ def load_state_dict(self, sd):
870
+ self.last_batch_iteration = sd['last_batch_iteration']
871
+
872
+ def _format_param(self, optimizer, param_value, param_name):
873
+ if isinstance(param_value, list) or isinstance(param_value, tuple):
874
+ if len(param_value) != len(optimizer.param_groups):
875
+ raise ValueError("expected {} value for {}, got {}".format(len(optimizer.param_groups), param_name,
876
+ FileNotFoundError(param_value)))
877
+ return list(param_value)
878
+ return [param_value] * len(optimizer.param_groups)
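For the warmup schedules above, the shape of the ramp is determined entirely by `_get_gamma`: a logarithmic curve (`log(step + 1) / log(warmup_num_steps)`) or a straight line (`step / warmup_num_steps`), clamped to 1.0 after warmup, and for `WarmupDecayLR` followed by a linear decay to zero at `total_num_steps`. A small standalone sketch of those formulas (pure Python, no optimizer; it mirrors the code above rather than calling it):

import math

def warmup_decay_gamma(step, warmup_num_steps=1000, total_num_steps=100000, warmup_type='log'):
    # LR multiplier used by WarmupDecayLR: warmup ramp, then linear decay (see _get_gamma above).
    warmup_num_steps = max(2, warmup_num_steps)
    if step < warmup_num_steps:
        if warmup_type == 'log':
            return math.log(step + 1) / math.log(warmup_num_steps)
        return step / warmup_num_steps                      # 'linear'
    return max(0.0, (total_num_steps - step) / max(1.0, total_num_steps - warmup_num_steps))

# The LR at a given step is warmup_min_lr + (warmup_max_lr - warmup_min_lr) * gamma.
lr = 0.0 + (6e-4 - 0.0) * warmup_decay_gamma(500)           # roughly 0.9 * 6e-4 with log warmup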
venv/lib/python3.10/site-packages/deepspeed/runtime/quantize.py ADDED
@@ -0,0 +1,180 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ import math
8
+ from deepspeed.utils import logger
9
+ from deepspeed.ops.quantizer import ds_quantizer
10
+
11
+ TWO_D_PARAMS = 6
12
+
13
+
14
+ class Quantizer(object):
15
+
16
+ def __init__(self,
17
+ q_groups=1,
18
+ q_mixed_fp16=False,
19
+ q_change_ratio=0.01,
20
+ q_type=0,
21
+ q_rounding=0,
22
+ q_verbose=False,
23
+ q_eigenvalue=False,
24
+ use_quantizer_kernel=False,
25
+ layer_num=0):
26
+
27
+ self.q_groups = q_groups
28
+ self.q_mixed_fp16 = q_mixed_fp16
29
+ self.q_change_ratio = q_change_ratio
30
+ self.q_type = q_type
31
+ self.qsteps = 0
32
+ self.quantize_real_ratio = 1.000
33
+ self.q_verbose = q_verbose
34
+ self.q_eigenvalue = q_eigenvalue
35
+ self.use_quantizer_kernel = use_quantizer_kernel
36
+ self.q_rounding = q_rounding
37
+ self.layer_num = layer_num
38
+
39
+ def any_precision_switch(self):
40
+ # Temporary disabled functionality
41
+ if self.layer_num == 0:
42
+ return True
43
+ result = False
44
+ for index in range(self.layer_num):
45
+ if self.q_start_bits[index] != self.q_target_bits:
46
+ next_step = self.qsteps + (TWO_D_PARAMS * (self.layer_num if self.layer_num != 0 else 1))
47
+ if next_step >= self.q_period[index]:
48
+ result = True
49
+ return result
50
+
51
+ def quantize(self, parameter_group, overflow, eigenvalue_enabled, block_eigenvalue={}):
52
+
53
+ if overflow and not eigenvalue_enabled:
54
+ return
55
+
56
+ self.step()
57
+
58
+ self.update_fp16_ratio()
59
+
60
+ for i in range(len(parameter_group)):
61
+ for p in parameter_group[i]:
62
+ if len(p.size()) > 1 and hasattr(p, "start_bits") and p.start_bits:
63
+ param_id = id(p)
64
+ if block_eigenvalue is None:
65
+ eigenvalue, layer_id = None, 0
66
+ else:
67
+ eigenvalue, layer_id = block_eigenvalue[param_id] if param_id in block_eigenvalue else (None,
68
+ 0)
69
+ if eigenvalue is not None:
70
+ factor = 1 + math.floor(eigenvalue * 4)
71
+ p.data = self.compute_quantization(p.data, layer_id, factor)
72
+ else:
73
+ p.data = self.compute_quantization(p, layer_id)
74
+
75
+ def step(self):
76
+ self.qsteps += 1
77
+
78
+ def quantize_highbit(self, inputs, num_bits):
79
+
80
+ q_range = 2**num_bits
81
+ input_flat = inputs.reshape(self.q_groups, -1)
82
+ g_min = input_flat.amin(dim=-1, keepdim=True)
83
+ g_max = input_flat.amax(dim=-1, keepdim=True)
84
+
85
+ # Random number generator (Uniform)
86
+ if self.q_rounding == 'nearest':
87
+ p = 0.
88
+ else:
89
+ p = input_flat.new(input_flat.shape).uniform_(-0.5, 0.5)
90
+
91
+ if self.q_type == 'symmetric':
92
+ scale = 2 * torch.max(torch.abs(g_min), torch.abs(g_max)) / q_range
93
+ zero_point = 0.
94
+ input_flat = (input_flat / scale + p).round().clamp(-(q_range >> 1), (q_range >> 1) - 1) * scale
95
+ elif self.q_type == 'asymmetric':
96
+ scale = (g_max - g_min) / q_range
97
+ zero_point = (g_min / scale).round() * scale
98
+ input_flat = ((input_flat - zero_point) / scale + p).round().clamp(0, (q_range - 1)) * scale + zero_point
99
+ output = input_flat.reshape(inputs.shape).contiguous()
100
+ return output
101
+
102
+ def quantize_tenary(self, inputs):
103
+ input_flat = inputs.reshape(self.q_groups, -1)
104
+ n = input_flat.shape[1]
105
+ m = input_flat.norm(p=1, dim=1).div(n)
106
+ thres = (0.7 * m).view(-1, 1) #.expand_as(input_flat)
107
+ pos = (input_flat > thres).type(inputs.type())
108
+ neg = (input_flat < -thres).type(inputs.type())
109
+ mask = (input_flat.abs() > thres).type(inputs.type())
110
+ alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1)
111
+ output = alpha * pos - alpha * neg
112
+ output = output.reshape(inputs.shape).contiguous()
113
+ return output
114
+
115
+ def quantize_binary(self, inputs):
116
+ input_flat = inputs.reshape(self.q_groups, -1)
117
+ n = input_flat.shape[1]
118
+ m = input_flat.norm(p=1, dim=1, keepdim=True).div(n)
119
+ output = input_flat.sign().mul(m)
120
+ output = output.reshape(inputs.shape).contiguous()
121
+ return output
122
+
123
+ def mixed_fp16_quantize(self, input, input_q, index):
124
+ if self.q_mixed_fp16 and self.q_start_bits[index] >= (self.q_target_bits - 1):
125
+ input_q = input * self.quantize_real_ratio + (1 - self.quantize_real_ratio) * input_q
126
+ return input_q
127
+ return input_q
128
+
129
+ def compute_quantization(self, input, index=0, factor=1):
130
+ # fixing the quantization bits based on the training steps
131
+ # when reducing 1 bit at each period, we increase the period
132
+ # to go slowly toward the target quantization bits
133
+ # the period and starting bit can be configured
134
+
135
+ if input.start_bits != input.target_bits:
136
+ if self.qsteps >= input.q_period:
137
+ self.quantize_real_ratio = 1.0
138
+ input.q_period <<= 1
139
+ input.q_period *= factor
140
+ input.start_bits -= 1
141
+ if self.q_verbose:
142
+ logger.info(
143
+ f'Quantization settings: current bit-precision = {input.start_bits}, step = {self.qsteps}, quantization period = {input.q_period}, index = {index}'
144
+ )
145
+ assert (input.start_bits >= input.target_bits), \
146
+ 'Quantization bit is lower than target precision bits!'
147
+
148
+ if self.use_quantizer_kernel:
149
+ if input.start_bits <= 2:
150
+ raise ValueError('Quantization bit is too low, please do it without quantization kernel!')
151
+ input_q = ds_quantizer(input.data.clone(),
152
+ self.q_groups,
153
+ input.start_bits,
154
+ asym=False if self.q_type == 'symmetric' else True,
155
+ sr=False if self.q_rounding == 'nearest_neighbor' else True)
156
+ else:
157
+ if input.start_bits >= 3:
158
+ input_flat = self.quantize_highbit(input.data, input.start_bits)
159
+ elif input.start_bits == 2:
160
+ assert self.q_type == 'symmetric', 'Quantization type is not symmetric!'
161
+ assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest!'
162
+ input_flat = self.quantize_tenary(input.data)
163
+ elif input.start_bits == 1:
164
+ assert self.q_type == 'symmetric', 'Quantization type is not symmetric!'
165
+ assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest!'
166
+ input_flat = self.quantize_binary(input.data)
167
+ if self.use_quantizer_kernel:
168
+ return self.mixed_fp16_quantize(input.data, input_q, index)
169
+ else:
170
+ if self.q_mixed_fp16 and input.start_bits >= input.target_bits - 1:
171
+ input_flat = self.quantize_real_ratio * input.data + \
172
+ (1 - self.quantize_real_ratio) * input_flat
173
+ return input_flat
174
+
175
+ def update_fp16_ratio(self):
176
+ if self.q_mixed_fp16:
177
+ if self.quantize_real_ratio > 0:
178
+ self.quantize_real_ratio -= self.q_change_ratio
179
+ else:
180
+ self.quantize_real_ratio = 0.000
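The fallback path in `quantize_highbit` above is plain per-group min/max quantization with optional stochastic rounding: symmetric mode scales by `2 * max(|min|, |max|) / 2^bits` and clamps to the signed range, asymmetric mode shifts by a rounded zero point and clamps to `[0, 2^bits - 1]`. A minimal sketch of the symmetric case for a single group — standalone torch code for illustration, not the class above:

import torch

def symmetric_quantize(x, num_bits=8, stochastic=True):
    # Fake-quantize a tensor with per-tensor symmetric min/max scaling, as in quantize_highbit above.
    q_range = 2 ** num_bits
    scale = 2 * x.abs().max() / q_range
    noise = x.new(x.shape).uniform_(-0.5, 0.5) if stochastic else 0.
    q = (x / scale + noise).round().clamp(-(q_range >> 1), (q_range >> 1) - 1)
    return q * scale

x = torch.randn(4, 4)
print(symmetric_quantize(x, num_bits=4))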
venv/lib/python3.10/site-packages/deepspeed/utils/__init__.py ADDED
@@ -0,0 +1,22 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .logging import logger, log_dist
7
+ from .comms_logging import get_caller_func
8
+ #from .distributed import init_distributed
9
+ from .init_on_device import OnDevice
10
+ from .groups import *
11
+ from .nvtx import instrument_w_nvtx
12
+ # TODO: Move tensor fragment and mixed precision to zero utils
13
+ from .tensor_fragment import tensor_fragment, get_full_hp_param, get_hp_fragment_mapping, fragment_address, get_full_hp_grad, map_to_flat_opt_states
14
+ from .tensor_fragment import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state
15
+ from .tensor_fragment import set_full_hp_param
16
+ from .tensor_fragment import safe_set_full_fp32_param, safe_set_full_optimizer_state
17
+ from .tensor_fragment import safe_get_local_fp32_param, safe_get_local_grad, safe_get_local_optimizer_state
18
+ from .tensor_fragment import safe_set_local_fp32_param, safe_set_local_optimizer_state
19
+ from .z3_leaf_module import set_z3_leaf_modules, unset_z3_leaf_modules, get_z3_leaf_modules, z3_leaf_module, z3_leaf_parameter
20
+ from .mixed_precision_linkage import link_hp_params, lazy_init_hp_params_optimizer_state
21
+ from deepspeed.runtime.dataloader import RepeatingLoader
22
+ from .numa import get_numactl_cmd
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.43 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/bwc.cpython-310.pyc ADDED
Binary file (2.84 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/comms_logging.cpython-310.pyc ADDED
Binary file (5.62 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/debug.cpython-310.pyc ADDED
Binary file (5.04 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (361 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/groups.cpython-310.pyc ADDED
Binary file (17 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/init_on_device.cpython-310.pyc ADDED
Binary file (3.2 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/logging.cpython-310.pyc ADDED
Binary file (3.88 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/mixed_precision_linkage.cpython-310.pyc ADDED
Binary file (1.52 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/numa.cpython-310.pyc ADDED
Binary file (4.23 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/nvtx.cpython-310.pyc ADDED
Binary file (697 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/tensor_fragment.cpython-310.pyc ADDED
Binary file (9.13 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/timer.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/torch.cpython-310.pyc ADDED
Binary file (590 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/types.cpython-310.pyc ADDED
Binary file (751 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/z3_leaf_module.cpython-310.pyc ADDED
Binary file (4.8 kB). View file