applied-ai-018 committed
Commit 9ad9e91 (verified)
1 parent: 734b6a1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +3 -0
  2. venv/lib/python3.10/site-packages/deepspeed/elasticity/__init__.py +10 -0
  3. venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/utils.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/deepspeed/elasticity/config.py +110 -0
  5. venv/lib/python3.10/site-packages/deepspeed/elasticity/constants.py +81 -0
  6. venv/lib/python3.10/site-packages/deepspeed/elasticity/elastic_agent.py +188 -0
  7. venv/lib/python3.10/site-packages/deepspeed/elasticity/elasticity.py +378 -0
  8. venv/lib/python3.10/site-packages/deepspeed/elasticity/utils.py +14 -0
  9. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/base_optimizer.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/compiler.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/config.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/config_utils.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/constants.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/dataloader.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/eigenvalue.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/engine.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/hybrid_engine.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/lr_schedules.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/progressive_layer_drop.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/quantize.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/sparse_tensor.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/state_dict_factory.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/utils.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/weight_quantizer.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__init__.py +5 -0
  26. venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/__init__.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/checkpointing.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/config.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__init__.py +5 -0
  30. venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/checkpoint_engine.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/nebula_checkpoint_engine.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/torch_checkpoint_engine.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/checkpoint_engine.py +30 -0
  35. venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py +107 -0
  36. venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py +34 -0
  37. venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__init__.py +5 -0
  38. venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/__init__.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/cupy.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/deepspeed/runtime/compression/cupy.py +26 -0
  41. venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__init__.py +7 -0
  42. venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/engine.py +1437 -0
  43. venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/schedule.py +494 -0
  44. venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/topology.py +456 -0
  45. venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__init__.py +15 -0
  46. venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/__init__.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/config.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/contiguous_memory_allocator.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/linear.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/mics.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -138,3 +138,6 @@ venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-
  venv/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/deepspeed/elasticity/__init__.py ADDED
@@ -0,0 +1,10 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .elasticity import compute_elastic_config, elasticity_enabled, ensure_immutable_elastic_config
+ from .utils import is_torch_elastic_compatible
+ from .constants import ENABLED, ENABLED_DEFAULT, ELASTICITY
+ if is_torch_elastic_compatible():
+     from .elastic_agent import DSElasticAgent
venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/utils.cpython-310.pyc ADDED
Binary file (500 Bytes).
venv/lib/python3.10/site-packages/deepspeed/elasticity/config.py ADDED
@@ -0,0 +1,110 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import json
+ from .constants import *
+
+
+ class ElasticityError(Exception):
+     """
+     Base exception for all elasticity related errors
+     """
+
+
+ class ElasticityConfigError(ElasticityError):
+     """
+     Elasticity configuration error
+     """
+
+
+ class ElasticityIncompatibleWorldSize(ElasticityError):
+     """
+     Attempting to run a world size that is incompatible with a given elastic config
+     """
+
+
+ class ElasticityConfig:
+     """
+     Elastic config object, constructed from a param dictionary that only contains elastic
+     config parameters, example below:
+
+     If elasticity is enabled, user must specify (at least) max_train_batch_size
+     and micro_batch_sizes.
+
+     {
+         "enabled": true,
+         "max_train_batch_size": 2000,
+         "micro_batch_sizes": [2,4,6],
+         "min_gpus": 1,
+         "max_gpus" : 10000
+         "min_time": 20
+         "ignore_non_elastic_batch_info": false
+         "version": 0.1
+     }
+     """
+
+     def __init__(self, param_dict):
+         self.enabled = param_dict.get(ENABLED, ENABLED_DEFAULT)
+         if self.enabled:
+             if MAX_ACCEPTABLE_BATCH_SIZE in param_dict:
+                 self.max_acceptable_batch_size = param_dict[MAX_ACCEPTABLE_BATCH_SIZE]
+             else:
+                 raise ElasticityConfigError(f"Elasticity config missing {MAX_ACCEPTABLE_BATCH_SIZE}")
+             if MICRO_BATCHES in param_dict:
+                 self.micro_batches = param_dict[MICRO_BATCHES]
+             else:
+                 raise ElasticityConfigError(f"Elasticity config missing {MICRO_BATCHES}")
+         else:
+             self.max_acceptable_batch_size = param_dict.get(MAX_ACCEPTABLE_BATCH_SIZE,
+                                                             MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT)
+             self.micro_batches = param_dict.get(MICRO_BATCHES, MICRO_BATCHES_DEFAULT)
+
+         if not isinstance(self.micro_batches, list):
+             raise ElasticityConfigError(
+                 f"Elasticity expected value of {MICRO_BATCHES} to be a "
+                 f"list of micro batches, instead is: {type(self.micro_batches)}, containing: {self.micro_batches}")
+
+         if not all(map(lambda m: isinstance(m, int), self.micro_batches)):
+             raise ElasticityConfigError(f"Elasticity expected {MICRO_BATCHES} to only contain a list of integers, "
+                                         f"instead contains: f{self.micro_batches}")
+
+         if not all(map(lambda m: m > 0, self.micro_batches)):
+             raise ElasticityConfigError(f"Elasticity expected {MICRO_BATCHES} to only contain positive integers, "
+                                         f"instead contains: f{self.micro_batches}")
+
+         self.min_gpus = param_dict.get(MIN_GPUS, MIN_GPUS_DEFAULT)
+         self.max_gpus = param_dict.get(MAX_GPUS, MAX_GPUS_DEFAULT)
+
+         if self.min_gpus < 1 or self.max_gpus < 1:
+             raise ElasticityConfigError("Elasticity min/max gpus must be > 0, "
+                                         f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}")
+         if self.max_gpus < self.min_gpus:
+             raise ElasticityConfigError("Elasticity min_gpus cannot be greater than max_gpus, "
+                                         f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}")
+
+         self.model_parallel_size = param_dict.get(MODEL_PARALLEL_SIZE, MODEL_PARALLEL_SIZE_DEFAULT)
+         if self.model_parallel_size < 1:
+             raise ElasticityConfigError("Model-Parallel size cannot be less than 1, "
+                                         f"given model-parallel size: {self.model_parallel_size}")
+
+         self.num_gpus_per_node = param_dict.get(NUM_GPUS_PER_NODE, NUM_GPUS_PER_NODE_DEFAULT)
+         if self.num_gpus_per_node < 1:
+             raise ElasticityConfigError("Number of GPUs per node cannot be less than 1, "
+                                         f"given number of GPUs per node: {self.num_gpus_per_node}")
+
+         self.min_time = param_dict.get(MIN_TIME, MIN_TIME_DEFAULT)
+         if self.min_time < 0:
+             raise ElasticityConfigError(f"Elasticity min time needs to be >= 0: given {self.min_time}")
+
+         self.version = param_dict.get(VERSION, VERSION_DEFAULT)
+         self.prefer_larger_batch_size = param_dict.get(PREFER_LARGER_BATCH, PREFER_LARGER_BATCH_DEFAULT)
+         self.ignore_non_elastic_batch_info = param_dict.get(IGNORE_NON_ELASTIC_BATCH_INFO,
+                                                             IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT)
+
+     def repr(self):
+         return self.__dict__
+
+     def __repr__(self):
+         return json.dumps(self.__dict__, sort_keys=True, indent=4)
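Editor's note: a minimal usage sketch of the ElasticityConfig class added above; the dictionary values are illustrative and not part of the commit.

# Hypothetical example: validate an elasticity block before handing it to DeepSpeed.
from deepspeed.elasticity.config import ElasticityConfig, ElasticityConfigError

elastic_param_dict = {
    "enabled": True,
    "max_train_batch_size": 2000,   # maps to MAX_ACCEPTABLE_BATCH_SIZE
    "micro_batch_sizes": [2, 4, 6],
    "min_gpus": 1,
    "max_gpus": 256,
}

try:
    cfg = ElasticityConfig(elastic_param_dict)
    print(repr(cfg))  # __repr__ dumps the validated fields as JSON
except ElasticityConfigError as e:
    print(f"Invalid elasticity config: {e}")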
venv/lib/python3.10/site-packages/deepspeed/elasticity/constants.py ADDED
@@ -0,0 +1,81 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ #########################################
+ # Elasticity
+ #########################################
+ ''' Elasticity Utility in DeepSpeed can be used to create highly elastic jobs compatible
+ with a large number of GPUs. For elastic jobs, DeepSpeed will provide a batch size that
+ can support a large number of GPUs based on the user specified parameters
+ '''
+ FORMAT = '''
+ Elasticity should be enabled as:
+   "elasticity": {
+     "enabled": true,
+     "max_train_batch_size": 2000,
+     "micro_batch_sizes": [2,4,6],
+     "min_gpus": 1,
+     "max_gpus" : 10000,
+     "min_time": 20,
+     "prefer_larger_batch": true,
+     "ignore_non_elastic_batch_info": false,
+     "version": 0.1
+   }
+ '''
+
+ ELASTICITY = 'elasticity'
+
+ # Current elasticity version
+ LATEST_ELASTICITY_VERSION = 0.2
+
+ ENABLED = 'enabled'
+ ENABLED_DEFAULT = False
+
+ # Max acceptable train_batch_size
+ MAX_ACCEPTABLE_BATCH_SIZE = 'max_train_batch_size'
+ MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT = 2000
+
+ # Acceptable micro batch sizes, same as train_micro_batch_size_per_gpu
+ MICRO_BATCHES = 'micro_batch_sizes'
+ MICRO_BATCHES_DEFAULT = [2, 4, 6]
+
+ # Min/max of GPUs to search over
+ MIN_GPUS = 'min_gpus'
+ MIN_GPUS_DEFAULT = 1
+ MAX_GPUS = 'max_gpus'
+ MAX_GPUS_DEFAULT = 10000
+
+ NUM_GPUS_PER_NODE = 'num_gpus_per_node'
+ NUM_GPUS_PER_NODE_DEFAULT = 1
+
+ MODEL_PARALLEL_SIZE = "model_parallel_size"
+ MODEL_PARALLEL_SIZE_DEFAULT = 1
+
+ # Minimum running time (minutes) before the scheduler will scale us, 0 implies it's unknown
+ MIN_TIME = "min_time"
+ MIN_TIME_DEFAULT = 0
+
+ # When finding a suitable batch size, attempt to find one that is closest
+ # to the max train batch size given.
+ PREFER_LARGER_BATCH = 'prefer_larger_batch'
+ PREFER_LARGER_BATCH_DEFAULT = True
+
+ # In order to reduce confusion, if elastic mode is enabled we
+ # require (via assert) that no batch info is set outside of the
+ # elastic config. You can turn off this assert via this config
+ # but keep in mind that all batch info defined outside the
+ # elastic mode *will be ignored*.
+ IGNORE_NON_ELASTIC_BATCH_INFO = 'ignore_non_elastic_batch_info'
+ IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT = False
+
+ # Version of elastic logic to use
+ VERSION = "version"
+ VERSION_DEFAULT = LATEST_ELASTICITY_VERSION
+
+ # Minimum deepspeed version to use elasticity
+ MINIMUM_DEEPSPEED_VERSION = "0.3.8"
+
+ # Environment variable storing elastic config from resource scheduler
+ DEEPSPEED_ELASTICITY_CONFIG = "DEEPSPEED_ELASTICITY_CONFIG"
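Editor's note: a small, hypothetical sketch of how these keys appear inside a DeepSpeed config dictionary; the numeric values are illustrative only.

# Build an "elasticity" block using the constants defined above and check it with the helper API.
from deepspeed.elasticity import elasticity_enabled
from deepspeed.elasticity.constants import ELASTICITY, ENABLED, MAX_ACCEPTABLE_BATCH_SIZE, MICRO_BATCHES

ds_config = {
    ELASTICITY: {
        ENABLED: True,
        MAX_ACCEPTABLE_BATCH_SIZE: 2000,
        MICRO_BATCHES: [2, 4, 6],
        "min_gpus": 1,
        "max_gpus": 10000,
        "version": 0.2,
    },
}

assert elasticity_enabled(ds_config)  # True when ds_config["elasticity"]["enabled"] is set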
venv/lib/python3.10/site-packages/deepspeed/elasticity/elastic_agent.py ADDED
@@ -0,0 +1,188 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
+ from typing import Any, Dict, Optional, Tuple
+ from datetime import datetime
+ from torch.distributed.elastic.agent.server.api import _get_socket_with_port
+ from torch.distributed.elastic.metrics import put_metric
+ from torch.distributed.elastic.agent.server.api import (
+     RunResult,
+     WorkerGroup,
+     WorkerSpec,
+     WorkerState,
+ )
+ from torch.distributed import Store
+ import time
+ import os
+ from torch.distributed.elastic.multiprocessing import start_processes
+ from torch.distributed.elastic.utils import macros
+ import shutil
+ import copy
+ from contextlib import closing
+ import subprocess
+
+ from torch.distributed.elastic.utils.logging import get_logger
+
+ log = get_logger(__name__)
+
+
+ class DSElasticAgent(LocalElasticAgent):
+
+     def __init__(
+         self,
+         spec: WorkerSpec,
+         env: Dict,
+         start_method="spawn",
+         exit_barrier_timeout: float = 300,
+         log_dir: Optional[str] = None,
+     ):
+         super().__init__(spec, start_method, exit_barrier_timeout, log_dir)
+         self.ds_env = env
+
+     @staticmethod
+     def _set_master_addr_port(store: Store,
+                               master_addr: Optional[str],
+                               master_port: Optional[int],
+                               local_addr: Optional[str] = None):
+         if master_port is None:
+             sock = _get_socket_with_port()
+             with closing(sock):
+                 master_port = sock.getsockname()[1]
+
+         if master_addr is None:
+             # master_addr = _get_fq_hostname()
+             result = subprocess.check_output("hostname -I", shell=True)
+             master_addr = result.decode('utf-8').split()[0]
+
+         store.set("MASTER_ADDR", master_addr.encode(encoding="UTF-8"))
+         store.set("MASTER_PORT", str(master_port).encode(encoding="UTF-8"))
+
+     def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
+         spec = worker_group.spec
+         store = worker_group.store
+         assert store is not None
+         master_addr, master_port = super()._get_master_addr_port(store)
+         restart_count = spec.max_restarts - self._remaining_restarts
+
+         use_agent_store = spec.rdzv_handler.get_backend() == "static"
+
+         args: Dict[int, Tuple] = {}
+         envs: Dict[int, Dict[str, str]] = {}
+         for worker in worker_group.workers:
+             local_rank = worker.local_rank
+
+             worker_env_ds = copy.deepcopy(self.ds_env)
+             worker_env_elastic = {
+                 "LOCAL_RANK": str(local_rank),
+                 "RANK": str(worker.global_rank),
+                 "GROUP_RANK": str(worker_group.group_rank),
+                 "ROLE_RANK": str(worker.role_rank),
+                 "ROLE_NAME": spec.role,
+                 "LOCAL_WORLD_SIZE": str(spec.local_world_size),
+                 "WORLD_SIZE": str(worker.world_size),
+                 "GROUP_WORLD_SIZE": str(worker_group.group_world_size),
+                 "ROLE_WORLD_SIZE": str(worker.role_world_size),
+                 "MASTER_ADDR": master_addr,
+                 "MASTER_PORT": str(master_port),
+                 "TORCHELASTIC_RESTART_COUNT": str(restart_count),
+                 "TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
+                 "TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
+                 "TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
+                 "NCCL_ASYNC_ERROR_HANDLING": os.getenv("NCCL_ASYNC_ERROR_HANDLING", str(1)),
+             }
+             worker_env_ds.update(worker_env_elastic)
+             if "OMP_NUM_THREADS" in os.environ:
+                 worker_env_ds["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]
+
+             envs[local_rank] = worker_env_ds
+             worker_args = list(spec.args)
+             worker_args = macros.substitute(worker_args, str(local_rank))
+             args[local_rank] = tuple(worker_args)
+
+         # scaling events do not count towards restarts (gets same attempt #)
+         # remove existing log dir if this restart is due to a scaling event
+         attempt_log_dir = os.path.join(self._log_dir, f"attempt_{restart_count}")
+         shutil.rmtree(attempt_log_dir, ignore_errors=True)
+         os.makedirs(attempt_log_dir)
+
+         assert spec.entrypoint is not None
+         self._pcontext = start_processes(
+             name=spec.role,
+             entrypoint=spec.entrypoint,
+             args=args,
+             envs=envs,
+             log_dir=attempt_log_dir,
+             start_method=self._start_method,
+             redirects=spec.redirects,
+             tee=spec.tee,
+         )
+
+         return self._pcontext.pids()
+
+     def _invoke_run(self, role: str = "default") -> RunResult:
+         # NOTE: currently only works for a single role
+
+         spec = self._worker_group.spec
+         role = spec.role
+
+         log.info(f"[{role}] starting workers for entrypoint: {spec.get_entrypoint_name()}")
+
+         self._initialize_workers(self._worker_group)
+         monitor_interval = spec.monitor_interval
+         rdzv_handler = spec.rdzv_handler
+
+         participants = rdzv_handler._state_holder.state.participants
+
+         while True:
+             assert self._worker_group.state != WorkerState.INIT
+             time.sleep(monitor_interval)
+             run_result = self._monitor_workers(self._worker_group)
+             state = run_result.state
+             self._worker_group.state = state
+
+             expire_time = datetime.utcnow() - (rdzv_handler._settings.keep_alive_interval *
+                                                rdzv_handler._settings.keep_alive_max_attempt)
+             _dead_nodes = [
+                 node for node, last_heartbeat in rdzv_handler._state_holder.state.last_heartbeats.items()
+                 if last_heartbeat < expire_time
+             ]
+
+             put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts)
+             put_metric(f"workers.{role}.{state.name.lower()}", 1)
+
+             if state == WorkerState.SUCCEEDED:
+                 log.info(f"[{role}] worker group successfully finished."
+                          f" Waiting {self._exit_barrier_timeout} seconds for other agents to finish.")
+                 self._exit_barrier()
+                 return run_result
+             elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED
+                            } or len(participants) > len(rdzv_handler._state_holder.state.participants):
+                 if self._remaining_restarts > 0:
+                     log.info(f"[{role}] Worker group {state.name}. "
+                              f"{self._remaining_restarts}/{spec.max_restarts} attempts left;"
+                              f" will restart worker group")
+                     self._remaining_restarts -= 1
+                     # rdzv_handler._state_holder.state.restart = False
+                     self._restart_workers(self._worker_group)
+                     participants = rdzv_handler._state_holder.state.participants
+
+                 else:
+                     self._stop_workers(self._worker_group)
+                     self._worker_group.state = WorkerState.FAILED
+                     self._exit_barrier()
+                     return run_result
+             elif state == WorkerState.HEALTHY:
+                 # membership changes do not count as retries
+                 num_nodes_waiting = rdzv_handler.num_nodes_waiting()
+                 group_rank = self._worker_group.group_rank
+                 if num_nodes_waiting > 0:
+                     log.info(f"[{role}] Detected {num_nodes_waiting} "
+                              f"new nodes from group_rank={group_rank}; "
+                              f"will restart worker group")
+                     self._restart_workers(self._worker_group)
+                     participants = rdzv_handler._state_holder.state.participants
+             else:
+                 raise Exception(f"[{role}] Worker group in {state.name} state")
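Editor's note: a small, self-contained sketch of the environment-merging order used in `_start_workers` above (plain dictionaries, no torch required). The dictionary contents are illustrative; the point is that the per-worker elastic variables override the DeepSpeed-provided environment on key collisions, and an OMP_NUM_THREADS value from the agent's own environment wins last.

import copy

ds_env = {"PYTHONPATH": "/opt/app", "MASTER_PORT": "29500"}             # passed to DSElasticAgent
elastic_env = {"MASTER_PORT": "12345", "LOCAL_RANK": "0", "RANK": "0"}  # built per worker

worker_env = copy.deepcopy(ds_env)
worker_env.update(elastic_env)            # elastic values take precedence on key collisions
agent_environ = {"OMP_NUM_THREADS": "8"}  # stand-in for os.environ
if "OMP_NUM_THREADS" in agent_environ:
    worker_env["OMP_NUM_THREADS"] = agent_environ["OMP_NUM_THREADS"]

assert worker_env["MASTER_PORT"] == "12345"
assert worker_env["OMP_NUM_THREADS"] == "8"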
venv/lib/python3.10/site-packages/deepspeed/elasticity/elasticity.py ADDED
@@ -0,0 +1,378 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import os
+ import json
+ import numpy as np
+ import math
+ from packaging import version as pkg_version
+
+ from .config import ElasticityConfig, ElasticityConfigError, ElasticityError, \
+     ElasticityIncompatibleWorldSize
+ from .constants import ELASTICITY, ENABLED, ENABLED_DEFAULT, LATEST_ELASTICITY_VERSION, \
+     MINIMUM_DEEPSPEED_VERSION, DEEPSPEED_ELASTICITY_CONFIG
+ from ..git_version_info import version as __version__
+ from ..utils import logger
+
+ # Thirty eight smallest highly composite numbers. The list should
+ # be enough to support up to 720K batch size.
+ HCN_LIST = [
+     1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680, 2520, 5040, 7560, 10080, 15120, 20160,
+     25200, 27720, 45360, 50400, 55440, 83160, 110880, 166320, 221760, 277200, 332640, 498960, 554400, 665280, 720720
+ ]
+
+
+ def get_candidate_batch_sizes(base_list, max_acceptable_batch_size):
+     candidate_batch_size = []
+     for base in base_list:
+         if base >= max_acceptable_batch_size:
+             candidate_batch_size.append(base)
+         else:
+             value = max_acceptable_batch_size // base
+             index = np.argmax(np.asarray(HCN_LIST) > value)
+             candidate_batch_size.append(HCN_LIST[index - 1] * base)
+     candidate_batch_size = list(set(candidate_batch_size))
+     logger.info(f"Candidate batch size: {candidate_batch_size}")
+     return candidate_batch_size
+
+
+ def get_valid_gpus(batch_size, micro_batches, min_valid_gpus, max_valid_gpus):
+     valid_gpus = []
+     for micro_batch in micro_batches:
+         if batch_size % micro_batch == 0:
+
+             max_gpus = batch_size // micro_batch
+             if min_valid_gpus <= max_gpus <= max_valid_gpus:
+                 valid_gpus.append(max_gpus)
+
+             # find all factors less than max_gpus / 2
+             for i in range(1, max_gpus // 2 + 1):
+                 if i > max_valid_gpus:
+                     break
+                 if i < min_valid_gpus:
+                     continue
+                 if max_gpus % i == 0:
+                     valid_gpus.append(i)
+     valid_gpus = set(valid_gpus)
+     valid_gpus = sorted(list(valid_gpus))
+     return valid_gpus
+
+
+ def get_best_candidates(candidate_batch_sizes, micro_batches, min_gpus, max_gpus, prefer_larger):
+
+     max_valid_gpus = 0
+     valid_gpus = None
+     final_batch_size = int(min(micro_batches))
+
+     for batch_size in candidate_batch_sizes:
+
+         current_valid_gpus = get_valid_gpus(batch_size, micro_batches, min_gpus, max_gpus)
+
+         if (len(current_valid_gpus) > max_valid_gpus or (len(current_valid_gpus) == max_valid_gpus and
+                                                          ((prefer_larger and batch_size > final_batch_size) or
+                                                           (not prefer_larger and batch_size < final_batch_size)))):
+             max_valid_gpus = len(current_valid_gpus)
+             valid_gpus = current_valid_gpus
+             final_batch_size = batch_size
+
+     return final_batch_size, valid_gpus
+
+
+ def _get_compatible_gpus_v01(micro_batches,
+                              max_acceptable_batch_size,
+                              min_gpus=None,
+                              max_gpus=None,
+                              prefer_larger=True):
+     '''We use two heuristics to compute the batch size
+     1. We use the Lowest Common Multiple of the micro-batches
+     as the base batch size and scale it by a HCN such that the result is
+     the largest batch size less than the max_acceptable batch size
+     2. We use each of the micro batches as a base and scale it
+     by a HCN such that the result is the largest batch size less than the
+     max_acceptable batch size.
+
+     We then use brute force to count the number of compatible GPU count for
+     each of the aforementioned cases, and return the batch size with the most number of
+     compatible GPU counts in the min-max GPU range if provided, other wise
+     we return the batch size with the most number of total compatible GPU counts.
+
+     Returns:
+         final_batch_size
+         valid_gpus
+     '''
+     min_gpus = min_gpus or 1
+     max_gpus = max_gpus or max_acceptable_batch_size // min(micro_batches)
+
+     if not all(mb <= max_acceptable_batch_size for mb in micro_batches):
+         raise ValueError(f"All micro batches must be less than \
+             or equal to max_acceptable_batch_size: {max_acceptable_batch_size}")
+
+     lcm = np.lcm.reduce(micro_batches)
+
+     base_list = []
+     base_list.extend(micro_batches)
+     base_list.append(lcm)
+
+     candidate_batch_sizes = get_candidate_batch_sizes(base_list, max_acceptable_batch_size)
+
+     final_batch_size, valid_gpus = get_best_candidates(candidate_batch_sizes, micro_batches, min_gpus, max_gpus,
+                                                        prefer_larger)
+
+     return final_batch_size, valid_gpus
+
+
+ def _get_compatible_gpus_v02(micro_batches,
+                              max_acceptable_batch_size,
+                              current_num_gpus,
+                              min_gpus=None,
+                              max_gpus=None,
+                              prefer_larger=True,
+                              num_gpus_per_node=1,
+                              model_parallel_size=1):
+     '''
+     Returns:
+         final_batch_size
+         valid_gpus
+         micro-batch size
+     '''
+     if num_gpus_per_node % model_parallel_size != 0:
+         raise ElasticityError(
+             f"In Elasticity v0.2, number of GPUs per node:" \
+             f"{num_gpus_per_node} should be divisible by " \
+             f"model parallel size {model_parallel_size}")
+
+     def get_microbatch(final_batch_size):
+         candidate_microbatch = None
+
+         for micro_batch in micro_batches:
+             if final_batch_size // current_num_gpus % micro_batch == 0:
+                 if candidate_microbatch is None:
+                     candidate_microbatch = micro_batch
+                 if prefer_larger and candidate_microbatch < micro_batch:
+                     candidate_microbatch = micro_batch
+         return candidate_microbatch
+
+     dp_size_per_node = num_gpus_per_node // model_parallel_size
+
+     final_batch_size, valid_world_size = _get_compatible_gpus_v01(
+         micro_batches,
+         int(max_acceptable_batch_size / dp_size_per_node),
+         int(min_gpus / num_gpus_per_node),
+         int(max_gpus / num_gpus_per_node),  # Passing number of max nodes as Elasticity v2 works at node level
+         prefer_larger=prefer_larger)
+
+     final_batch_size = int(final_batch_size) * dp_size_per_node
+     valid_dp_world_size = [i * dp_size_per_node for i in valid_world_size]
+     if current_num_gpus // model_parallel_size in valid_dp_world_size:
+         candidate_microbatch = get_microbatch(final_batch_size)
+         return final_batch_size, valid_dp_world_size, candidate_microbatch
+
+     current_dp_size = (current_num_gpus / num_gpus_per_node) * dp_size_per_node
+     candidate_batch_sizes = []
+     for micro_batch in micro_batches:
+         min_batch_size = micro_batch * current_dp_size
+
+         factor = math.floor(max_acceptable_batch_size / float(min_batch_size))
+         candidate_batch_sizes.append(factor * min_batch_size)
+
+     used_microbatch = None
+     if prefer_larger:
+         candidate_batch_size = max(candidate_batch_sizes)
+     else:
+         candidate_batch_size = min(candidate_batch_sizes)
+
+     candidate_microbatch = get_microbatch(candidate_batch_size)
+
+     return candidate_batch_size, [int(current_dp_size)], candidate_microbatch
+
+
+ def _compatible_ds_version_check(target_deepspeed_version: str):
+     min_version = pkg_version.parse(MINIMUM_DEEPSPEED_VERSION)
+     target_version = pkg_version.parse(target_deepspeed_version)
+
+     err_str = f"Target deepspeed version of {target_deepspeed_version} is not compatible " \
+         f"with minimum version {MINIMUM_DEEPSPEED_VERSION} supporting elasticity."
+     if target_version < min_version:
+         raise ElasticityError(err_str)
+     return True
+
+
+ def elasticity_enabled(ds_config: dict):
+     if ELASTICITY not in ds_config:
+         return False
+     return ds_config[ELASTICITY].get(ENABLED, ENABLED_DEFAULT)
+
+
+ def ensure_immutable_elastic_config(runtime_elastic_config_dict: dict):
+     """
+     Ensure the resource scheduler saw the same elastic config we are using at runtime
+     """
+     if DEEPSPEED_ELASTICITY_CONFIG in os.environ:
+         scheduler_elastic_config_dict = json.loads(os.environ[DEEPSPEED_ELASTICITY_CONFIG])
+         scheduler_elastic_config = ElasticityConfig(scheduler_elastic_config_dict)
+         runtime_elastic_config = ElasticityConfig(runtime_elastic_config_dict)
+         err_str = "Elastic config '{}={}' seen by resource scheduler does not match config passed to runtime {}={}"
+         if runtime_elastic_config.max_acceptable_batch_size != scheduler_elastic_config.max_acceptable_batch_size:
+             raise ElasticityConfigError(
+                 err_str.format('max_acceptable_batch_size', scheduler_elastic_config.max_acceptable_batch_size,
+                                'max_acceptable_batch_size', runtime_elastic_config.max_acceptable_batch_size))
+         if runtime_elastic_config.micro_batches != scheduler_elastic_config.micro_batches:
+             raise ElasticityConfigError(
+                 err_str.format('micro_batches', scheduler_elastic_config.micro_batches, 'micro_batches',
+                                runtime_elastic_config.micro_batches))
+         if runtime_elastic_config.version != scheduler_elastic_config.version:
+             raise ElasticityConfigError(
+                 err_str.format('version', scheduler_elastic_config.version, 'version', runtime_elastic_config.version))
+     else:
+         logger.warning("Unable to find DEEPSPEED_ELASTICITY_CONFIG environment variable, cannot " \
+             "guarantee resource scheduler will scale this job using compatible GPU counts.")
+
+
+ def compute_elastic_config(ds_config: dict, target_deepspeed_version: str, world_size=0, return_microbatch=False):
+     """Core deepspeed elasticity API. Given an elastic config (similar to the example below)
+     DeepSpeed will compute a total train batch size corresponding valid GPU count list that
+     provides a high level of elasticity. Elasticity in this case means we are safe to scale
+     the training job up/down across the GPU count list *without* any negative impacts on
+     training convergence. This is achievable primarily due to DeepSpeed's gradient accumulation
+     feature which allows us to decompose a global training batch size into:
+     micro-batch-size * gradient-accumulation-steps * world-size.
+
+     "elasticity": {
+         "enabled": true,
+         "max_train_batch_size": 2000,
+         "micro_batch_sizes": [2,4,6],
+         "min_gpus": 1,
+         "max_gpus" : 10000
+         "min_time": 20
+         "version": 0.1
+     }
+
+     Intended to be called both by scheduling infrastructure and deepspeed runtime.
+     For the same `ds_config` we should return deterministic results.
+
+     Args:
+         ds_config (dict): DeepSpeed config dictionary/json
+         target_deepspeed_version (str): When called from scheduling
+             infrastructure we want to ensure that the target deepspeed version is
+             compatible with the elasticity version used in the backend.
+         world_size (int, optional): Intended/current DP world size, will do some sanity
+             checks to ensure world size is actually valid with the config.
+         return_microbatch (bool, optional): whether to return micro batch size or not.
+
+     Raises:
+         ElasticityConfigError: Missing required elasticity config or elasticity disabled
+         ElasticityError: If target deepspeed version is not compatible with current version
+
+     Returns:
+         final_batch_size (int): total batch size used for training
+         valid_gpus (list(int)): list of valid GPU counts with this config
+         micro_batch_size (int, optional): if world_size is provided will return
+             specific micro batch size
+     """
+     if not isinstance(ds_config, dict):
+         raise ValueError("Expected ds_config to be a dictionary but received " \
+             f"a {type(ds_config)}, containing: {ds_config}")
+
+     if ELASTICITY not in ds_config:
+         raise ElasticityConfigError(f"'{ELASTICITY}' is missing from config json," \
+             " please add it if running an elastic training job.")
+
+     elastic_config_dict = ds_config[ELASTICITY]
+     if not elastic_config_dict.get(ENABLED, ENABLED_DEFAULT):
+         raise ElasticityConfigError("Elasticity is disabled, please enable it " \
+             "('enabled':true) if running an elastic training job.")
+
+     elastic_config = ElasticityConfig(elastic_config_dict)
+     model_parallel_size = elastic_config.model_parallel_size
+     num_gpus_per_node = elastic_config.num_gpus_per_node
+
+     if model_parallel_size > 1 and float(elastic_config.version) != 0.2:
+         raise ElasticityConfigError(f"Elasticity V{elastic_config.version} " \
+             f"does not support model-parallel training. Given model-parallel size: " \
+             f"{model_parallel_size}")
+
+     if float(elastic_config.version) > LATEST_ELASTICITY_VERSION:
+         raise ElasticityConfigError("Attempting to run elasticity version " \
+             f"{elastic_config.version} but runtime only supports up " \
+             f"to {LATEST_ELASTICITY_VERSION}")
+
+     # Ensure target deepspeed version works with intended elasticity version
+     if not _compatible_ds_version_check(target_deepspeed_version):
+         raise ElasticityError("Unable to run elasticity on target deepspeed version of" \
+             f" {target_deepspeed_version}, currently {__version__}")
+
+     if float(elastic_config.version) == 0.1:
+         final_batch_size, valid_gpus = _get_compatible_gpus_v01(
+             micro_batches=elastic_config.micro_batches,
+             max_acceptable_batch_size=elastic_config.max_acceptable_batch_size,
+             min_gpus=elastic_config.min_gpus,
+             max_gpus=elastic_config.max_gpus,
+             prefer_larger=elastic_config.prefer_larger_batch_size)
+         # ensure batch size is int dtype
+         final_batch_size = int(final_batch_size)
+     elif float(elastic_config.version) == 0.2:
+         if world_size != 0:
+             current_num_gpus = world_size
+         else:
+             if "WORLD_SIZE" in os.environ and \
+                     os.getenv('WORLD_SIZE').isnumeric():
+                 current_num_gpus = int(os.getenv('WORLD_SIZE'))
+             else:
+                 WORLD_SIZE = os.getenv('WORLD_SIZE')
+                 raise ElasticityConfigError(
+                     'Elasticity V 0.2 needs WORLD_SIZE '\
+                     'to compute valid batch size. '\
+                     'Either give it as argument to function compute_elastic_config '\
+                     'or set it as an environment variable. '\
+                     f'Value of WORLD_SIZE as environment variable is {WORLD_SIZE}')
+
+         final_batch_size, valid_gpus, candidate_microbatch_size = _get_compatible_gpus_v02(
+             micro_batches=elastic_config.micro_batches,
+             max_acceptable_batch_size=elastic_config.max_acceptable_batch_size,
+             current_num_gpus=current_num_gpus,
+             min_gpus=elastic_config.min_gpus,
+             max_gpus=elastic_config.max_gpus,
+             prefer_larger=elastic_config.prefer_larger_batch_size,
+             num_gpus_per_node=num_gpus_per_node,
+             model_parallel_size=model_parallel_size)
+         # ensure batch size is int dtype
+         final_batch_size = int(final_batch_size)
+     else:
+         raise NotImplementedError(f"Unable to find elastic logic for version: {elastic_config.version}")
+
+     logger.info(f"Valid World Size (GPUs / Model Parallel Size): {valid_gpus}")
+
+     if world_size > 0:
+         if world_size not in valid_gpus:
+             raise ElasticityIncompatibleWorldSize(f"World size ({world_size}) is not valid " \
+                 f"with the current list of valid GPU counts: {valid_gpus}")
+
+         # Pick largest valid micro batch size
+         micro_batch_size = None
+         for mbsz in sorted(list(set(elastic_config.micro_batches)), reverse=True):
+             if final_batch_size // world_size % mbsz == 0:
+                 micro_batch_size = mbsz
+                 break
+         assert micro_batch_size is not None, "Unable to find divisible micro batch size" \
+             f" world_size={world_size}, final_batch_size={final_batch_size}, and " \
+             f" micro_batches={elastic_config.micro_batches}."
+         return final_batch_size, valid_gpus, micro_batch_size
+
+     if return_microbatch:
+         # Pick a valid micro batch size
+         if float(elastic_config.version) == 0.2:
+             return final_batch_size, valid_gpus, candidate_microbatch_size
+         else:
+             micro_batch_size = None
+             for mbsz in sorted(list(set(elastic_config.micro_batches)), reverse=True):
+                 if final_batch_size // world_size % mbsz == 0:
+                     micro_batch_size = mbsz
+                     break
+             assert micro_batch_size is not None, "Unable to find divisible micro batch size" \
+                 f" world_size={world_size}, final_batch_size={final_batch_size}, and " \
+                 f" micro_batches={elastic_config.micro_batches}."
+             return final_batch_size, valid_gpus, micro_batch_size
+
+     return final_batch_size, valid_gpus
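Editor's note: a hedged usage sketch of compute_elastic_config as described in its docstring above; the config values are illustrative, not part of the commit.

# Hypothetical example: ask DeepSpeed for an elastic batch size and the GPU counts it supports.
import deepspeed
from deepspeed.elasticity import compute_elastic_config

ds_config = {
    "elasticity": {
        "enabled": True,
        "max_train_batch_size": 2000,
        "micro_batch_sizes": [2, 4, 6],
        "min_gpus": 1,
        "max_gpus": 64,
        "version": 0.1,
    }
}

# Without a world size: returns the chosen global batch size and all compatible GPU counts.
final_batch_size, valid_gpus = compute_elastic_config(ds_config, target_deepspeed_version=deepspeed.__version__)
print(final_batch_size, valid_gpus)

# With a concrete world size, a per-GPU micro-batch size is also returned.
final_batch_size, valid_gpus, micro_batch = compute_elastic_config(ds_config,
                                                                   target_deepspeed_version=deepspeed.__version__,
                                                                   world_size=8)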
venv/lib/python3.10/site-packages/deepspeed/elasticity/utils.py ADDED
@@ -0,0 +1,14 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from deepspeed.utils.torch import required_torch_version
+
+
+ def is_torch_elastic_compatible():
+     '''
+     Helper to lookup torch version. Elastic training is
+     introduced in 1.11.x
+     '''
+     return required_torch_version(min_version=1.11)
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/base_optimizer.cpython-310.pyc ADDED
Binary file (2.4 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/compiler.cpython-310.pyc ADDED
Binary file (5.93 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/config.cpython-310.pyc ADDED
Binary file (29.8 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/config_utils.cpython-310.pyc ADDED
Binary file (8.7 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/constants.cpython-310.pyc ADDED
Binary file (8.87 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/dataloader.cpython-310.pyc ADDED
Binary file (4.22 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/eigenvalue.cpython-310.pyc ADDED
Binary file (5.33 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/engine.cpython-310.pyc ADDED
Binary file (112 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/hybrid_engine.cpython-310.pyc ADDED
Binary file (12.7 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/lr_schedules.cpython-310.pyc ADDED
Binary file (30.8 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/progressive_layer_drop.cpython-310.pyc ADDED
Binary file (2.06 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/quantize.cpython-310.pyc ADDED
Binary file (5.22 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/sparse_tensor.cpython-310.pyc ADDED
Binary file (2.75 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/state_dict_factory.cpython-310.pyc ADDED
Binary file (11.6 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/utils.cpython-310.pyc ADDED
Binary file (31.7 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/weight_quantizer.cpython-310.pyc ADDED
Binary file (6.64 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ '''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (260 Bytes).
venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/checkpointing.cpython-310.pyc ADDED
Binary file (28.6 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.41 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ '''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (253 Bytes).
venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/checkpoint_engine.cpython-310.pyc ADDED
Binary file (1.26 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/nebula_checkpoint_engine.cpython-310.pyc ADDED
Binary file (3.77 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/torch_checkpoint_engine.cpython-310.pyc ADDED
Binary file (1.75 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/checkpoint_engine.py ADDED
@@ -0,0 +1,30 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import os
+
+
+ class CheckpointEngine(object):
+
+     # init checkpoint engine for save/load
+     def __init__(self, config_params=None):
+         pass
+
+     def create(self, tag):
+         # create checkpoint on give tag for save/load.
+         pass
+
+     def makedirs(self, path, exist_ok=False):
+         os.makedirs(path, exist_ok=exist_ok)
+
+     def save(self, state_dict, path: str):
+         pass
+
+     def load(self, path: str, map_location=None):
+         pass
+
+     def commit(self, tag):
+         # to tell checkpoint services if all files are ready.
+         pass
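Editor's note: to make the interface contract concrete, a minimal hypothetical in-memory engine built on the base class above. This is only a sketch for illustration, not an engine shipped with DeepSpeed.

from deepspeed.runtime.checkpoint_engine.checkpoint_engine import CheckpointEngine


class InMemoryCheckpointEngine(CheckpointEngine):
    """Toy engine that keeps state dicts in a dict instead of on disk."""

    def __init__(self, config_params=None):
        super().__init__(config_params)
        self._store = {}

    def save(self, state_dict, path: str):
        self._store[path] = state_dict

    def load(self, path: str, map_location=None):
        return self._store.get(path)

    def commit(self, tag):
        # Nothing to flush; report success so callers can proceed.
        return True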
venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py ADDED
@@ -0,0 +1,107 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import os
+ import torch
+ import torch_nebula
+
+ from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \
+     CheckpointEngine
+ from deepspeed.utils import logger, log_dist
+ from deepspeed.nebula.constants import *
+
+
+ def _get_tag_from_path(path):
+     return os.path.basename(os.path.dirname(path))
+
+
+ class NebulaCheckpointEngine(CheckpointEngine):
+
+     def __init__(self, config_params=None):
+         super().__init__(config_params)
+         self.checkpoint = None
+         self.tag_flag = None
+         self.enable_nebula_load = config_params.enable_nebula_load
+         self.nebula_load_path = config_params.load_path
+         if self.nebula_load_path is None:
+             self.nebula_load_path = config_params.persistent_storage_path
+
+         nebula_config_params = {
+             NEBULA_PERSISTENT_STORAGE_PATH: config_params.persistent_storage_path,
+             NEBULA_PERSISTENT_TIME_INTERVAL: config_params.persistent_time_interval,
+             NEBULA_NUM_OF_VERSION_IN_RETENTION: config_params.num_of_version_in_retention,
+         }
+         torch_nebula.init(**nebula_config_params)
+
+     def create(self, tag):
+         log_dist(f"[Nebula] Start Checkpoint for tag:{tag}", ranks=[0])
+         # -2 means: customer needs to explicitly tell nebula
+         # current checkpoint is complete by commit method.
+         self.checkpoint = torch_nebula.Checkpoint(tag, -2)
+
+     def save(self, state_dict, path: str):
+         log_dist(f"[Nebula] Create dummy files for loading.")
+         torch.save("", path)
+
+         tag = _get_tag_from_path(path)
+         partition_name = os.path.basename(path)
+         logger.info(f"[Nebula] Saving {partition_name} under tag {tag}...")
+         self.checkpoint.save(partition_name, state_dict)
+         logger.info(f"[Nebula] Saved {partition_name} under tag {tag}.")
+         return None
+
+     def load(self, path: str, map_location=None):
+         tag = _get_tag_from_path(path)
+         first_load_flag = self.tag_flag is None or self.tag_flag == tag
+         if not self.enable_nebula_load and first_load_flag:
+             self.tag_flag = tag
+             logger.info(f"[Nebula] Disable nebula load. Loading checkpoint from {path} ...")
+             partition = torch.load(path, map_location=map_location)
+             logger.info(f"[Nebula] Disable nebula load. Loaded checkpoint from {path} .")
+             return partition
+
+         partition_name = os.path.basename(path)
+         logger.info(f"[Nebula] Loading {path} under tag {tag} from nebula path {self.nebula_load_path}...")
+
+         checkpoint = None
+         if tag in (None, 'latest', 'latest_universal'):
+             # In some cases, there is the inconsistent tag between deepspeed metadata (latest file)
+             # and nebula metadata, will lead to the failure on loading with deepspeed tag. Then we
+             # will try to load the valid latest checkpoint from nebula(tier3 > tier1). So, in summary
+             # when met failure loading for given tag, the loading priority would be like:
+             # nebula tier3 latest > nebula tier1 latest.
+             checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path)
+         else:
+             checkpoint = torch_nebula.get_checkpoint(tag=tag, persist_path=self.nebula_load_path)
+
+         if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''):
+             logger.info(
+                 f"Unable to find valid checkpoint tag:{tag} from Nebula, try to get latest checkpoint again from nebula {self.nebula_load_path} path!"
+             )
+             # nebula tier3 latest
+             checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path)
+             if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''):
+                 logger.info(
+                     f"Unable to find latest checkpoint from Nebula tier3, try to get latest checkpoint again from nebula tier1 path!"
+                 )
+                 # nebula tier1 latest
+                 checkpoint = torch_nebula.get_latest_checkpoint()
+                 logger.warning(f"Unable to find valid checkpoint from Nebula under tag:{tag}.")
+                 return None
+
+         tag = checkpoint.tag
+         self.tag_flag = -1
+         partition = checkpoint.load(partition_name, map_location=map_location)
+         logger.info(f"[Nebula] Loaded {path} under tag {tag} from {self.nebula_load_path}.")
+         return partition
+
+     def commit(self, tag):
+         # nebula commit will be call when all files under give tag are ready to be persisted in the async way.
+         logger.info(f"[Nebula] all files for {tag} are saved in tier1. It is ready to start persisting")
+         commit_rls = self.checkpoint.commit()
+         if not commit_rls:
+             logger.error(f"[Nebula] failed to commit the checkpoint, please check the log.")
+             return False
+         return commit_rls
venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py ADDED
@@ -0,0 +1,34 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+ from deepspeed.utils import logger, log_dist
+ from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \
+     CheckpointEngine
+
+
+ class TorchCheckpointEngine(CheckpointEngine):
+
+     def __init__(self, config_params=None):
+         super().__init__(config_params)
+
+     def create(self, tag):
+         log_dist(f"[Torch] Checkpoint {tag} is about to be saved!", ranks=[0])
+
+     def save(self, state_dict, path: str):
+         logger.info(f"[Torch] Saving {path}...")
+         torch.save(state_dict, path)
+         logger.info(f"[Torch] Saved {path}.")
+         return None
+
+     def load(self, path: str, map_location=None):
+         logger.info(f"[Torch] Loading checkpoint from {path}...")
+         partition = torch.load(path, map_location=map_location)
+         logger.info(f"[Torch] Loaded checkpoint from {path}.")
+         return partition
+
+     def commit(self, tag):
+         logger.info(f"[Torch] Checkpoint {tag} is ready now!")
+         return True
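Editor's note: a short usage sketch of the torch-based engine above; the paths, tag, and model are placeholders chosen for illustration.

import torch
from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine

engine = TorchCheckpointEngine()
model = torch.nn.Linear(4, 2)

engine.create(tag="step_100")                          # logs that a checkpoint is starting
engine.makedirs("/tmp/step_100", exist_ok=True)        # inherited from the base class
engine.save(model.state_dict(), "/tmp/step_100/model.pt")
engine.commit(tag="step_100")                          # plain torch engine: always returns True

state = engine.load("/tmp/step_100/model.pt", map_location="cpu")
model.load_state_dict(state)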
venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ '''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (247 Bytes).
venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/cupy.cpython-310.pyc ADDED
Binary file (1.22 kB).
venv/lib/python3.10/site-packages/deepspeed/runtime/compression/cupy.py ADDED
@@ -0,0 +1,26 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import cupy
+ from torch.utils.dlpack import to_dlpack
+ from torch.utils.dlpack import from_dlpack
+
+
+ class CupyBackend(object):
+
+     def __init__(self):
+         pass
+
+     def torch2cupy(self, tensor):
+         return cupy.fromDlpack(to_dlpack(tensor))
+
+     def cupy2torch(self, cupy_tensor):
+         return from_dlpack(cupy_tensor.toDlpack())
+
+     def compress_by_chunk(self, cupy_bool_tensor, num_chunks):
+         packed_sign = cupy.packbits(cupy_bool_tensor)
+         sign_list_packed = cupy.split(packed_sign, num_chunks)
+         cupy.cuda.get_current_stream().synchronize()
+         return sign_list_packed
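Editor's note: a hedged usage sketch for the CuPy helper above. It assumes a CUDA device plus a matching cupy build are available; the tensor shape and chunk count are illustrative.

import torch
from deepspeed.runtime.compression.cupy import CupyBackend

backend = CupyBackend()

# Pack a boolean sign tensor into bits and split it into per-chunk buffers.
signs = (torch.randn(1024, device="cuda") > 0)        # bool tensor on GPU
cupy_signs = backend.torch2cupy(signs)
packed_chunks = backend.compress_by_chunk(cupy_signs, num_chunks=4)

# Round-trip one chunk back to torch for communication or inspection.
chunk0 = backend.cupy2torch(packed_chunks[0])
print(chunk0.dtype, chunk0.numel())                   # uint8, 1024 bits / 8 / 4 chunks = 32 elements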
venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__init__.py ADDED
@@ -0,0 +1,7 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .module import PipelineModule, LayerSpec, TiedLayerSpec
+ from .topology import ProcessTopology
venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/engine.py ADDED
@@ -0,0 +1,1437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from types import MethodType
7
+ from collections import OrderedDict
8
+
9
+ import torch
10
+ from deepspeed import comm as dist
11
+
12
+ from deepspeed.utils import logger
13
+ from deepspeed.utils.timer import ThroughputTimer
14
+ from deepspeed.accelerator import get_accelerator
15
+ from deepspeed.runtime.bf16_optimizer import BF16_Optimizer
16
+
17
+ from ..engine import DeepSpeedEngine, MEMORY_OPT_ALLREDUCE_SIZE
18
+ from deepspeed.utils.timer import FORWARD_MICRO_TIMER, FORWARD_GLOBAL_TIMER, BACKWARD_MICRO_TIMER, \
19
+ BACKWARD_GLOBAL_TIMER, BACKWARD_INNER_MICRO_TIMER, BACKWARD_INNER_GLOBAL_TIMER, \
20
+ BACKWARD_REDUCE_MICRO_TIMER, BACKWARD_REDUCE_GLOBAL_TIMER, \
21
+ STEP_MICRO_TIMER, STEP_GLOBAL_TIMER
22
+
23
+ from ..utils import PartitionedTensor
24
+ from ..dataloader import RepeatingLoader
25
+ from ..zero.config import ZeroStageEnum
26
+ from ..activation_checkpointing import checkpointing as ds_checkpointing
27
+
28
+ from .module import PipelineModule, PipelineError
29
+ from . import p2p
30
+ from . import schedule
31
+
32
+ TARGET_ID = -2
33
+ LOG_STAGE = -2
34
+ DATA_PARALLEL_ID = -2
35
+
36
+ BATCH_INPUT_TIMER = 'batch_input'
37
+ TRAIN_BATCH_TIMER = 'train_batch'
38
+ PIPE_SEND_OUTPUT_TIMER = 'pipe_send_output'
39
+ PIPE_SEND_GRAD_TIMER = 'pipe_send_grad'
40
+ PIPE_RECV_INPUT_TIMER = 'pipe_recv_input'
41
+ PIPE_RECV_GRAD_TIMER = 'pipe_recv_grad'
42
+
43
+
44
+ def is_even(number):
45
+ return number % 2 == 0
46
+
47
+
48
+ mem_alloced = 0
49
+ mem_cached = 0
50
+
51
+
52
+ def _tensor_bytes(tensor):
53
+ return tensor.numel() * tensor.element_size()
54
+
55
+
56
+ class PipelineEngine(DeepSpeedEngine):
57
+ """ A training engine for hybrid pipeline, data, and model parallel training.
58
+
59
+ This engine is created by ``deepspeed.initialize()`` when a :class:`PipelineModule`
60
+ is provided.
61
+ """
62
+ ID_TO_DTYPE = [
63
+ torch.float32, torch.float64, torch.complex64, torch.complex128, torch.float16, torch.bfloat16, torch.uint8,
64
+ torch.int8, torch.int16, torch.int32, torch.int64, torch.bool
65
+ ]
66
+ DTYPE_TO_ID = {dtype: id_ for id_, dtype in enumerate(ID_TO_DTYPE)}
67
+
68
+ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs):
69
+ super().__init__(*super_args, **super_kwargs)
70
+ assert isinstance(self.module, PipelineModule) \
71
+ or (hasattr(self.module, 'wrapped') and isinstance(self.module.wrapped, PipelineModule)), \
72
+ "model must be a PipelineModule"
73
+
74
+ assert self.zero_optimization_stage(
75
+ ) < ZeroStageEnum.gradients, "ZeRO-2 and ZeRO-3 are incompatible with pipeline parallelism"
76
+
77
+ # We schedule the all-reduces, so disable it in super().backward()
78
+ self.enable_backward_allreduce = False
79
+ self.has_bool_tensors = has_bool_tensors
80
+ self.eval_return_logits = False
81
+ self.outputs = None
82
+ # BF16 Optimizer is hardcoded for fp32 gradient accumulation
83
+ self.using_bf16_optimizer = type(self.optimizer) == BF16_Optimizer
84
+
85
+ # used to disable the pipeline all-reduce when used with 1-bit Adam/1-bit LAMB
86
+ self.pipeline_enable_backward_allreduce = True
87
+
88
+ if self.elasticity_enabled():
89
+ if not self.is_elastic_model_parallel_supported():
90
+ assert not self.elasticity_enabled(), "Elasticity is not currently supported" \
91
+ " with pipeline parallelism."
92
+
93
+ # pipeline step for logging
94
+ self.log_batch_step_id = -1
95
+
96
+ self.micro_batch_size = self.train_micro_batch_size_per_gpu()
97
+ self.micro_batches = self.gradient_accumulation_steps()
98
+
99
+ # Set Grid and Communication Groups
100
+ self.grid = self.module._grid
101
+ if self.grid.get_global_rank() == 0:
102
+ logger.info(f'CONFIG: micro_batches={self.micro_batches} '
103
+ f'micro_batch_size={self.micro_batch_size}')
104
+
105
+ self.global_rank = self.grid.get_global_rank()
106
+
107
+ assert self.dp_world_size == self.grid.data_parallel_size
108
+ assert self.train_batch_size() == \
109
+ self.micro_batch_size * self.micro_batches * self.grid.data_parallel_size
110
+
111
+ # Set Stage Info
112
+ self.num_stages = self.grid.pipe_parallel_size
113
+ self.stage_id = self.grid.get_stage_id()
114
+ self.prev_stage = self.stage_id - 1
115
+ self.next_stage = self.stage_id + 1
116
+
117
+ self.data_iterator = None
118
+ self.batch_fn = None
119
+
120
+ self._force_grad_boundary = False
121
+
122
+ self.batch_timer = ThroughputTimer(batch_size=self.train_batch_size(),
123
+ logging_fn=self.tput_log,
124
+ monitor_memory=False,
125
+ steps_per_output=self.steps_per_print())
126
+
127
+ # PipelineEngine needs to handle data loading specially due to only the first
128
+ # and last stages loading inputs/labels. We construct a sampler that uses the data parallel rank and world size.
129
+ if self.training_data:
130
+ self._build_data_iter(self.training_data)
131
+
132
+ self.is_pipe_parallel = self.grid.pipe_parallel_size > 1
133
+ self.is_data_parallel = self.grid.data_parallel_size > 1
134
+ self.is_model_parallel = self.grid.model_parallel_size > 1
135
+
136
+ # Partition input/output buffers
137
+ # XXX temporarily disable while I revert some partition hacks.
138
+ assert isinstance(self._config.pipeline['pipe_partitioned'], bool)
139
+ assert isinstance(self._config.pipeline['grad_partitioned'], bool)
140
+ self.is_pipe_partitioned = self.is_model_parallel and self._config.pipeline['pipe_partitioned']
141
+ self.is_grad_partitioned = self.is_model_parallel and self._config.pipeline['grad_partitioned']
142
+ logger.info(f'is_pipe_partitioned= {self.is_pipe_partitioned} '
143
+ f'is_grad_partitioned= {self.is_grad_partitioned}')
144
+
145
+ model_parameters = filter(lambda p: p.requires_grad, self.module.parameters())
146
+ num_params = sum([p.numel() for p in model_parameters])
147
+ unique_params = num_params
148
+ # Subtract tied parameters if we don't own them
149
+ if self.module.tied_comms:
150
+ tied_params = 0
151
+ for key, d in self.module.tied_comms.items():
152
+ if self.global_rank != min(d['ranks']):
153
+ tied_params += sum(p.numel() for p in d['module'].parameters())
154
+ unique_params -= tied_params
155
+ params_tensor = torch.LongTensor(data=[num_params, unique_params]).to(self.device)
156
+ dist.all_reduce(params_tensor, group=self.grid.get_model_parallel_group())
157
+ params_tensor = params_tensor.tolist()
158
+ total_params = params_tensor[0]
159
+ unique_params = params_tensor[1]
160
+ if self.grid.data_parallel_id == 0:
161
+ logger.info(f'RANK={self.global_rank} '
162
+ f'STAGE={self.stage_id} '
163
+ f'LAYERS={self.module._local_stop - self.module._local_start} '
164
+ f'[{self.module._local_start}, {self.module._local_stop}) '
165
+ f'STAGE_PARAMS={num_params} ({num_params/1e6:0.3f}M) '
166
+ f'TOTAL_PARAMS={total_params} ({total_params/1e6:0.3f}M) '
167
+ f'UNIQUE_PARAMS={unique_params} ({unique_params/1e6:0.3f}M)')
168
+
169
+ #initialize peer-2-peer communication and allreduce groups
170
+ if self.is_pipe_parallel:
171
+ p2p.init_process_groups(self.grid)
172
+
173
+ # Pipeline buffers
174
+ self.num_pipe_buffers = 0
175
+ self.pipe_buffers = {
176
+ 'inputs': [], # batch input and received activations
177
+ 'labels': [], # labels from batch input
178
+ 'outputs': [], # activations
179
+ 'output_tensors': [], # tensor object to preserve backward graph
180
+ }
181
+ self.pipe_recv_buf = None
182
+ self.grad_layer = None
183
+
184
+ self.meta_buffer = None
185
+
186
+ self.first_output_send = True
187
+ self.first_gradient_send = True
188
+ self.pipe_partition_input_meta_cache = None
189
+ self.pipe_partition_output_meta_cache = None
190
+ self.pipe_partition_grad_meta_cache = None
191
+ self.grad_partition_grad_layer_meta_cache = None
192
+
193
+ #stores the loss for the current micro batch being processed
194
+ self.loss = torch.tensor(0.0).to(self.device)
195
+
196
+ #stores the loss for the entire batch
197
+ self.total_loss = None
198
+ self.total_additional_losses = None
199
+ self.agg_loss = torch.tensor(0.0, requires_grad=False).to(self.device)
200
+ self.dp_group_loss = torch.tensor(0.0, requires_grad=False).to(self.device)
201
+
202
+ # stores aggregated-DP train final loss and aggregated-DP additional losses, if any
203
+ # additional losses are stored as dict: {loss-name: agg-loss}
204
+ self.agg_train_loss = None
205
+ self.agg_additional_losses = None
206
+
207
+ if self._config.pipeline['activation_checkpoint_interval'] > 0:
208
+ self.module.activation_checkpoint_interval = self._config.pipeline['activation_checkpoint_interval']
209
+ # set use_reentrant default to True.
210
+ if self._config.pipeline.get('use_reentrant') is None:
211
+ self._config.pipeline['use_reentrant'] = True
212
+ if self._config.pipeline['use_reentrant'] is False:
213
+ # set activation_checkpoint_func to non_reentrant_checkpoint func.
214
+ self.module.activation_checkpoint_func = ds_checkpointing.non_reentrant_checkpoint
215
+ if self.grid.get_global_rank() == 0:
216
+ logger.info(f'CONFIG: activation_checkpoint_func=non_reentrant_checkpoint')
217
+
218
+ self.module.checkpoint_parallel_write_pipeline = self._config.checkpoint_parallel_write_pipeline
219
+
220
+ if self.is_last_stage():
221
+ self.loss_model = self.module.loss_fn
222
+
223
+ self.has_attention_mask = self.module.__class__.__name__ == 'GPT2ModelPipe'
224
+ # Initialize pipeline communicators. Just send a 0.
225
+ if is_even(self.stage_id):
226
+ if not self.is_last_stage():
227
+ p2p.send(self.loss, self.next_stage)
228
+ if not self.is_first_stage():
229
+ p2p.recv(self.loss, self.prev_stage)
230
+ else:
231
+ if not self.is_first_stage():
232
+ p2p.recv(self.loss, self.prev_stage)
233
+ if not self.is_last_stage():
234
+ p2p.send(self.loss, self.next_stage)
235
+
236
+ # XXX look into timer reporting timing
237
+ # Initialize some timers because of early weirdness.
238
+ if self.wall_clock_breakdown():
239
+ self.timers(FORWARD_MICRO_TIMER).start()
240
+ self.timers(FORWARD_MICRO_TIMER).stop()
241
+ self.timers(BACKWARD_MICRO_TIMER).start()
242
+ self.timers(BACKWARD_MICRO_TIMER).stop()
243
+ self.timers(BACKWARD_INNER_MICRO_TIMER).start()
244
+ self.timers(BACKWARD_INNER_MICRO_TIMER).stop()
245
+ self.timers(BACKWARD_REDUCE_MICRO_TIMER).start()
246
+ self.timers(BACKWARD_REDUCE_MICRO_TIMER).stop()
247
+ self.timers(BACKWARD_REDUCE_GLOBAL_TIMER).start()
248
+ self.timers(BACKWARD_REDUCE_GLOBAL_TIMER).stop()
249
+ self.timers(STEP_MICRO_TIMER).start()
250
+ self.timers(STEP_MICRO_TIMER).stop()
251
+
252
+ def set_has_attention_mask(self, value):
253
+ assert isinstance(value, bool)
254
+ self.has_attention_mask = value
255
+
256
+ def _build_data_iter(self, dataset):
257
+ sampler = torch.utils.data.distributed.DistributedSampler(dataset,
258
+ num_replicas=self.dp_world_size,
259
+ rank=self.mpu.get_data_parallel_rank(),
260
+ shuffle=False)
261
+ # Build a loader and make it repeating.
262
+ pipe_dataloader = self.deepspeed_io(dataset, data_sampler=sampler)
263
+ pipe_dataloader = RepeatingLoader(pipe_dataloader)
264
+ self.set_dataloader(pipe_dataloader)
265
+
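# Editor's note: a minimal, self-contained sketch of the sampling pattern used by
# _build_data_iter() above: shard the dataset across data-parallel ranks with a
# DistributedSampler and wrap the loader so it restarts on StopIteration. The
# _ToyRepeatingLoader below is a simplified stand-in for DeepSpeed's RepeatingLoader,
# not the real implementation; num_replicas/rank are hard-coded for illustration.
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

class _ToyRepeatingLoader:

    def __init__(self, loader):
        self.loader = loader
        self.it = iter(loader)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self.it)
        except StopIteration:
            self.it = iter(self.loader)  # restart instead of ending the epoch
            return next(self.it)

dataset = TensorDataset(torch.randn(16, 4), torch.randint(0, 2, (16,)))
# Passing num_replicas/rank explicitly avoids needing an initialized process group.
sampler = DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=False)
loader = _ToyRepeatingLoader(DataLoader(dataset, batch_size=4, sampler=sampler))
inputs, labels = next(iter(loader))  # one micro-batch for this data-parallel rank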
266
+ def _exec_reduce_tied_grads(self):
267
+ # We need to run this first to write to self.averaged_gradients;
268
+ # since this class turns `enable_backward_allreduce` off,
269
+ # `self.overlapping_partition_gradients_reduce_epilogue()` defined in the DeepSpeedEngine
270
+ # never actually runs. I suspect this is because of efficiency problems; get_flat_partition in
271
+ # stage2.py might do something expensive; someone will have to look into that later. But
272
+ # in the meantime, this fixes ZeRO2 + Pipelining enough to run a demo. Further profiling
273
+ # needed to decide if it actually breaks everything.
274
+ # (see https://github.com/EleutherAI/gpt-neox/issues/62#issuecomment-761471944)
275
+ if self.zero_optimization_partition_gradients():
276
+ self.optimizer.overlapping_partition_gradients_reduce_epilogue()
277
+
278
+ weight_group_list = self.module.get_tied_weights_and_groups()
279
+ for weight, group in weight_group_list:
280
+ grad = weight._hp_grad if self.using_bf16_optimizer else weight.grad
281
+ dist.all_reduce(grad, group=group)
282
+
283
+ def _exec_reduce_grads(self):
284
+ self._force_grad_boundary = True
285
+ if self.pipeline_enable_backward_allreduce:
286
+ if self.using_bf16_optimizer:
287
+ # PP+BF16 works for ZeRO Stage 1
288
+ self._bf16_reduce_grads()
289
+ else:
290
+ self.allreduce_gradients(bucket_size=MEMORY_OPT_ALLREDUCE_SIZE)
291
+ self._force_grad_boundary = False
292
+
293
+ def _bf16_reduce_grads(self):
294
+ self.buffered_allreduce_fallback(grads=None, elements_per_buffer=MEMORY_OPT_ALLREDUCE_SIZE)
295
+
296
+ def _reserve_pipe_buffers(self, num_buffers):
297
+ """Ensure that each pipeline buffer has at least ``num_buffers`` slots.
298
+
299
+ This method only reserves slots and does not allocate tensors.
300
+
301
+ Args:
302
+ num_buffers (int): The number of buffers to reserve.
303
+ """
304
+ if self.num_pipe_buffers >= num_buffers:
305
+ return
306
+
307
+ num_added = num_buffers - self.num_pipe_buffers
308
+ for key in self.pipe_buffers:
309
+ self.pipe_buffers[key].extend([None] * num_added)
310
+ self.num_pipe_buffers = num_buffers
311
+
312
+ def reset_activation_shape(self):
313
+ """Reset the buffers when the shape of activation and gradient change.
314
+ For example, for curriculum learning that changes the seqlen of each
315
+ sample, we need to call this whenever the seqlen is going to change.
316
+ """
317
+ self.first_output_send = True
318
+ self.pipe_recv_buf = None
319
+ self.grad_layer = None
320
+ self.meta_buffer = None
321
+
322
+ self.pipe_partition_input_meta_cache = None
323
+ self.pipe_partition_output_meta_cache = None
324
+ self.pipe_partition_grad_meta_cache = None
325
+ self.grad_partition_grad_layer_meta_cache = None
326
+
327
+ def train_batch(self, data_iter=None):
328
+ """Progress the pipeline to train the next batch of data. The engine will ingest
329
+ ``self.train_batch_size()`` total samples collectively across all workers.
330
+
331
+
332
+ An iterator over training data should be provided as an argument
333
+ unless ``deepspeed.initialize()`` was provided a training set. In that event,
334
+ the training data will automatically be read.
335
+
336
+
337
+ .. warning::
338
+ A total of ``self.gradient_accumulation_steps()`` entries will be pulled
339
+ from ``data_iter`` by each pipeline. There must be sufficient
340
+ data left in ``data_iter`` or else a ``StopIteration`` will halt training.
341
+
342
+ DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader`
343
+ that wraps data loaders to automatically restart upon a ``StopIteration``.
344
+
345
+ Args:
346
+ data_iter (Iterator, optional): Iterator of training data.
347
+
348
+ Returns:
349
+ The arithmetic mean of the losses computed this batch.
350
+ """
351
+ if not torch._C.is_grad_enabled():
352
+ raise RuntimeError(f'train_batch() requires gradients enabled. Use eval_batch() instead.')
353
+
354
+ # Curriculum learning could change activation shape
355
+ if self.curriculum_enabled_legacy():
356
+ new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \
357
+ self.global_steps + 1)
358
+ if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step:
359
+ self.reset_activation_shape()
360
+ self.curriculum_scheduler_legacy.first_step = False
361
+ elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \
362
+ self.global_steps):
363
+ self.reset_activation_shape()
364
+
365
+ if data_iter is not None:
366
+ self.set_dataiterator(data_iter)
367
+
368
+ self.module.train()
369
+ self.total_loss = None
370
+ self.total_additional_losses = None
371
+ self._compute_loss = True
372
+
373
+ # Do the work
374
+ self.timers(TRAIN_BATCH_TIMER).start()
375
+ sched = schedule.TrainSchedule(micro_batches=self.micro_batches,
376
+ stages=self.num_stages,
377
+ stage_id=self.stage_id)
378
+ self._exec_schedule(sched)
379
+
380
+ with torch.no_grad():
381
+ self.agg_train_loss = self._aggregate_total_loss()
382
+
383
+ self.timers(TRAIN_BATCH_TIMER).stop()
384
+
385
+ if self.global_steps % self.steps_per_print() == 0:
386
+ if self.global_rank == 0:
387
+ elapsed = self.timers(TRAIN_BATCH_TIMER).elapsed(reset=True) / 1000.0
388
+ iter_time = elapsed / self.steps_per_print()
389
+ tput = self.train_batch_size() / iter_time
390
+ log_str = f'steps: {self.global_steps} loss: {self.agg_train_loss:0.4f} '
391
+ if self.agg_additional_losses is not None:
392
+ for loss_name, loss_value in self.agg_additional_losses.items():
393
+ log_str += f'{loss_name}: {loss_value.item():0.4f} '
394
+ log_str += f'iter time (s): {iter_time:0.3f} samples/sec: {tput:0.3f}'
395
+ print(log_str)
396
+ else:
397
+ self.timers(TRAIN_BATCH_TIMER).elapsed(reset=True)
398
+
399
+ # Monitoring
400
+ if self.global_rank == 0 and self.monitor.enabled:
401
+ self.summary_events = [(f'Train/Samples/train_loss', self.agg_train_loss.mean().item(),
402
+ self.global_samples)]
403
+ self.monitor.write_events(self.summary_events)
404
+
405
+ if self.wall_clock_breakdown() and self.global_steps % self.steps_per_print() == 0:
406
+ self.timers.log([
407
+ PIPE_SEND_OUTPUT_TIMER,
408
+ PIPE_SEND_GRAD_TIMER,
409
+ PIPE_RECV_INPUT_TIMER,
410
+ PIPE_RECV_GRAD_TIMER,
411
+ ])
412
+
413
+ # TODO: should return precisely what loss returned and allow others to be queried?
414
+ return self.agg_train_loss
415
+
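# Editor's note: a hedged sketch of how train_batch() is typically driven from a
# training script; `engine` is assumed to be the object returned by
# deepspeed.initialize() for a PipelineModule, and `train_loader` an iterator able
# to supply at least engine.gradient_accumulation_steps() items per call (e.g. a
# RepeatingLoader as suggested above). Shown for shape only, not runnable standalone.
def run_training(engine, train_loader, num_steps):
    data_iter = iter(train_loader)
    losses = []
    for _ in range(num_steps):
        # Each call consumes gradient_accumulation_steps() micro-batches and runs
        # the full forward/backward/optimizer-step pipeline schedule internally.
        loss = engine.train_batch(data_iter=data_iter)
        losses.append(loss)
    return losses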
416
+ def eval_batch(self,
417
+ data_iter,
418
+ return_logits=False,
419
+ compute_loss=True,
420
+ reduce_output='avg',
421
+ bcast_loss=True,
422
+ num_micro_batches=None):
423
+ """Evaluate the pipeline on a batch of data from ``data_iter``. The
424
+ engine will evaluate ``self.train_batch_size()`` total samples
425
+ collectively across all workers.
426
+
427
+ This method is equivalent to:
428
+
429
+ .. code-block:: python
430
+
431
+ module.eval()
432
+ with torch.no_grad():
433
+ output = module(batch)
434
+
435
+ .. warning::
436
+ A total of ``self.gradient_accumulation_steps()`` entries will be pulled
437
+ from ``data_iter`` by each pipeline. There must be sufficient
438
+ data left in ``data_iter`` or else a ``StopIteration`` will halt training.
439
+
440
+ DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader`
441
+ that wraps data loaders to automatically restart upon a ``StopIteration``.
442
+
443
+ Args:
444
+ data_iter (Iterator): Iterator of data to evaluate.
445
+
446
+ Returns:
447
+ The arithmetic mean of the losses computed this batch.
448
+ """
449
+ self.eval_return_logits = return_logits
450
+ self.module.eval()
451
+
452
+ # Curriculum learning could change activation shape
453
+ if self.curriculum_enabled_legacy():
454
+ new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \
455
+ self.global_steps + 1)
456
+ if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step:
457
+ self.reset_activation_shape()
458
+ self.curriculum_scheduler_legacy.first_step = False
459
+ elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \
460
+ self.global_steps):
461
+ self.reset_activation_shape()
462
+
463
+ eval_output = None
464
+
465
+ self._compute_loss = compute_loss
466
+
467
+ # Use the provided data iterator
468
+ train_iterator = self.data_iterator
469
+ self.set_dataiterator(data_iter)
470
+
471
+ # set the number of micro batches in case the user chose a different value than for training
472
+ micro_batches = self.micro_batches if num_micro_batches is None else num_micro_batches
473
+
474
+ # Do the work
475
+ sched = schedule.InferenceSchedule(micro_batches=micro_batches,
476
+ stages=self.num_stages,
477
+ stage_id=self.stage_id)
478
+
479
+ # prevent deadlock when running multiple evaluations in sequence
480
+ dist.barrier()
481
+
482
+ with torch.no_grad():
483
+ self._exec_schedule(sched)
484
+
485
+ if self.is_last_stage():
486
+ eval_output = self._reduce_outputs(self.fwd_outputs, reduce=reduce_output, micro_batches=micro_batches)
487
+
488
+ if compute_loss and (bcast_loss or self.monitor.enabled):
489
+ eval_output = self._bcast_pipe_scalar(eval_output)
490
+
491
+ if self.global_rank == 0 and self.monitor.enabled:
492
+ self.summary_events = [(f'Train/Samples/eval_loss', eval_output.mean().item(), self.global_samples)]
493
+ self.monitor.write_events(self.summary_events)
494
+
495
+ # Restore the training iterator
496
+ self.set_dataiterator(train_iterator)
497
+
498
+ # Reset any buffers that may have been populated during the forward passes.
499
+ #ds_checkpointing.reset()
500
+ self.eval_return_logits = False
501
+ if return_logits:
502
+ outputs = self.outputs
503
+ self.outputs = None
504
+ return eval_output, outputs
505
+ return eval_output
506
+
507
+ def set_train_batch_size(self, train_batch_size):
508
+ """Adjust the global batch size by increasing or decreasing the number of
509
+ micro-batches (i.e., gradient accumulation steps). The size of each micro-batch
510
+ (i.e., ``train_micro_batch_size_per_gpu``) is not changed.
511
+ Args:
512
+ train_batch_size (int): The new global batch size for training.
513
+ Raises:
514
+ ValueError: if ``train_batch_size`` is not divisible by the
515
+ configured micro-batch size and data parallelism.
516
+ """
517
+ super().set_train_batch_size(train_batch_size)
518
+ self.micro_batches = self.gradient_accumulation_steps()
519
+
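# Editor's note: a small worked example of the batch-size relationship that
# set_train_batch_size() relies on (and that __init__ asserts above):
#   train_batch_size == micro_batch_size * gradient_accumulation_steps * dp_world_size
# The numbers are illustrative only.
micro_batch_size = 4   # train_micro_batch_size_per_gpu, unchanged by this method
dp_world_size = 8      # data-parallel replicas
gas = 8                # gradient accumulation steps (== engine.micro_batches)
assert micro_batch_size * gas * dp_world_size == 256
# Doubling the global batch size to 512 keeps the micro-batch size fixed and only
# increases the number of accumulation steps:
new_gas = 512 // (micro_batch_size * dp_world_size)
assert new_gas == 16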
520
+ def is_first_stage(self):
521
+ """True if this process is in the first stage in the pipeline."""
522
+ return self.stage_id == 0
523
+
524
+ def is_last_stage(self):
525
+ """True if this process is in the last stage in the pipeline."""
526
+ return self.stage_id == self.num_stages - 1
527
+
528
+ def _reduce_outputs(self, outputs, reduce='avg', reduce_dp=True, micro_batches=None):
529
+ if reduce is None:
530
+ return outputs
531
+
532
+ if reduce.lower() == 'avg':
533
+ # first sum over all microbatches
534
+ if torch.is_tensor(outputs[0]):
535
+ reduced = sum(outputs)
536
+ else:
537
+ assert isinstance(outputs, (list, tuple))
538
+ reduced = [torch.zeros_like(o) for o in outputs[0]]
539
+ for idx, out in enumerate(outputs):
540
+ reduced[idx] += out
541
+
542
+ # Average over the microbatches
543
+ reduced = self._scale_loss_by_gas(reduced, eval_micro_batches=micro_batches)
544
+
545
+ # Average over DP groups
546
+ if reduce_dp and self.is_data_parallel:
547
+ if torch.is_tensor(reduced):
548
+ dist.all_reduce(reduced, group=self.mpu.get_data_parallel_group())
549
+ reduced /= self.dp_world_size
550
+ else:
551
+ for idx in range(len(reduced)):
552
+ dist.all_reduce(reduced[idx], group=self.mpu.get_data_parallel_group())
553
+ reduced[idx] /= self.dp_world_size
554
+
555
+ return reduced
556
+ else:
557
+ raise NotImplementedError(f'reduction type {reduce} not supported.')
558
+
559
+ def _bcast_pipe_scalar(self, data, src_rank=None, dtype=torch.float32):
560
+ # Default to last stage (e.g., for broadcasting loss)
561
+ if src_rank is None:
562
+ src_rank = self.grid.stage_to_global(self.num_stages - 1)
563
+ assert src_rank in self.grid.pp_group
564
+
565
+ if self.global_rank == src_rank:
566
+ result = data.clone().detach().type(dtype).to(self.device)
567
+ else:
568
+ result = torch.Tensor([0.]).type(dtype).to(self.device)
569
+
570
+ dist.broadcast(tensor=result, src=src_rank, group=self.mpu.get_pipe_parallel_group())
571
+
572
+ return result
573
+
574
+ def _aggregate_total_loss(self):
575
+ # Scale loss, average among DP ranks, and bcast loss to the rest of my DP group
576
+ if self.is_last_stage():
577
+ # Scale loss and additional losses, if any
578
+ loss = self._scale_loss_by_gas(self.total_loss)
579
+ self.agg_additional_losses = self.total_additional_losses
580
+ if self.agg_additional_losses is not None:
581
+ self.agg_additional_losses = OrderedDict({
582
+ loss_name: self._scale_loss_by_gas(_loss.clone().detach())
583
+ for loss_name, _loss in self.agg_additional_losses.items()
584
+ })
585
+
586
+ self.dp_group_loss = loss.clone().detach()
587
+ agg_loss = self.dp_group_loss.clone().detach()
588
+ #print(f'RANK={self.global_rank} bcast SENDER src={self.global_rank} group={self.grid.pp_group}', flush=True)
589
+
590
+ # Average loss across all data-parallel groups
591
+ if self.is_data_parallel:
592
+ if self.agg_additional_losses is None:
593
+ dist.all_reduce(agg_loss, group=self.mpu.get_data_parallel_group())
594
+ agg_loss /= self.dp_world_size
595
+ else:
596
+ # use a single reduce op for agg_loss and additional losses, if any
597
+ assert '__train_loss__' not in self.agg_additional_losses.keys()
598
+ tensors = OrderedDict({'__train_loss__': agg_loss})
599
+ tensors.update(self.agg_additional_losses.items())
600
+ flat_tensor = torch.cat([t.clone().reshape(-1).detach() for t in tensors.values()])
601
+ dist.all_reduce(flat_tensor, group=self.mpu.get_data_parallel_group())
602
+ flat_tensor /= self.dp_world_size
603
+ offset = 0
604
+ reduced_tensor = {}
605
+ for name, t in tensors.items():
606
+ n_elem = t.numel()
607
+ reduced_tensor[name] = flat_tensor[offset:offset + n_elem].clone().detach().reshape(t.shape)
608
+ offset += n_elem
609
+ agg_loss = reduced_tensor['__train_loss__']
610
+ self.agg_additional_losses = OrderedDict(
611
+ {name: reduced_tensor[name]
612
+ for name in self.agg_additional_losses.keys()})
613
+
614
+ assert self.global_rank in self.grid.pp_group
615
+ losses = [self.dp_group_loss, agg_loss]
616
+ if self.agg_additional_losses is not None:
617
+ losses += list(self.agg_additional_losses.values())
618
+ losses = torch.stack(losses).float()
619
+ if self.is_pipe_parallel:
620
+ dist.broadcast(tensor=losses, src=self.global_rank, group=self.mpu.get_pipe_parallel_group())
621
+ else:
622
+ # Get loss from last stage
623
+ src_rank = self.grid.stage_to_global(self.num_stages - 1)
624
+ assert src_rank in self.grid.pp_group
625
+ # losses to reduce are: dp_group_loss, agg_loss, model additional losses
626
+ # therefore: 2 + n_additional_losses
627
+ additional_losses = self.module.get_additional_losses()
628
+ n_additional_losses = 0 if additional_losses is None else len(additional_losses)
629
+ losses = torch.Tensor([0.] * (2 + n_additional_losses)).to(self.device)
630
+ dist.broadcast(tensor=losses, src=src_rank, group=self.grid.get_pipe_parallel_group())
631
+ self.dp_group_loss = losses[0].clone().detach()
632
+ agg_loss = losses[1].clone().detach()
633
+ if additional_losses is not None:
634
+ self.agg_additional_losses = OrderedDict(
635
+ {name: losses[2 + i].clone().detach()
636
+ for i, name in enumerate(additional_losses.keys())})
637
+ return agg_loss
638
+
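# Editor's note: a minimal sketch of the pack/reduce/unpack bookkeeping used by
# _aggregate_total_loss() when additional losses are present. The dist.all_reduce
# call is replaced by a plain division by a made-up world size so the snippet runs
# without a process group; only the flatten/offset logic is the point here.
import torch
from collections import OrderedDict

world_size = 4
tensors = OrderedDict({
    '__train_loss__': torch.tensor(8.0),
    'aux_loss': torch.tensor([2.0, 4.0]),
})
flat = torch.cat([t.reshape(-1) for t in tensors.values()])
flat = flat / world_size  # stand-in for dist.all_reduce(flat); flat /= world_size
offset, reduced = 0, {}
for name, t in tensors.items():
    n = t.numel()
    reduced[name] = flat[offset:offset + n].reshape(t.shape)
    offset += n
assert reduced['__train_loss__'].item() == 2.0
assert torch.equal(reduced['aux_loss'], torch.tensor([0.5, 1.0]))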
639
+ def set_dataloader(self, loader):
640
+ """Store a dataloader for training data; only the first and last pipeline stages keep a reference."""
641
+ if self.is_first_stage() or self.is_last_stage():
642
+ self.training_dataloader = loader
643
+ self.data_iterator = iter(self.training_dataloader)
644
+
645
+ def set_dataiterator(self, iterator):
646
+ """ Store an iterator to sample for training data. """
647
+ if self.is_first_stage() or self.is_last_stage():
648
+ self.training_dataloader = None
649
+ self.data_iterator = iterator
650
+
651
+ def set_batch_fn(self, fn):
652
+ """Execute a post-processing function on input data.
653
+
654
+ Args:
655
+ fn (function): The function to run.
656
+ """
657
+ self.batch_fn = fn
658
+
659
+ def is_gradient_accumulation_boundary(self):
660
+ """True if the engine is executing a gradient reduction or optimizer step instruction.
661
+
662
+ This is overridden from :class:`DeepSpeedEngine` to force reductions
663
+ and steps when the pipeline engine is instructed to do so.
664
+
665
+ Returns:
666
+ bool: whether reductions and optimizer steps should occur.
667
+ """
668
+ return self._force_grad_boundary
669
+
670
+ def log_for_device(self, *msg):
671
+ if LOG_STAGE == self.stage_id or LOG_STAGE == -1:
672
+ if DATA_PARALLEL_ID == self.grid.data_parallel_id or DATA_PARALLEL_ID == -1:
673
+ print(
674
+ f'RANK={dist.get_rank()} '
675
+ f'PIPE-ID={self.stage_id} '
676
+ f'DATA-ID={self.grid.data_parallel_id} '
677
+ f'MBATCH-ID={self.microbatch_id} '
678
+ f'STEP-ID={self.log_batch_step_id} '
679
+ '::',
680
+ *msg,
681
+ flush=True)
682
+
683
+ def tput_log(self, *msg):
684
+ if self.global_rank == 0 and self.global_steps % self.steps_per_print() == 0:
685
+ print(*msg)
686
+
687
+ def _next_batch(self):
688
+ # If using 3D parallelism, only some first-stage ranks may do IO
689
+ batch = None
690
+ if self.data_iterator is not None:
691
+ batch = next(self.data_iterator)
692
+
693
+ # Any post-processing, like broadcasting across a slice-parallel group.
694
+ if self.batch_fn:
695
+ batch = self.batch_fn(batch)
696
+
697
+ return batch
698
+
699
+ def _exec_forward_pass(self, buffer_id):
700
+ self.tput_timer.start()
701
+ self.mem_status('BEFORE FWD', reset_max=True)
702
+
703
+ if isinstance(self.pipe_buffers['inputs'][buffer_id], tuple):
704
+ inputs = tuple(t.clone() for t in self.pipe_buffers['inputs'][buffer_id])
705
+ else:
706
+ inputs = self.pipe_buffers['inputs'][buffer_id].clone()
707
+
708
+ # collect the partitioned input from the previous stage
709
+ if self.is_pipe_partitioned and not self.is_first_stage():
710
+ if self.pipe_partition_input_meta_cache is None:
711
+ self.pipe_partition_input_meta_cache = inputs[0].to('cpu')
712
+ part_input = PartitionedTensor.from_meta(meta=self.pipe_partition_input_meta_cache,
713
+ local_part=inputs[1],
714
+ group=self.grid.get_slice_parallel_group())
715
+
716
+ inputs = (part_input.full(), *inputs[2:])
717
+ inputs[0].requires_grad = True
718
+ # skip mask
719
+ #inputs[1].requires_grad = True
720
+ part_input = None
721
+ inputs = inputs[0] if len(inputs) == 1 else inputs
722
+ self.pipe_buffers['inputs'][buffer_id] = inputs
723
+
724
+ # inputs has no gradient because it is from a cloned tensor
725
+ outputs = super().forward(inputs)
726
+
727
+ # Reset activation checkpointing buffers.
728
+ # Need to call this between evaluation iterations
729
+ if not self.module.training:
730
+ ds_checkpointing.reset()
731
+
732
+ # Partition the outputs if we are not the last stage
733
+ if self.is_pipe_partitioned and not self.is_last_stage():
734
+ if isinstance(outputs, tuple):
735
+ first_output = outputs[0]
736
+ # TODO: Improve pipe partitioning to pass multiple tensors that require grads
737
+ assert all([torch.is_tensor(elt) and elt.requires_grad is False for elt in outputs[1:]])
738
+ outputs_tail = outputs[1:]
739
+ elif torch.is_tensor(outputs):
740
+ first_output = outputs
741
+ outputs_tail = []
742
+ else:
743
+ raise ValueError("expecting a tensor or a tuple of tensors")
744
+ part = PartitionedTensor(tensor=first_output, group=self.grid.get_slice_parallel_group())
745
+ # Clear the large output data, but save the computation graph
746
+ first_output.data = torch.zeros(1)
747
+ self.pipe_buffers['output_tensors'][buffer_id] = first_output
748
+ # Inject the partitioned tensor into the output before sending
749
+ outputs = (part.to_meta(), part.data(), *outputs_tail)
750
+ part = None
751
+
752
+ self.pipe_buffers['outputs'][buffer_id] = outputs
753
+
754
+ # Optionally compute loss on the last device
755
+ if self.is_last_stage():
756
+ if self._compute_loss and self.module.loss_fn is not None:
757
+ labels = self.pipe_buffers['labels'][buffer_id]
758
+ self.loss = self.module.loss_fn(outputs, labels)
759
+ else:
760
+ # Some models just return loss from forward()
761
+ self.loss = outputs
762
+ if self.eval_return_logits:
763
+ self.outputs = outputs
764
+
765
+ if isinstance(self.loss, torch.Tensor):
766
+ self.fwd_outputs.append(self.loss.detach())
767
+ else:
768
+ self.fwd_outputs.append([l.detach() for l in self.loss])
769
+
770
+ def add_to_total_loss(_total_loss, _loss):
771
+ if isinstance(_loss, torch.Tensor):
772
+ if _total_loss is None:
773
+ _total_loss = torch.zeros_like(_loss)
774
+ _total_loss += _loss.detach()
775
+ else:
776
+ if _total_loss is None:
777
+ _total_loss = [torch.zeros_like(_l) for _l in _loss]
778
+ for _idx, _l in enumerate(_loss):
779
+ _total_loss[_idx] += _l.detach()
780
+ return _total_loss
781
+
782
+ self.total_loss = add_to_total_loss(self.total_loss, self.loss)
783
+
784
+ # aggregate additional losses across gradient accumulation steps
785
+ additional_losses = self.module.get_additional_losses()
786
+ if additional_losses is not None:
787
+ if self.total_additional_losses is None:
788
+ self.total_additional_losses = OrderedDict()
789
+ for name, loss in additional_losses.items():
790
+ total = self.total_additional_losses[name] if name in self.total_additional_losses else None
791
+ self.total_additional_losses[name] = add_to_total_loss(total, loss)
792
+
793
+ def _exec_backward_pass(self, buffer_id):
794
+ assert self.optimizer is not None, "must provide optimizer during " \
795
+ "init in order to use backward"
796
+
797
+ self.mem_status('BEFORE BWD', reset_max=True)
798
+
799
+ # The last stage just runs backward on the loss using DeepSpeed's typical
800
+ # mechanisms.
801
+ if self.is_last_stage():
802
+ super().backward(self.loss)
803
+ self.mem_status('AFTER BWD')
804
+ return
805
+
806
+ outputs = self.pipe_buffers['outputs'][buffer_id]
807
+
808
+ if self.wall_clock_breakdown():
809
+ self.timers(BACKWARD_MICRO_TIMER).start()
810
+ self.timers(BACKWARD_GLOBAL_TIMER).start()
811
+ self.timers(BACKWARD_INNER_MICRO_TIMER).start()
812
+ self.timers(BACKWARD_INNER_GLOBAL_TIMER).start()
813
+
814
+ # Reconstruct if we previously partitioned the output. We must be
815
+ # careful to also restore the computational graph of the tensors we partitioned.
816
+ if self.is_pipe_partitioned:
817
+ if self.is_grad_partitioned:
818
+ if self.pipe_partition_output_meta_cache is None:
819
+ self.pipe_partition_output_meta_cache = outputs[0].to('cpu')
820
+ part_output = PartitionedTensor.from_meta(meta=self.pipe_partition_output_meta_cache,
821
+ local_part=outputs[1],
822
+ group=self.grid.get_slice_parallel_group())
823
+ self.pipe_buffers['output_tensors'][buffer_id].data = part_output.full()
824
+ outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[2:])
825
+ else:
826
+ # Already restored from partition
827
+ self.pipe_buffers['output_tensors'][buffer_id].data = outputs[0]
828
+ outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[1:])
829
+
830
+ grad_tensors = self.grad_layer
831
+ if self.is_grad_partitioned:
832
+ #print(f'RANK={self.global_rank} BEFORE-BWD restoring grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}')
833
+ if self.grad_partition_grad_layer_meta_cache is None:
834
+ self.grad_partition_grad_layer_meta_cache = self.grad_layer[0].to('cpu')
835
+ part_grad = PartitionedTensor.from_meta(meta=self.grad_partition_grad_layer_meta_cache,
836
+ local_part=self.grad_layer[1],
837
+ group=self.grid.get_slice_parallel_group())
838
+ grad_tensors = (part_grad.full(), *grad_tensors[2:])
839
+ part_grad = None
840
+ #print(f'RANK={self.global_rank} BEFORE-BWD restored grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}')
841
+
842
+ if self.using_bf16_optimizer and not self.is_last_stage():
843
+ # manually call because we don't call optimizer.backward()
844
+ self.optimizer.clear_lp_grads()
845
+
846
+ # This handles either a single tensor or tuple of tensors.
847
+ if isinstance(outputs, tuple):
848
+ out_tensors = [t for t in outputs if t.is_floating_point()]
849
+ assert len(out_tensors) == len(grad_tensors)
850
+ torch.autograd.backward(tensors=out_tensors, grad_tensors=grad_tensors)
851
+ else:
852
+ torch.autograd.backward(tensors=(outputs, ), grad_tensors=(grad_tensors, ))
853
+
854
+ if self.using_bf16_optimizer and not self.is_last_stage():
855
+ # manually call because we don't call optimizer.backward()
856
+ self.optimizer.update_hp_grads(clear_lp_grads=False)
857
+
858
+ # Free up the memory from the output of forward()
859
+ self.pipe_buffers['output_tensors'][buffer_id] = None
860
+ self.pipe_buffers['outputs'][buffer_id] = None
861
+ grad_tensors = None
862
+
863
+ if self.wall_clock_breakdown():
864
+ self.timers(BACKWARD_INNER_MICRO_TIMER).stop()
865
+ self.timers(BACKWARD_INNER_GLOBAL_TIMER).stop()
866
+ self.timers(BACKWARD_MICRO_TIMER).stop()
867
+ self.timers(BACKWARD_GLOBAL_TIMER).stop()
868
+
869
+ self.mem_status('AFTER BWD')
870
+
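# Editor's note: a tiny runnable illustration of the core autograd call in
# _exec_backward_pass() for non-last stages: backward is driven from this stage's
# outputs using a gradient received from the next stage rather than from a scalar
# loss. Shapes and values are arbitrary.
import torch

x = torch.randn(4, 3, requires_grad=True)   # activations received by this stage
y = x * 2                                   # this stage's output, sent downstream
incoming_grad = torch.ones_like(y)          # dL/dy, as if received via p2p.recv
torch.autograd.backward(tensors=(y, ), grad_tensors=(incoming_grad, ))
# x.grad now holds dL/dx = 2 * dL/dy and would be sent to the previous stage.
assert torch.allclose(x.grad, torch.full_like(x, 2.0))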
871
+ def _exec_load_micro_batch(self, buffer_id):
872
+ if self.wall_clock_breakdown():
873
+ self.timers(BATCH_INPUT_TIMER).start()
874
+
875
+ batch = self._next_batch()
876
+
877
+ if self.is_first_stage():
878
+ loaded = None
879
+ if torch.is_tensor(batch[0]):
880
+ loaded = batch[0].clone().to(self.device).detach()
881
+ if self._config.pipeline['activation_checkpoint_interval'] > 0 and self._config.pipeline[
882
+ 'use_reentrant']:
883
+ loaded.requires_grad = loaded.is_floating_point()
884
+ else:
885
+ assert isinstance(batch[0], (tuple, list))
886
+ # Assume list or tuple
887
+ loaded = []
888
+ for x in batch[0]:
889
+ assert torch.is_tensor(x)
890
+ mine = x.clone().detach().to(self.device)
891
+ if self._config.pipeline['activation_checkpoint_interval'] > 0 and self._config.pipeline[
892
+ 'use_reentrant']:
893
+ mine.requires_grad = mine.is_floating_point()
894
+ loaded.append(mine)
895
+ loaded = tuple(loaded)
896
+
897
+ self.pipe_buffers['inputs'][buffer_id] = loaded
898
+
899
+ if self.is_last_stage():
900
+ loaded = batch[1]
901
+ if torch.is_tensor(batch[1]):
902
+ loaded = batch[1].to(self.device)
903
+ # XXX: torch 1.6.0 DataLoader will auto convert tuple to list
904
+ elif isinstance(batch[1], (tuple, list)):
905
+ loaded = []
906
+ for x in batch[1]:
907
+ assert torch.is_tensor(x)
908
+ x = x.to(self.device).detach()
909
+ loaded.append(x)
910
+ loaded = tuple(loaded)
911
+
912
+ self.pipe_buffers['labels'][buffer_id] = loaded
913
+
914
+ if self.wall_clock_breakdown():
915
+ self.timers(BATCH_INPUT_TIMER).stop()
916
+
917
+ def _send_tensor_meta(self, buffer, recv_stage):
918
+ """ Communicate metadata about upcoming p2p transfers.
919
+
920
+ Metadata is communicated in this order:
921
+ * type (0: tensor, 1: list, 2: tuple)
922
+ * num_tensors if type is list or tuple
923
+ foreach tensor in buffer:
924
+ * ndims
925
+ * shape
926
+ """
927
+ send_bytes = 0
928
+ if isinstance(buffer, torch.Tensor):
929
+ type_tensor = torch.LongTensor(data=[0]).to(self.device)
930
+ p2p.send(type_tensor, recv_stage)
931
+ send_shape = torch.LongTensor(data=buffer.size()).to(self.device)
932
+ send_ndims = torch.LongTensor(data=[len(buffer.size())]).to(self.device)
933
+ p2p.send(send_ndims, recv_stage)
934
+ p2p.send(send_shape, recv_stage)
935
+ send_bytes += _tensor_bytes(buffer)
936
+ elif isinstance(buffer, list):
937
+ assert (False)
938
+ type_tensor = torch.LongTensor(data=[1]).to(self.device)
939
+ p2p.send(type_tensor, recv_stage)
940
+ count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device)
941
+ p2p.send(count_tensor, recv_stage)
942
+ for tensor in buffer:
943
+ assert isinstance(tensor, torch.Tensor)
944
+ send_shape = torch.LongTensor(data=tensor.size()).to(self.device)
945
+ send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device)
946
+ p2p.send(send_ndims, recv_stage)
947
+ p2p.send(send_shape, recv_stage)
948
+ send_bytes += _tensor_bytes(tensor)
949
+ elif isinstance(buffer, tuple):
950
+ type_tensor = torch.LongTensor(data=[2]).to(self.device)
951
+ p2p.send(type_tensor, recv_stage)
952
+ count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device)
953
+ p2p.send(count_tensor, recv_stage)
954
+ for idx, tensor in enumerate(buffer):
955
+ assert isinstance(tensor, torch.Tensor)
956
+ send_shape = torch.LongTensor(data=tensor.size()).to(self.device)
957
+ send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device)
958
+ send_dtype = torch.LongTensor(data=[self.DTYPE_TO_ID[tensor.dtype]]).to(self.device)
959
+ p2p.send(send_dtype, recv_stage)
960
+ p2p.send(send_ndims, recv_stage)
961
+ p2p.send(send_shape, recv_stage)
962
+ # Useful for performance debugging.
963
+ '''
964
+ new_bytes = _tensor_bytes(tensor)
965
+ send_bytes += _tensor_bytes(tensor)
966
+ # Useful for performance debugging.
967
+ if self.grid.data_parallel_id == 0:
968
+ print(
969
+ f'STAGE={self.stage_id} pipe-send-volume[{idx}]: shape={send_shape} {new_bytes/1024**2:0.2f}MB'
970
+ )
971
+ '''
972
+ else:
973
+ raise NotImplementedError(f'Could not send meta type {type(buffer)}')
974
+
975
+ # Useful for performance debugging.
976
+ '''
977
+ if self.grid.data_parallel_id == 0:
978
+ print(f'STAGE={self.stage_id} pipe-send-volume: {send_bytes/1024**2:0.2f}MB')
979
+ '''
980
+
981
+ def _recv_tensor_meta(self, send_stage):
982
+ """Receive metadata about upcoming p2p transfers and return allocated buffers.
983
+
984
+ Metadata is communicated in this order:
985
+ * type (0: tensor, 1: list, 2: tuple)
986
+ * num_tensors if type is list or tuple
987
+ foreach tensor in buffer:
988
+ * ndims
989
+ * shape
990
+
991
+ Returns:
992
+ Allocated buffer for receiving from send_stage.
993
+ """
994
+
995
+ type_tensor = torch.LongTensor(data=[0]).to(self.device)
996
+ p2p.recv(type_tensor, send_stage)
997
+ recv_type = type_tensor.item()
998
+
999
+ # A single tensor will be sent.
1000
+ if recv_type == 0:
1001
+ recv_ndims = torch.LongTensor(data=[0]).to(self.device)
1002
+ p2p.recv(recv_ndims, send_stage)
1003
+ recv_ndims = recv_ndims.item()
1004
+ recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device)
1005
+ p2p.recv(recv_shape, send_stage)
1006
+ recv_shape = recv_shape.tolist()
1007
+ return self._allocate_buffer(recv_shape, num_buffers=1)[0]
1008
+
1009
+ # List or tuple of tensors
1010
+ elif recv_type == 1 or recv_type == 2:
1011
+ count_tensor = torch.LongTensor(data=[0]).to(self.device)
1012
+ p2p.recv(count_tensor, send_stage)
1013
+ num_tensors = count_tensor.item()
1014
+ recv_shapes_and_dtypes = []
1015
+ for idx in range(num_tensors):
1016
+ recv_dtype = torch.LongTensor(data=[0]).to(self.device)
1017
+ p2p.recv(recv_dtype, send_stage)
1018
+ recv_dtype = self.ID_TO_DTYPE[recv_dtype.item()]
1019
+ recv_ndims = torch.LongTensor(data=[0]).to(self.device)
1020
+ p2p.recv(recv_ndims, send_stage)
1021
+ recv_ndims = recv_ndims.item()
1022
+ recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device)
1023
+ p2p.recv(recv_shape, send_stage)
1024
+ recv_shapes_and_dtypes.append((recv_shape.tolist(), recv_dtype))
1025
+
1026
+ buffers = self._allocate_buffers(recv_shapes_and_dtypes, num_buffers=1)[0]
1027
+ # Convert to tuples if requested.
1028
+ if recv_type == 2:
1029
+ buffers = tuple(buffers)
1030
+ return buffers
1031
+
1032
+ else:
1033
+ raise NotImplementedError(f'Could not receive type {type(recv_type)}')
1034
+
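# Editor's note: a self-contained sketch of the metadata handshake implemented by
# _send_tensor_meta()/_recv_tensor_meta() above for the tuple case: a type code
# (2 for tuple), the tensor count, then dtype id, ndims and shape per tensor. The
# p2p.send/recv calls are replaced by building a plain list so the message order
# can be inspected without a process group; the dtype table mirrors ID_TO_DTYPE.
import torch

ID_TO_DTYPE = [
    torch.float32, torch.float64, torch.complex64, torch.complex128, torch.float16,
    torch.bfloat16, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64, torch.bool
]
DTYPE_TO_ID = {dtype: i for i, dtype in enumerate(ID_TO_DTYPE)}

def describe_tuple_meta(buffers):
    """Return the sequence of scalar messages a sender would emit for a tuple."""
    msgs = [('type', 2), ('count', len(buffers))]
    for t in buffers:
        msgs.append(('dtype_id', DTYPE_TO_ID[t.dtype]))
        msgs.append(('ndims', t.dim()))
        msgs.append(('shape', list(t.size())))
    return msgs

example = (torch.empty(8, 16, dtype=torch.float16), torch.empty(8, dtype=torch.bool))
print(describe_tuple_meta(example))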
1035
+ def _exec_send_activations(self, buffer_id):
1036
+ if self.wall_clock_breakdown():
1037
+ self.timers(PIPE_SEND_OUTPUT_TIMER).start()
1038
+
1039
+ outputs = self.pipe_buffers['outputs'][buffer_id]
1040
+
1041
+ # NCCL does not like to send torch.BoolTensor types, so cast the mask to half().
1042
+ # We could do char, but with half() we can eventually flatten with other fp16
1043
+ # messages (TODO)
1044
+ if self.has_attention_mask or self.has_bool_tensors:
1045
+ outputs = list(outputs)
1046
+ outputs[-1] = outputs[-1].half()
1047
+ outputs = tuple(outputs)
1048
+
1049
+ if self.first_output_send:
1050
+ self.first_output_send = False
1051
+ self._send_tensor_meta(outputs, self.next_stage)
1052
+
1053
+ if isinstance(outputs, torch.Tensor):
1054
+ p2p.send(outputs, self.next_stage)
1055
+ elif isinstance(outputs, tuple):
1056
+ for idx, buffer in enumerate(outputs):
1057
+ p2p.send(buffer, self.next_stage)
1058
+ else:
1059
+ raise NotImplementedError('Could not send output of type '
1060
+ f'{type(outputs)}')
1061
+
1062
+ # Restore the boolean tensor
1063
+ if self.has_attention_mask or self.has_bool_tensors:
1064
+ outputs = list(outputs)
1065
+ outputs[-1] = outputs[-1].bool()
1066
+ outputs = tuple(outputs)
1067
+
1068
+ if self.wall_clock_breakdown():
1069
+ self.timers(PIPE_SEND_OUTPUT_TIMER).stop()
1070
+
1071
+ def _exec_send_grads(self, buffer_id):
1072
+ if self.wall_clock_breakdown():
1073
+ self.timers(PIPE_SEND_GRAD_TIMER).start()
1074
+
1075
+ inputs = self.pipe_buffers['inputs'][buffer_id]
1076
+
1077
+ # Partition the gradient
1078
+ if self.is_grad_partitioned:
1079
+ if isinstance(inputs, tuple):
1080
+ first_input = inputs[0]
1081
+ assert all([torch.is_tensor(elt) for elt in inputs[1:]])
1082
+ inputs_grad_tail = [elt.grad for elt in inputs[1:]]
1083
+ elif torch.is_tensor(inputs):
1084
+ first_input = inputs
1085
+ inputs_grad_tail = []
1086
+ else:
1087
+ raise ValueError("expecting a tensor or a tuple of tensors")
1088
+ assert torch.is_tensor(first_input)
1089
+ part = PartitionedTensor(tensor=first_input.grad, group=self.grid.get_slice_parallel_group())
1090
+
1091
+ inputs = (part.to_meta(), part.data(), *inputs_grad_tail)
1092
+
1093
+ # XXX Terrible hack
1094
+ # Drop the attention mask from the input buffer here. It does not have
1095
+ # a grad that needs to be communicated. We free the buffer immediately
1096
+ # after, so no need to restore it. The receiver also has a hack that skips
1097
+ # the recv. This is because NCCL does not let us send torch.BoolTensor :-(.
1098
+ if self.has_attention_mask or self.has_bool_tensors:
1099
+ inputs = list(inputs)
1100
+ inputs.pop()
1101
+ inputs = tuple(inputs)
1102
+
1103
+ if isinstance(inputs, torch.Tensor):
1104
+ assert inputs.grad is not None
1105
+ p2p.send(inputs.grad, self.prev_stage)
1106
+ else:
1107
+ # XXX terrible hacky branch
1108
+ if self.is_grad_partitioned:
1109
+ # First two sends are partitioned gradient
1110
+ p2p.send(inputs[0], self.prev_stage)
1111
+ p2p.send(inputs[1], self.prev_stage)
1112
+ else:
1113
+ for idx, buffer in enumerate(inputs):
1114
+ # Skip tensors that will not produce a grad
1115
+ if not buffer.is_floating_point():
1116
+ assert buffer.grad is None
1117
+ continue
1118
+ assert buffer.grad is not None
1119
+ p2p.send(buffer.grad, self.prev_stage)
1120
+
1121
+ # We can free up the input buffer now
1122
+ self.pipe_buffers['inputs'][buffer_id] = None
1123
+
1124
+ if self.wall_clock_breakdown():
1125
+ self.timers(PIPE_SEND_GRAD_TIMER).stop()
1126
+
1127
+ def _exec_recv_activations(self, buffer_id):
1128
+ if self.wall_clock_breakdown():
1129
+ self.timers(PIPE_RECV_INPUT_TIMER).start()
1130
+
1131
+ recvd = None
1132
+
1133
+ # Allocate the buffer if necessary
1134
+ if self.pipe_recv_buf is None:
1135
+ self.pipe_recv_buf = self._recv_tensor_meta(self.prev_stage)
1136
+
1137
+ if isinstance(self.pipe_recv_buf, torch.Tensor):
1138
+ p2p.recv(self.pipe_recv_buf, self.prev_stage)
1139
+ recvd = self.pipe_recv_buf.clone().detach()
1140
+ recvd.requires_grad = recvd.is_floating_point()
1141
+ else:
1142
+ assert isinstance(self.pipe_recv_buf, tuple)
1143
+ recvd = [None] * len(self.pipe_recv_buf)
1144
+ for idx, buffer in enumerate(self.pipe_recv_buf):
1145
+ assert torch.is_tensor(buffer)
1146
+ # XXX hardcode meta type
1147
+ if self.is_pipe_partitioned and idx == 0 and buffer.dtype != torch.long:
1148
+ if self.meta_buffer is None:
1149
+ self.meta_buffer = torch.zeros(buffer.size(), dtype=torch.long, device=self.device)
1150
+ buffer = self.meta_buffer
1151
+
1152
+ p2p.recv(buffer, self.prev_stage)
1153
+ recvd[idx] = buffer.clone().detach()
1154
+
1155
+ # NCCL does not like to send torch.BoolTensor types, so un-cast the
1156
+ # attention mask
1157
+ if self.has_attention_mask or self.has_bool_tensors:
1158
+ recvd[-1] = recvd[-1].bool()
1159
+
1160
+ recvd = tuple(recvd)
1161
+
1162
+ for buffer in recvd:
1163
+ buffer.requires_grad = buffer.is_floating_point()
1164
+
1165
+ self.pipe_buffers['inputs'][buffer_id] = recvd
1166
+
1167
+ if self.wall_clock_breakdown():
1168
+ self.timers(PIPE_RECV_INPUT_TIMER).stop()
1169
+
1170
+ def _exec_recv_grads(self, buffer_id):
1171
+ if self.wall_clock_breakdown():
1172
+ self.timers(PIPE_RECV_GRAD_TIMER).start()
1173
+
1174
+ outputs = self.pipe_buffers['outputs'][buffer_id]
1175
+ # XXX these shapes are hardcoded for Megatron
1176
+ # Restore partitioned output if it was partitioned and we are sending full gradients
1177
+ if self.is_pipe_partitioned and not self.is_grad_partitioned:
1178
+ if self.pipe_partition_grad_meta_cache is None:
1179
+ self.pipe_partition_grad_meta_cache = outputs[0].to('cpu')
1180
+ part_output = PartitionedTensor.from_meta(meta=self.pipe_partition_grad_meta_cache,
1181
+ local_part=outputs[1],
1182
+ group=self.grid.get_slice_parallel_group())
1183
+ outputs[0].data = part_output.full()
1184
+ outputs = (outputs[0], *outputs[2:])
1185
+ # save for backward
1186
+ self.pipe_buffers['outputs'][buffer_id] = outputs
1187
+
1188
+ # Allocate gradient if necessary
1189
+ if self.grad_layer is None:
1190
+ if isinstance(outputs, torch.Tensor):
1191
+ s = list(outputs.size())
1192
+ self.grad_layer = self._allocate_buffer(s, dtype=outputs.dtype, num_buffers=1)[0]
1193
+ else:
1194
+ # XXX This is a HACK
1195
+ # When we exchange activations/gradients, the two pipe stages
1196
+ # need to issue the send/recv with the same buffer sizes or
1197
+ # else there is a deadlock. The is_floating_point() filter is
1198
+ # used to avoid sending gradients for tensors that do not
1199
+ # produce gradients. When TP>1, we partition the first
1200
+ # activations/gradients across TP ranks to save communication
1201
+ # volume and memory. That partitioned tensor is represented as
1202
+ # two tensors: a 1/TPth chunk of the original data and also a
1203
+ # small LongTensor storing the metadata used to reconstruct on
1204
+ # the other side. When combined, the floating point filter also
1205
+ # filtered out the metadata tensor. This quick (hacky) fix just
1206
+ # branches on is_grad_partitioned so we don't filter out the
1207
+ # metadata tensor.
1208
+ if self.is_grad_partitioned:
1209
+ sizes_and_dtypes = [(list(t.size()), t.dtype)
1210
+ for t in outputs[:2]] + [(list(t.size()), t.dtype)
1211
+ for t in outputs[2:] if t.is_floating_point()]
1212
+ else:
1213
+ sizes_and_dtypes = [(list(t.size()), t.dtype) for t in outputs if t.is_floating_point()]
1214
+ self.grad_layer = self._allocate_buffers(sizes_and_dtypes, num_buffers=1)[0]
1215
+
1216
+ if isinstance(self.grad_layer, torch.Tensor):
1217
+ p2p.recv(self.grad_layer, self.next_stage)
1218
+ else:
1219
+ assert isinstance(outputs, tuple)
1220
+ for idx, buffer in enumerate(self.grad_layer):
1221
+ # XXX GPT-2 hack
1222
+ if self.is_grad_partitioned and idx == 0 and buffer.dtype != torch.long:
1223
+ buffer.data = torch.zeros(buffer.size(), dtype=torch.long, device=self.device)
1224
+ p2p.recv(buffer, self.next_stage)
1225
+
1226
+ if self.wall_clock_breakdown():
1227
+ self.timers(PIPE_RECV_GRAD_TIMER).stop()
1228
+
1229
+ def _exec_optimizer_step(self, lr_kwargs=None):
1230
+ if self.wall_clock_breakdown():
1231
+ self.timers(STEP_MICRO_TIMER).start()
1232
+ self.timers(STEP_GLOBAL_TIMER).start()
1233
+ self.mem_status('BEFORE STEP', reset_max=True)
1234
+
1235
+ self._force_grad_boundary = True
1236
+ self._take_model_step(lr_kwargs)
1237
+ self._force_grad_boundary = False
1238
+
1239
+ self.mem_status('AFTER STEP')
1240
+
1241
+ if self.global_rank == 0 and self.monitor.enabled:
1242
+ self.summary_events = [(f'Train/Samples/lr', self.get_lr()[0], self.global_samples)]
1243
+ if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'):
1244
+ self.summary_events.append(
1245
+ (f'Train/Samples/loss_scale', self.optimizer.cur_scale, self.global_samples))
1246
+ self.monitor.write_events(self.summary_events)
1247
+
1248
+ if self.wall_clock_breakdown():
1249
+ self.timers(STEP_MICRO_TIMER).stop()
1250
+ self.timers(STEP_GLOBAL_TIMER).stop()
1251
+ if self.global_steps % self.steps_per_print() == 0:
1252
+ self.timers.log([
1253
+ BATCH_INPUT_TIMER,
1254
+ FORWARD_MICRO_TIMER,
1255
+ BACKWARD_MICRO_TIMER,
1256
+ BACKWARD_INNER_MICRO_TIMER,
1257
+ BACKWARD_REDUCE_MICRO_TIMER,
1258
+ STEP_MICRO_TIMER,
1259
+ ])
1260
+ if self.global_steps % self.steps_per_print() == 0:
1261
+ self.timers.log([
1262
+ FORWARD_GLOBAL_TIMER,
1263
+ BACKWARD_GLOBAL_TIMER,
1264
+ BACKWARD_INNER_GLOBAL_TIMER,
1265
+ BACKWARD_REDUCE_GLOBAL_TIMER,
1266
+ STEP_GLOBAL_TIMER,
1267
+ ])
1268
+
1269
+ def _allocate_zeros(self, shape, **kwargs):
1270
+ """ Allocate a tensor of zeros on the engine's device.
1271
+
1272
+ Arguments:
1273
+ shape: the shape of the tensor to allocate
1274
+ kwargs: passed to torch.zeros()
1275
+
1276
+ Returns:
1277
+ A tensor from torch.zeros() allocated on self.device.
1278
+ """
1279
+ if "dtype" not in kwargs:
1280
+ if self.fp16_enabled():
1281
+ kwargs["dtype"] = torch.half
1282
+ if self.bfloat16_enabled():
1283
+ kwargs["dtype"] = torch.bfloat16
1284
+
1285
+ return torch.zeros(shape, device=self.device, **kwargs)
1286
+
1287
+ def _allocate_buffer(self, shape, num_buffers=-1, **kwargs):
1288
+ buffers = []
1289
+ if num_buffers == -1:
1290
+ num_buffers = self.num_pipe_buffers
1291
+ for count in range(num_buffers):
1292
+ buffers.append(self._allocate_zeros(shape, **kwargs))
1293
+ return buffers
1294
+
1295
+ def _allocate_buffers(self, shapes_and_dtypes, requires_grad=False, num_buffers=-1):
1296
+ buffers = []
1297
+ if num_buffers == -1:
1298
+ num_buffers = self.num_pipe_buffers
1299
+ for count in range(num_buffers):
1300
+ buffer = []
1301
+ for shape, dtype in shapes_and_dtypes:
1302
+ buffer.append(self._allocate_zeros(shape, dtype=dtype, requires_grad=requires_grad))
1303
+ buffers.append(buffer)
1304
+ return buffers
1305
+
1306
+ def forward(self, *args, **kwargs):
1307
+ """Disabled for pipeline parallel training. See ``train_batch()``. """
1308
+ raise PipelineError("Only train_batch() is accessible in pipeline mode.")
1309
+
1310
+ def backward(self, *args, **kwargs):
1311
+ """Disabled for pipeline parallel training. See ``train_batch()``. """
1312
+ raise PipelineError("Only train_batch() is accessible in pipeline mode.")
1313
+
1314
+ def step(self, *args, **kwargs):
1315
+ """Disabled for pipeline parallel training. See ``train_batch()``. """
1316
+ raise PipelineError("Only train_batch() is accessible in pipeline mode.")
1317
+
1318
+ def mem_status(self, msg, print_rank=-1, reset_max=False):
1319
+ return
1320
+ global mem_alloced, mem_cached
1321
+ if not self.global_steps == 0 or not self.global_steps == 9:
1322
+ #return
1323
+ pass
1324
+ if self.mpu.get_data_parallel_rank() != 0:
1325
+ return
1326
+
1327
+ if self.global_rank != 0:
1328
+ return
1329
+
1330
+ rank = self.global_rank
1331
+ if print_rank != -1 and rank != print_rank:
1332
+ return
1333
+
1334
+ get_accelerator().synchronize()
1335
+
1336
+ if reset_max:
1337
+ get_accelerator().reset_max_memory_cached()
1338
+ get_accelerator().reset_max_memory_allocated()
1339
+
1340
+ new_alloced = get_accelerator().memory_allocated()
1341
+ new_cached = get_accelerator().memory_cached()
1342
+
1343
+ delta_alloced = new_alloced - mem_alloced
1344
+ delta_cached = new_cached - mem_cached
1345
+
1346
+ mem_cached = new_cached
1347
+ mem_alloced = new_alloced
1348
+
1349
+ max_alloced = get_accelerator().max_memory_allocated()
1350
+ max_cached = get_accelerator().max_memory_cached()
1351
+
1352
+ # convert to GB for printing
1353
+ new_alloced /= 1024**3
1354
+ new_cached /= 1024**3
1355
+ delta_alloced /= 1024**3
1356
+ delta_cached /= 1024**3
1357
+ max_alloced /= 1024**3
1358
+ max_cached /= 1024**3
1359
+
1360
+ print(
1361
+ f'RANK={rank} STAGE={self.stage_id} STEP={self.global_steps} MEMSTATS', msg,
1362
+ f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) '
1363
+ f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)')
1364
+
1365
+ def module_state_dict(self, exclude_frozen_parameters=False):
1366
+ """Override hack to save a pipe model; the state is written to the checkpoint directory rather than returned.
1367
+
1368
+ This method should only be called by DeepSpeed's ``save_checkpoint()``. The
1369
+ recommended way of saving a ``PipelineModule`` outside of ``save_checkpoint()``
1370
+ is ``save_state_dict()``.
1371
+
1372
+ Returns:
1373
+ None
1374
+ """
1375
+ assert isinstance(self.module, PipelineModule)
1376
+ assert self._curr_ckpt_path is not None, \
1377
+ "PipelineEngine expects module_state_dict() to be called from save_checkpoint()"
1378
+
1379
+ self.module.save_state_dict(self._curr_ckpt_path,
1380
+ checkpoint_engine=self.checkpoint_engine,
1381
+ exclude_frozen_params=exclude_frozen_parameters)
1382
+ return None
1383
+
1384
+ def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None, fetch_z3_params=False):
1385
+ """Override hack to instead use a directory path.
1386
+
1387
+ This is important because pipeline models checkpoint by layer instead of rank.
1388
+
1389
+ If ``state_dict`` is neither ``None`` nor a ``str``, we revert to ``super()``, which expects a ``dict``.
1390
+
1391
+ Args:
1392
+ state_dict (str, None): unused
1393
+ strict (bool, optional): Strict state loading. Defaults to True.
1394
+ """
1395
+ assert custom_load_fn is None, "custom_load_fn not supported w. pipeline parallelism"
1396
+ state_dict = checkpoint if self.has_moe_layers else checkpoint['module']
1397
+ if (state_dict is not None) and (not isinstance(state_dict, str)):
1398
+ super().load_module_state_dict(state_dict, strict)
1399
+ return
1400
+
1401
+ self.module.load_state_dir(load_dir=self._curr_ckpt_path,
1402
+ strict=strict,
1403
+ checkpoint_engine=self.checkpoint_engine)
1404
+
1405
+ # A map of PipeInstruction types to methods. Each method will be executed with the
1406
+ # kwargs provided to the PipeInstruction from the scheduler.
1407
+ _INSTRUCTION_MAP = {
1408
+ schedule.OptimizerStep: _exec_optimizer_step,
1409
+ schedule.ReduceGrads: _exec_reduce_grads,
1410
+ schedule.ReduceTiedGrads: _exec_reduce_tied_grads,
1411
+ schedule.LoadMicroBatch: _exec_load_micro_batch,
1412
+ schedule.ForwardPass: _exec_forward_pass,
1413
+ schedule.BackwardPass: _exec_backward_pass,
1414
+ schedule.SendActivation: _exec_send_activations,
1415
+ schedule.RecvActivation: _exec_recv_activations,
1416
+ schedule.SendGrad: _exec_send_grads,
1417
+ schedule.RecvGrad: _exec_recv_grads,
1418
+ }
1419
+
1420
+ def _exec_schedule(self, pipe_schedule):
1421
+ # Reserve and reset buffers.
1422
+ self._reserve_pipe_buffers(pipe_schedule.num_pipe_buffers())
1423
+ self.fwd_outputs = []
1424
+
1425
+ # For each step in the schedule
1426
+ for step_cmds in pipe_schedule:
1427
+ # For each instruction in the step
1428
+ for cmd in step_cmds:
1429
+ if type(cmd) not in self._INSTRUCTION_MAP:
1430
+ raise RuntimeError(f'{self.__class__.__name__} does not understand instruction {repr(cmd)}')
1431
+
1432
+ # Equivalent to: self._exec_forward_pass(buffer_id=0)
1433
+ self._exec_instr = MethodType(self._INSTRUCTION_MAP[type(cmd)], self)
1434
+ self._exec_instr(**cmd.kwargs)
1435
+
1436
+ def get_additional_losses(self):
1437
+ return self.agg_additional_losses
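The ``_INSTRUCTION_MAP`` / ``_exec_schedule`` pair above is a small interpreter: the schedule yields instruction objects, and the engine binds the matching ``_exec_*`` function to itself with ``MethodType`` and invokes it with the kwargs stored on the instruction. A minimal, self-contained sketch of the same dispatch pattern follows; the ``ToyEngine``, ``Forward``, and ``Backward`` names are illustrative stand-ins, not DeepSpeed APIs.

.. code-block:: python

    from types import MethodType


    class Instruction:
        """Base instruction: stores keyword arguments, like PipeInstruction."""

        def __init__(self, **kwargs):
            self.kwargs = kwargs


    class Forward(Instruction):
        pass


    class Backward(Instruction):
        pass


    class ToyEngine:
        """Executes instruction streams via a type -> handler map."""

        def _exec_forward(self, buffer_id):
            print(f'forward on buffer {buffer_id}')

        def _exec_backward(self, buffer_id):
            print(f'backward on buffer {buffer_id}')

        # Mirrors _INSTRUCTION_MAP: instruction types map to unbound handlers.
        _INSTRUCTION_MAP = {
            Forward: _exec_forward,
            Backward: _exec_backward,
        }

        def run(self, schedule):
            for step_cmds in schedule:          # one list of commands per step
                for cmd in step_cmds:
                    if type(cmd) not in self._INSTRUCTION_MAP:
                        raise RuntimeError(f'unknown instruction {cmd!r}')
                    # Bind the handler to this engine instance, then call it
                    # with the kwargs carried by the instruction.
                    handler = MethodType(self._INSTRUCTION_MAP[type(cmd)], self)
                    handler(**cmd.kwargs)


    ToyEngine().run([[Forward(buffer_id=0)], [Backward(buffer_id=0)]])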
venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/schedule.py ADDED
@@ -0,0 +1,494 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from ..utils import call_to_str
7
+
8
+ from abc import ABC, abstractmethod
9
+
10
+
11
+ class PipeSchedule(ABC):
12
+ """Directs the execution of a pipeline engine by generating sequences of
13
+ :class:`PipeInstruction`.
14
+
15
+ Schedules are generators that yield sequences of
16
+ :class:`PipeInstruction` to process the micro-batches in one batch.
17
+ Each yielded step is atomic in the sense that a barrier
18
+ synchronization can be placed between successive steps without
19
+ deadlock.
20
+
21
+ Below is an example schedule that implements data parallelism with gradient accumulation:
22
+
23
+ .. code-block:: python
24
+
25
+ class DataParallelSchedule(PipeSchedule):
26
+ def steps(self):
27
+ for step_id in range(self.micro_batches):
28
+ cmds = [
29
+ LoadMicroBatch(buffer_id=0),
30
+ ForwardPass(buffer_id=0),
31
+ BackwardPass(buffer_id=0),
32
+ ]
33
+ if step_id == self.micro_batches - 1:
34
+ cmds.extend([
35
+ ReduceGrads(),
36
+ OptimizerStep(),
37
+ ])
38
+ yield cmds
39
+
40
+ def num_pipe_buffers(self):
41
+ return 1
42
+
43
+ Args:
44
+ micro_batches (int): The number of micro-batches that comprise a batch.
45
+ stages (int): The number of pipeline stages.
46
+ stage_id (int): The pipe stage that will execute the generated schedule.
47
+ """
48
+
49
+ def __init__(self, micro_batches, stages, stage_id):
50
+ super().__init__()
51
+ self.micro_batches = micro_batches
52
+ self.stages = stages
53
+ self.stage_id = stage_id
54
+ self.prev_stage = self.stage_id - 1
55
+ self.next_stage = self.stage_id + 1
56
+
57
+ @abstractmethod
58
+ def steps(self):
59
+ """Yield a list of :class:`PipeInstruction` for each step in the schedule.
60
+
61
+ .. note::
62
+ Schedules must implement ``steps()`` to define the schedule.
63
+
64
+ Returns:
65
+ Instructions to be executed as one step of the pipeline
66
+ """
67
+ pass
68
+
69
+ def num_pipe_buffers(self):
70
+ """The number of pipeline buffers that will be used by this stage.
71
+
72
+ .. note::
73
+ Schedules should specialize ``num_pipe_buffers()`` for memory savings at scale.
74
+
75
+ Returns:
76
+ The number of buffers for the engine to allocate.
77
+ """
78
+ return self.micro_batches
79
+
80
+ def _valid_micro_batch(self, micro_batch_id):
81
+ return 0 <= micro_batch_id < self.micro_batches
82
+
83
+ def _valid_stage(self, stage_id):
84
+ return 0 <= stage_id < self.stages
85
+
86
+ @property
87
+ def stage(self):
88
+ """Stage index used to configure this schedule."""
89
+ return self.stage_id
90
+
91
+ @property
92
+ def num_stages(self):
93
+ """The number of total pipeline stages used to configure this schedule."""
94
+ return self.stages
95
+
96
+ @property
97
+ def num_micro_batches(self):
98
+ """The number of total micro_batches used to configure this schedule."""
99
+ return self.micro_batches
100
+
101
+ @property
102
+ def is_first_stage(self):
103
+ """True if the configured ``stage_id`` is the first stage in the pipeline."""
104
+ return self.stage_id == 0
105
+
106
+ @property
107
+ def is_last_stage(self):
108
+ """True if the configured ``stage_id`` is the last stage in the pipeline."""
109
+ return self.stage_id == self.stages - 1
110
+
111
+ def _buffer_idx(self, micro_batch_id):
112
+ """Map a micro-batch index to a pipeline buffer index.
113
+
114
+ This method uses a cyclic allocation strategy.
115
+
116
+ Args:
117
+ micro_batch_id (int): The micro-batch index relative to the beginning of the schedule.
118
+
119
+ Returns:
120
+ int: The index of the buffer that should store data.
121
+ """
122
+ assert self._valid_micro_batch(micro_batch_id)
123
+ return micro_batch_id % self.num_pipe_buffers()
124
+
125
+ def __iter__(self):
126
+ self.it = None
127
+ return self
128
+
129
+ def __next__(self):
130
+ if self.it is None:
131
+ self.it = self.steps()
132
+ return next(self.it)
133
+
134
+
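``__iter__`` resets the cached generator and ``__next__`` lazily creates it from ``steps()``, so a schedule can be consumed with a plain ``for`` loop. The exploratory subclass below illustrates the protocol; ``EchoSchedule`` is a made-up name for this sketch, while the imported classes are the ones defined in this module.

.. code-block:: python

    from deepspeed.runtime.pipe.schedule import (ForwardPass, LoadMicroBatch,
                                                 PipeSchedule)


    class EchoSchedule(PipeSchedule):
        """Toy schedule: load then forward every micro-batch."""

        def steps(self):
            for mb in range(self.micro_batches):
                buf = self._buffer_idx(mb)  # cyclic buffer assignment
                yield [LoadMicroBatch(buffer_id=buf), ForwardPass(buffer_id=buf)]


    sched = EchoSchedule(micro_batches=3, stages=2, stage_id=0)
    for step_id, cmds in enumerate(sched):
        print(step_id, cmds)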
135
+ class InferenceSchedule(PipeSchedule):
136
+ """A schedule for inferencing batches using pipeline parallelism.
137
+ """
138
+
139
+ def steps(self):
140
+ """"""
141
+ prev_micro_batch_id = -1
142
+ total_steps = self.micro_batches + self.stages - 1
143
+ for step_id in range(total_steps):
144
+ cmds = []
145
+ micro_batch_id = step_id - self.stage_id
146
+
147
+ # Alternate send/recv buffers
148
+ if _is_even(self.stage_id):
149
+ recv_buf = step_id % 2
150
+ send_buf = (step_id + 1) % 2
151
+ else:
152
+ recv_buf = (step_id + 1) % 2
153
+ send_buf = step_id % 2
154
+
155
+ if self.is_first_stage or self.is_last_stage:
156
+ if self._valid_micro_batch(micro_batch_id):
157
+ cmds.append(LoadMicroBatch(recv_buf))
158
+
159
+ if _is_even(self.stage_id):
160
+ if self._valid_stage(self.next_stage):
161
+ if self._valid_micro_batch(micro_batch_id - 1):
162
+ cmds.append(SendActivation(send_buf))
163
+ if self._valid_stage(self.prev_stage):
164
+ if self._valid_micro_batch(micro_batch_id):
165
+ cmds.append(RecvActivation(recv_buf))
166
+ else:
167
+ if self._valid_stage(self.prev_stage):
168
+ if self._valid_micro_batch(micro_batch_id):
169
+ cmds.append(RecvActivation(recv_buf))
170
+
171
+ if self._valid_stage(self.next_stage):
172
+ if self._valid_micro_batch(micro_batch_id - 1):
173
+ cmds.append(SendActivation(send_buf))
174
+
175
+ if self._valid_micro_batch(micro_batch_id):
176
+ cmds.append(ForwardPass(recv_buf))
177
+
178
+ yield cmds
179
+
180
+ def num_pipe_buffers(self):
181
+ """Only two pipeline buffers are required for inferencing.
182
+
183
+ Returns:
184
+ ``2``
185
+ """
186
+ return 2
187
+
188
+
189
+ class TrainSchedule(PipeSchedule):
190
+ """A schedule for training a batch using hybrid parallelism.
191
+
192
+ Pipeline parallelism is extracted through gradient accumulation and thus
193
+ convergence follows that of a data parallel approach with the same batch
194
+ size.
195
+ """
196
+
197
+ def steps(self):
198
+ """"""
199
+ prev_micro_batch_id = -1
200
+ total_steps = 2 * (self.micro_batches + self.stages - 1)
201
+ for step_id in range(total_steps):
202
+ # Map the step of the pipeline to the micro-batch id and also whether it is a
203
+ # forward or backward pass step.
204
+ micro_batch_id, is_forward = self._step_to_micro_batch(step_id)
205
+
206
+ if self._valid_micro_batch(prev_micro_batch_id):
207
+ prev_buffer = self._buffer_idx(prev_micro_batch_id)
208
+ if self._valid_micro_batch(micro_batch_id):
209
+ curr_buffer = self._buffer_idx(micro_batch_id)
210
+
211
+ cmds = []
212
+
213
+ # Exchange activations
214
+ if is_forward:
215
+ if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.prev_stage):
216
+ cmds.append(SendGrad(prev_buffer))
217
+ if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.prev_stage):
218
+ cmds.append(RecvActivation(curr_buffer))
219
+ else:
220
+ if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.next_stage):
221
+ cmds.append(RecvGrad(curr_buffer))
222
+ if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.next_stage):
223
+ cmds.append(SendActivation(prev_buffer))
224
+
225
+ # First/last stage loads
226
+ if self.stage_id == 0 or self.stage_id == self.stages - 1:
227
+ if is_forward and self._valid_micro_batch(micro_batch_id):
228
+ cmds.append(LoadMicroBatch(curr_buffer))
229
+
230
+ # Computation
231
+ if self._valid_micro_batch(micro_batch_id):
232
+ if is_forward:
233
+ cmds.append(ForwardPass(curr_buffer))
234
+ else:
235
+ cmds.append(BackwardPass(curr_buffer))
236
+
237
+ # Model step at the end of the batch
238
+ if step_id == total_steps - 1:
239
+ cmds.append(ReduceTiedGrads())
240
+ cmds.append(ReduceGrads())
241
+ cmds.append(OptimizerStep())
242
+
243
+ # Prepare state for next time
244
+ prev_micro_batch_id = micro_batch_id
245
+ yield cmds
246
+
247
+ def num_pipe_buffers(self):
248
+ """Return the number of pipeline buffers required for this stage.
249
+
250
+ This is equivalent to the maximum number of in-flight forward passes,
251
+ since we need to remember the activations of forward passes in order
252
+ to run backpropagation. For synchronous 1F1B, this is equivalent to
253
+ the index difference between this stage and the last stage.
254
+ """
255
+ buffers = min(self.stages - self.stage_id, self.micro_batches)
256
+ return max(2, buffers)
257
+
258
+ def _step_to_micro_batch(self, step_id):
259
+ if _is_even(step_id) and _is_even(self.stage_id):
260
+ micro_batch_id = self._even_step_forward_id(step_id)
261
+ is_forward = True
262
+
263
+ elif _is_odd(step_id) and _is_odd(self.stage_id):
264
+ micro_batch_id = self._odd_step_forward_id(step_id)
265
+ is_forward = True
266
+
267
+ elif _is_even(step_id) and _is_odd(self.stage_id):
268
+ micro_batch_id = self._even_step_backward_id(step_id)
269
+ is_forward = False
270
+
271
+ elif _is_odd(step_id) and _is_even(self.stage_id):
272
+ micro_batch_id = self._odd_step_backward_id(step_id)
273
+ is_forward = False
274
+
275
+ else:
276
+ assert False
277
+
278
+ return micro_batch_id, is_forward
279
+
280
+ def _even_step_forward_id(self, step_id):
281
+ base = step_id // 2
282
+ micro_batch_id = int(base - self.stage_id // 2)
283
+ return micro_batch_id
284
+
285
+ def _odd_step_forward_id(self, step_id):
286
+ base = (step_id - 1) // 2
287
+ micro_batch_id = int(base - self.stage_id // 2)
288
+ return micro_batch_id
289
+
290
+ def _even_step_backward_id(self, step_id):
291
+ base = step_id // 2
292
+ micro_batch_id = int(base - self.stages + (self.stage_id + 1) // 2)
293
+ return micro_batch_id
294
+
295
+ def _odd_step_backward_id(self, step_id):
296
+ base = ((step_id - 1) // 2) - self.stages + 1
297
+ micro_batch_id = int(base + self.stage_id // 2)
298
+ return micro_batch_id
299
+
300
+
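The parity rules in ``_step_to_micro_batch`` are easiest to see by enumerating a tiny configuration. The snippet below is a quick exploratory check, deliberately poking at private helpers, that prints which ``(micro_batch_id, is_forward)`` pair each step of a two-stage, two-micro-batch pipeline maps to from stage 0's point of view.

.. code-block:: python

    from deepspeed.runtime.pipe.schedule import TrainSchedule

    sched = TrainSchedule(micro_batches=2, stages=2, stage_id=0)
    total_steps = 2 * (sched.micro_batches + sched.stages - 1)
    for step_id in range(total_steps):
        mb, is_forward = sched._step_to_micro_batch(step_id)
        phase = 'fwd' if is_forward else 'bwd'
        executed = sched._valid_micro_batch(mb)  # out-of-range ids are skipped
        print(f'step {step_id}: micro_batch {mb} ({phase}), executed={executed}')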
301
+ class DataParallelSchedule(PipeSchedule):
302
+ """An example schedule that trains using traditional data parallelism with gradient
303
+ accumulation.
304
+ """
305
+
306
+ def steps(self):
307
+ """"""
308
+ for step_id in range(self.micro_batches):
309
+ cmds = [
310
+ LoadMicroBatch(buffer_id=0),
311
+ ForwardPass(buffer_id=0),
312
+ BackwardPass(buffer_id=0),
313
+ ]
314
+ if step_id == self.micro_batches - 1:
315
+ cmds.extend([
316
+ ReduceGrads(),
317
+ OptimizerStep(),
318
+ ])
319
+ yield cmds
320
+
321
+ def num_pipe_buffers(self):
322
+ """Only one pipeline buffer needed.
323
+ """
324
+ return 1
325
+
326
+
327
+ class PipeInstruction:
328
+ """Base class for all instructions to be executed by the pipeline engine.
329
+
330
+ All keyword arguments are stored as members similar to a ``namedtuple``. These are
331
+ then accessible to the :class:`PipeEngine` during execution.
332
+
333
+ Args:
334
+ kwargs (optional): keyword arguments to store as members
335
+ """
336
+
337
+ def __init__(self, **kwargs):
338
+ self.name = self.__class__.__name__
339
+ self.kwargs = kwargs
340
+ for key, val in kwargs.items():
341
+ setattr(self, key, val)
342
+
343
+ def __repr__(self):
344
+ return call_to_str(self.name, **self.kwargs)
345
+
346
+
347
+ class OptimizerStep(PipeInstruction):
348
+ """Performs one step with the optimizer and zeros gradients.
349
+
350
+ .. note:: Should be issued after :class:`ReduceGrads` and :class:`ReduceTiedGrads`.
351
+
352
+ .. note:: Can be a synchronization point among data-parallel ranks.
353
+ """
354
+ pass
355
+
356
+
357
+ class ReduceGrads(PipeInstruction):
358
+ """Reduce the computed gradients among data-parallel processes within the stage.
359
+ """
360
+ pass
361
+
362
+
363
+ class ReduceTiedGrads(PipeInstruction):
364
+ """Reduce the computed gradients of tied modules within a pipeline-parallel group.
365
+
366
+ .. warning::
367
+ The stages included in this synchronization point are not known until
368
+ the model is partitioned among pipeline stages. In the worst case, it
369
+ includes all pipeline stages. This instruction should be scheduled
370
+ carefully to avoid deadlocks.
371
+ """
372
+ pass
373
+
374
+
375
+ class BufferOpInstruction(PipeInstruction):
376
+ """A pipeline instruction that operates on pipeline buffer(s).
377
+
378
+ Args:
379
+ buffer_id (int): the index of the pipeline buffer to modify.
380
+ """
381
+
382
+ def __init__(self, buffer_id, **kwargs):
383
+ super().__init__(buffer_id=buffer_id, **kwargs)
384
+
385
+
386
+ # IO
387
+ class LoadMicroBatch(BufferOpInstruction):
388
+ """Load a micro-batch into a buffer.
389
+
390
+ Roughly:
391
+
392
+ .. code-block:: python
393
+
394
+ buffers['inputs'][buffer_id] = next(data_iter)
395
+ """
396
+ pass
397
+
398
+
399
+ # Compute
400
+ class ForwardPass(BufferOpInstruction):
401
+ """Compute a forward pass.
402
+
403
+ Roughly:
404
+
405
+ .. code-block:: python
406
+
407
+ buffers['outputs'][buffer_id] = forward(buffers['inputs'][buffer_id])
408
+ """
409
+ pass
410
+
411
+
412
+ class BackwardPass(BufferOpInstruction):
413
+ """Compute a backward pass and accumulate gradients.
414
+
415
+ Roughly:
416
+
417
+ .. code-block:: python
418
+
419
+ outputs = buffers['outputs'][buffer_id]
420
+ gradients = buffers['gradients'][buffer_id]
421
+ torch.autograd.backward(tensors=outputs,
422
+ grad_tensors=gradients)
423
+ """
424
+ pass
425
+
426
+
427
+ # Communication
428
+ class SendActivation(BufferOpInstruction):
429
+ """Send activations to the next stage in the pipeline.
430
+
431
+ Roughly:
432
+
433
+ .. code-block:: python
434
+
435
+ send(buffers['outputs'][buffer_id])
436
+
437
+ .. note::
438
+ The communication is blocking and must be paired with a :class:`RecvActivation`
439
+ on the next pipeline stage to avoid deadlock.
440
+ """
441
+ pass
442
+
443
+
444
+ class RecvActivation(BufferOpInstruction):
445
+ """Receive activations from the previous stage in the pipeline.
446
+
447
+ Roughly:
448
+
449
+ .. code-block:: python
450
+
451
+ buffers['inputs'][buffer_id] = recv()
452
+
453
+ .. note::
454
+ The communication is blocking and must be paired with a :class:`SendActivation`
455
+ on the previous pipeline stage to avoid deadlock.
456
+ """
457
+ pass
458
+
459
+
460
+ class SendGrad(BufferOpInstruction):
461
+ """Send the gradients computed with respect to the received activations back to
462
+ the previous pipeline stage.
463
+
464
+ .. note::
465
+ Only received tensors with ``requires_grad==True`` will produce gradients.
466
+ Missing gradients will be replaced with ``None`` on the receiving stage.
467
+
468
+ .. note::
469
+ The communication is blocking and must be paired with a :class:`RecvGrad`
470
+ on the previous pipeline stage to avoid deadlock.
471
+ """
472
+ pass
473
+
474
+
475
+ class RecvGrad(BufferOpInstruction):
476
+ """Receive computed gradients from the next pipeline stage.
477
+
478
+ .. note::
479
+ Only activations with ``requires_grad==True`` will produce gradients.
480
+ Missing gradients will be replaced with ``None``.
481
+
482
+ .. note::
483
+ The communication is blocking and must be paired with a :class:`SendGrad`
484
+ on the next pipeline stage to avoid deadlock.
485
+ """
486
+ pass
487
+
488
+
489
+ def _is_even(x):
490
+ return x % 2 == 0
491
+
492
+
493
+ def _is_odd(x):
494
+ return x % 2 != 0
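Taken end to end, a schedule is nothing more than a generator of per-step instruction lists. Printing the full instruction stream for every stage of a small pipeline makes the 1F1B interleaving visible; this is an exploratory snippet, not library code.

.. code-block:: python

    from deepspeed.runtime.pipe.schedule import TrainSchedule

    micro_batches, stages = 2, 2
    for stage_id in range(stages):
        sched = TrainSchedule(micro_batches=micro_batches, stages=stages,
                              stage_id=stage_id)
        print(f'--- stage {stage_id} ---')
        for step_id, cmds in enumerate(sched):
            print(step_id, cmds)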
venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/topology.py ADDED
@@ -0,0 +1,456 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from deepspeed import comm as dist
7
+
8
+ from collections import namedtuple
9
+ from itertools import product as cartesian_product
10
+
11
+
12
+ class ProcessTopology:
13
+ """ Manages the mapping of n-dimensional Cartesian coordinates to linear
14
+ indices. This mapping is used to map the rank of processes to the grid
15
+ for various forms of parallelism.
16
+
17
+ Each axis of the tensor is accessed by its name. The provided ordering
18
+ of the axes defines the layout of the topology. ProcessTopology uses a "row-major"
19
+ layout of the tensor axes, and so axes=['x', 'y'] would map coordinates (x,y) and
20
+ (x,y+1) to adjacent linear indices. If instead axes=['y', 'x'] was used, coordinates
21
+ (x,y) and (x+1,y) would be adjacent.
22
+
23
+ Some methods return ProcessCoord namedtuples.
24
+ """
25
+
26
+ def __init__(self, axes, dims):
27
+ """Create a mapping of n-dimensional tensor coordinates to linear indices.
28
+
29
+ Arguments:
30
+ axes (list): the names of the tensor axes
31
+ dims (list): the dimension (length) of each axis of the topology tensor
32
+ """
33
+
34
+ self.axes = axes # names of each topology axis
35
+ self.dims = dims # length of each topology axis
36
+
37
+ # This is actually a class that lets us hash {'row':3, 'col':2} mappings
38
+ self.ProcessCoord = namedtuple('ProcessCoord', axes)
39
+
40
+ self.mapping = {}
41
+ ranges = [range(d) for d in dims]
42
+ # example: 1, (0,0,1)
43
+ for global_rank, coord in enumerate(cartesian_product(*ranges)):
44
+ key = {axis: coord[self.axes.index(axis)] for axis in self.axes}
45
+ key = self.ProcessCoord(**key)
46
+ # for example, {ProcessCoord(row=0, col=1) : 1}
47
+ self.mapping[key] = global_rank
48
+
49
+ def get_rank(self, **coord_kwargs):
50
+ """Return the global rank of a process via its coordinates.
51
+
52
+ Coordinates are specified as kwargs. For example:
53
+
54
+ >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
55
+ >>> X.get_rank(x=0, y=1)
56
+ 1
57
+ """
58
+ if len(coord_kwargs) != len(self.axes):
59
+ raise ValueError('get_rank() does not support slices. Use filter_match().')
60
+
61
+ key = self.ProcessCoord(**coord_kwargs)
62
+ assert key in self.mapping, f'key {coord_kwargs} invalid'
63
+ return self.mapping[key]
64
+
65
+ def get_axis_names(self):
66
+ """Return a list of the axis names in the ordering of the topology. """
67
+ return self.axes
68
+
69
+ def get_rank_repr(self, rank, omit_axes=['data', 'pipe'], inner_sep='_', outer_sep='-'):
70
+ """Return a string representation of a rank.
71
+
72
+ This method is primarily used for checkpointing model data.
73
+
74
+ For example:
75
+ >>> topo = Topo(axes=['a', 'b'], dims=[2, 2])
76
+ >>> topo.get_rank_repr(rank=3)
77
+ 'a_01-b_01'
78
+ >>> topo.get_rank_repr(rank=3, omit_axes=['a'])
79
+ 'b_01'
80
+
81
+ Args:
82
+ rank (int): A rank in the topology.
83
+ omit_axes (list, optional): Axes that should not be in the representation. Defaults to ['data', 'pipe'].
84
+ inner_sep (str, optional): Separator between an axis name and its index within one token. Defaults to '_'.
85
+ outer_sep (str, optional): Separator between the per-axis tokens. Defaults to '-'.
86
+
87
+ Returns:
88
+ str: A string representation of the coordinate owned by ``rank``.
89
+ """
90
+ omit_axes = frozenset(omit_axes)
91
+ axes = [a for a in self.get_axis_names() if a not in omit_axes]
92
+ names = []
93
+ for ax in axes:
94
+ ax_rank = getattr(self.get_coord(rank=rank), ax)
95
+ names.append(f'{ax}{inner_sep}{ax_rank:02d}')
96
+ return outer_sep.join(names)
97
+
98
+ def get_dim(self, axis):
99
+ """Return the number of processes along the given axis.
100
+
101
+ For example:
102
+ >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
103
+ >>> X.get_dim('y')
104
+ 3
105
+ """
106
+ if axis not in self.axes:
107
+ return 0
108
+ return self.dims[self.axes.index(axis)]
109
+
110
+ def get_coord(self, rank):
111
+ """Return the coordinate owned by a process rank.
112
+
113
+ The axes of the returned namedtuple can be directly accessed as members. For
114
+ example:
115
+ >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
116
+ >>> coord = X.get_coord(rank=1)
117
+ >>> coord.x
118
+ 0
119
+ >>> coord.y
120
+ 1
121
+ """
122
+ for coord, idx in self.mapping.items():
123
+ if idx == rank:
124
+ return coord
125
+ raise ValueError(f'rank {rank} not found in topology.')
126
+
127
+ def get_axis_comm_lists(self, axis):
128
+ """ Construct lists suitable for a communicator group along axis ``axis``.
129
+
130
+ Example:
131
+ >>> topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2])
132
+ >>> topo.get_axis_comm_lists('pipe')
133
+ [
134
+ [0, 4], # data=0, model=0
135
+ [1, 5], # data=0, model=1
136
+ [2, 6], # data=1, model=0
137
+ [3, 7], # data=1, model=1
138
+ ]
139
+
140
+ Returns:
141
+ A list of lists whose coordinates match in all axes *except* ``axis``.
142
+ """
143
+
144
+ # We don't raise a RuntimeError here; returning an empty list lets us write more generalized
145
+ # code for hybrid parallelisms.
146
+ if axis not in self.axes:
147
+ return []
148
+
149
+ # Grab all axes but `axis`
150
+ other_axes = [a for a in self.axes if a != axis]
151
+
152
+ lists = []
153
+
154
+ # Construct all combinations of coords with other_axes
155
+ ranges = [range(self.get_dim(a)) for a in other_axes]
156
+ for coord in cartesian_product(*ranges):
157
+ other_keys = {a: coord[other_axes.index(a)] for a in other_axes}
158
+ # now go over all ranks in `axis`.
159
+ sub_list = []
160
+ for axis_key in range(self.get_dim(axis)):
161
+ key = self.ProcessCoord(**other_keys, **{axis: axis_key})
162
+ sub_list.append(self.mapping[key])
163
+ lists.append(sub_list)
164
+
165
+ return lists
166
+
167
+ def filter_match(self, **filter_kwargs):
168
+ """Return the list of ranks whose coordinates match the provided criteria.
169
+
170
+ Example:
171
+ >>> X = ProcessTopology(axes=['pipe', 'data', 'model'], dims=[2, 2, 2])
172
+ >>> X.filter_match(pipe=0, data=1)
173
+ [2, 3]
174
+ >>> [X.get_coord(rank) for rank in X.filter_match(pipe=0, data=1)]
175
+ [ProcessCoord(pipe=0, data=1, model=0), ProcessCoord(pipe=0, data=1, model=1)]
176
+
177
+ Arguments:
178
+ **filter_kwargs (dict): criteria used to select coordinates.
179
+
180
+ Returns:
181
+ The list of ranks whose coordinates match filter_kwargs.
182
+ """
183
+
184
+ def _filter_helper(x):
185
+ for key, val in filter_kwargs.items():
186
+ if getattr(x, key) != val:
187
+ return False
188
+ return True
189
+
190
+ coords = filter(_filter_helper, self.mapping.keys())
191
+ return [self.mapping[coord] for coord in coords]
192
+
193
+ def get_axis_list(self, axis, idx):
194
+ """Returns the list of global ranks whose coordinate in an axis is idx.
195
+
196
+ For example:
197
+ >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
198
+ >>> X.get_axis_list(axis='x', idx=0)
199
+ [0, 1, 2]
200
+ >>> X.get_axis_list(axis='y', idx=0)
201
+ [0, 3]
202
+ """
203
+
204
+ # This could be faster by generating the desired keys directly instead of
205
+ # filtering.
206
+ axis_num = self.axes.index(axis)
207
+ ranks = [self.mapping[k] for k in self.mapping.keys() if k[axis_num] == idx]
208
+ return ranks
209
+
210
+ def world_size(self):
211
+ return len(self.mapping)
212
+
213
+ def __str__(self):
214
+ return str(self.mapping)
215
+
216
+
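Putting the accessors above together for a small ``pipe`` x ``data`` topology shows how coordinates, ranks, and communicator lists relate. The dims chosen here are arbitrary; the expected values in the comments follow from the row-major mapping described in the class docstring.

.. code-block:: python

    from deepspeed.runtime.pipe.topology import ProcessTopology

    topo = ProcessTopology(axes=['pipe', 'data'], dims=[2, 3])  # 6 ranks total

    print(topo.get_rank(pipe=1, data=0))     # row-major: 1 * 3 + 0 = 3
    print(topo.get_coord(rank=4))            # ProcessCoord(pipe=1, data=1)
    print(topo.get_axis_comm_lists('data'))  # [[0, 1, 2], [3, 4, 5]]
    print(topo.get_axis_comm_lists('pipe'))  # [[0, 3], [1, 4], [2, 5]]
    print(topo.filter_match(pipe=0))         # [0, 1, 2]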
217
+ def _prime_factors(N):
218
+ """ Returns the prime factorization of positive integer N. """
219
+ if N <= 0:
220
+ raise ValueError("Values must be strictly positive.")
221
+
222
+ primes = []
223
+ while N != 1:
224
+ for candidate in range(2, N + 1):
225
+ if N % candidate == 0:
226
+ primes.append(candidate)
227
+ N //= candidate
228
+ break
229
+ return primes
230
+
231
+
232
+ class PipeDataParallelTopology(ProcessTopology):
233
+ """ A topology specialization for hybrid data and pipeline parallelism.
234
+
235
+ Uses data parallelism on the last dimension to encourage gradient
236
+ reductions to use high-bandwidth intra-node links and lower-volume
237
+ pipeline communications to use low-bandwidth inter-node links.
238
+ """
239
+
240
+ def __init__(self, num_pp, num_dp):
241
+ super().__init__(axes=['pipe', 'data'], dims=[num_pp, num_dp])
242
+
243
+
244
+ class PipeModelDataParallelTopology(ProcessTopology):
245
+ """ A topology for hybrid pipeline, model, and data parallelism. """
246
+
247
+ def __init__(self, num_pp, num_mp, num_dp):
248
+ super().__init__(axes=['pipe', 'data', 'model'], dims=[num_pp, num_dp, num_mp])
249
+
250
+
251
+ class PipelineParallelGrid:
252
+ """Implements a grid object that stores the data parallel ranks
253
+ corresponding to each of the model parallel stages.
254
+
255
+ The grid object organizes the processes in a distributed pytorch job
256
+ into a 2D grid of stage_id and data_parallel_id.
257
+
258
+ self.stage_id and self.data_parallel_id store the stage id
259
+ and the data parallel id of the current process.
260
+
261
+ self.dp_group groups the processes by stage_id.
262
+ self.dp_group[i] is a list containing all process ranks whose
263
+ stage_id is i.
264
+
265
+ self.p2p_groups stores a list of tuples, where each tuple
266
+ holds the process ranks of adjacent stages for a given data_parallel_id.
267
+ For example, if num_stages is 5, the tuple [7, 8] represents stages [3, 4]
268
+ for data_parallel_id = 1. A stage wrap-around appears as non-adjacent ranks;
269
+ for example, the tuple [4, 0] represents the wrap-around from stage 4 back to
270
+ stage 0 for data_parallel_id = 0, and [9, 5] represents the same wrap-around
271
+ for data_parallel_id = 1.
272
+ """
273
+
274
+ def __init__(self, topology=None, process_group=None):
275
+ # TODO use process_group if provided
276
+ self.global_rank = dist.get_rank()
277
+ self.world_size = dist.get_world_size()
278
+ if topology is not None:
279
+ if self.global_rank == 0:
280
+ print('Using topology:', topology)
281
+ self._topo = topology
282
+ else:
283
+ num_pp = 1
284
+ num_dp = 1
285
+ for idx, prime in enumerate(_prime_factors(self.world_size)):
286
+ if idx % 2 == 0:
287
+ num_pp *= prime
288
+ else:
289
+ num_dp *= prime
290
+ self._topo = PipeDataParallelTopology(num_dp=num_dp, num_pp=num_pp)
291
+ self.data_parallel_size = max(self._topo.get_dim('data'), 1)
292
+ self.pipe_parallel_size = max(self._topo.get_dim('pipe'), 1)
293
+ self.model_parallel_size = max(self._topo.get_dim('model'), 1)
294
+ self.slice_parallel_size = self.model_parallel_size
295
+ assert self._is_grid_valid(), "Invalid Grid"
296
+
297
+ self.stage_id = self.get_stage_id()
298
+ self.data_parallel_id = self.get_data_parallel_id()
299
+
300
+ # Create new ProcessGroups for all model parallelism. DeepSpeedLight uses these
301
+ # to detect overflow, etc.
302
+ self.ds_model_proc_group = None
303
+ self.ds_model_rank = -1
304
+ for dp in range(self.data_parallel_size):
305
+ ranks = sorted(self._topo.get_axis_list(axis='data', idx=dp))
306
+ if self.global_rank == 0:
307
+ #print(f'RANK={self.global_rank} building DeepSpeed model group: {ranks}')
308
+ pass
309
+ proc_group = dist.new_group(ranks=ranks)
310
+ if self.global_rank in ranks:
311
+ self.ds_model_proc_group = proc_group
312
+ self.ds_model_world_size = len(ranks)
313
+ self.ds_model_rank = ranks.index(self.global_rank)
314
+ assert self.ds_model_rank > -1
315
+ assert self.ds_model_proc_group is not None
316
+
317
+ # Create new ProcessGroup for gradient all-reduces - these are the data parallel groups
318
+ self.dp_group = []
319
+ self.dp_groups = self._topo.get_axis_comm_lists('data')
320
+ for g in self.dp_groups:
321
+ proc_group = dist.new_group(ranks=g)
322
+ if self.global_rank in g:
323
+ self.dp_group = g
324
+ self.dp_proc_group = proc_group
325
+
326
+ self.is_first_stage = (self.stage_id == 0)
327
+ self.is_last_stage = (self.stage_id == (self.pipe_parallel_size - 1))
328
+
329
+ self.p2p_groups = self._build_p2p_groups()
330
+
331
+ # Create new ProcessGroup for pipeline collectives - these are pipe parallel groups
332
+ self.pp_group = []
333
+ self.pp_proc_group = None
334
+ self.pipe_groups = self._topo.get_axis_comm_lists('pipe')
335
+ for ranks in self.pipe_groups:
336
+ if self.global_rank == 0:
337
+ #print(f'RANK={self.global_rank} building pipeline group: {ranks}')
338
+ pass
339
+ proc_group = dist.new_group(ranks=ranks)
340
+ if self.global_rank in ranks:
341
+ self.pp_group = ranks
342
+ self.pp_proc_group = proc_group
343
+ assert self.pp_proc_group is not None
344
+
345
+ # Create new ProcessGroup for model (tensor-slicing) collectives
346
+
347
+ # Short circuit case without model parallelism.
348
+ # TODO: it would be nice if topology had bcast semantics to avoid this branching
349
+ # case?
350
+ if self.model_parallel_size == 1:
351
+ for group_rank in range(self.world_size):
352
+ group_rank = [group_rank]
353
+ group = dist.new_group(ranks=group_rank)
354
+ if group_rank[0] == self.global_rank:
355
+ self.slice_group = group_rank
356
+ self.slice_proc_group = group
357
+ return
358
+ else:
359
+ self.mp_group = []
360
+ self.model_groups = self._topo.get_axis_comm_lists('model')
361
+ for g in self.model_groups:
362
+ proc_group = dist.new_group(ranks=g)
363
+ if self.global_rank in g:
364
+ self.slice_group = g
365
+ self.slice_proc_group = proc_group
366
+
367
+ def get_stage_id(self):
368
+ return self._topo.get_coord(rank=self.global_rank).pipe
369
+
370
+ def get_data_parallel_id(self):
371
+ return self._topo.get_coord(rank=self.global_rank).data
372
+
373
+ def _build_p2p_groups(self):
374
+ """Groups for sending and receiving activations and gradients across model
375
+ parallel stages.
376
+ """
377
+ comm_lists = self._topo.get_axis_comm_lists('pipe')
378
+ p2p_lists = []
379
+ for rank in range(self.world_size):
380
+ for l in comm_lists:
381
+ assert len(l) == self.pipe_parallel_size
382
+ if rank in l:
383
+ idx = l.index(rank)
384
+ buddy_rank = l[(idx + 1) % self.pipe_parallel_size]
385
+ p2p_lists.append([rank, buddy_rank])
386
+ break # next global rank
387
+ assert len(p2p_lists) == self.world_size
388
+ return p2p_lists
389
+
390
+ def _is_grid_valid(self):
391
+ ranks = 1
392
+ for ax in self._topo.get_axis_names():
393
+ ranks *= self._topo.get_dim(ax)
394
+ return ranks == dist.get_world_size()
395
+
396
+ # Returns the global rank of the process with the provided stage id
397
+ # which has the same data_parallel_id as the caller process.
398
+ def stage_to_global(self, stage_id, **kwargs):
399
+ me = self._topo.get_coord(self.global_rank)
400
+ transform = me._replace(pipe=stage_id, **kwargs)._asdict()
401
+ return self._topo.get_rank(**transform)
402
+
403
+ def topology(self):
404
+ return self._topo
405
+
406
+ # MPU functions for DeepSpeed integration
407
+ def get_global_rank(self):
408
+ return self.global_rank
409
+
410
+ def get_pipe_parallel_rank(self):
411
+ """ The stage of the pipeline this rank resides in. """
412
+ return self.get_stage_id()
413
+
414
+ def get_pipe_parallel_world_size(self):
415
+ """ The number of stages in the pipeline. """
416
+ return self.pipe_parallel_size
417
+
418
+ def get_pipe_parallel_group(self):
419
+ """ The group of ranks within the same pipeline. """
420
+ return self.pp_proc_group
421
+
422
+ def get_data_parallel_rank(self):
423
+ """ Which pipeline this rank resides in. """
424
+ return self.data_parallel_id
425
+
426
+ def get_data_parallel_world_size(self):
427
+ """ The number of pipelines. """
428
+ return self.data_parallel_size
429
+
430
+ def get_data_parallel_group(self):
431
+ """ The group of ranks within the same stage of all pipelines. """
432
+ return self.dp_proc_group
433
+
434
+ # These are model parallel groups across all types of model parallelism.
435
+ # Deepspeed uses them to detect overflow, etc.
436
+ def get_model_parallel_rank(self):
437
+ return self.ds_model_rank
438
+
439
+ def get_model_parallel_world_size(self):
440
+ return self.ds_model_world_size
441
+
442
+ def get_model_parallel_group(self):
443
+ return self.ds_model_proc_group
444
+
445
+ # For Megatron-style tensor slicing
446
+ def get_slice_parallel_rank(self):
447
+ if 'model' in self._topo.get_axis_names():
448
+ return self._topo.get_coord(rank=self.global_rank).model
449
+ else:
450
+ return 0
451
+
452
+ def get_slice_parallel_world_size(self):
453
+ return self.slice_parallel_size
454
+
455
+ def get_slice_parallel_group(self):
456
+ return self.slice_proc_group
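When no topology is supplied, ``PipelineParallelGrid`` splits the world size by interleaving its prime factors between the pipe and data axes. The split can be previewed without any distributed setup by mirroring that loop; ``_prime_factors`` is a private helper of this module, so this is purely exploratory.

.. code-block:: python

    from deepspeed.runtime.pipe.topology import _prime_factors


    def default_pp_dp_split(world_size):
        """Mirror the fallback in PipelineParallelGrid.__init__: even-indexed
        primes grow pipe parallelism, odd-indexed primes grow data parallelism."""
        num_pp, num_dp = 1, 1
        for idx, prime in enumerate(_prime_factors(world_size)):
            if idx % 2 == 0:
                num_pp *= prime
            else:
                num_dp *= prime
        return num_pp, num_dp


    for ws in (4, 8, 12, 16):
        print(ws, default_pp_dp_split(ws))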
venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .partition_parameters import ZeroParamType
7
+ from .partition_parameters import ZeroParamStatus
8
+ from .partition_parameters import Init
9
+ from .partition_parameters import GatheredParameters
10
+ from .partition_parameters import register_external_parameter
11
+
12
+ from .tiling import TiledLinear
13
+ from .tiling import TiledLinearReturnBias
14
+
15
+ from .mics import MiCS_Init
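These re-exports are what surface the ZeRO entry points at ``deepspeed.zero``. A hedged usage sketch follows: it assumes a ZeRO stage-3 run in which DeepSpeed has already initialized the distributed backend, and the model and initializer chosen here are placeholders rather than anything this file prescribes.

.. code-block:: python

    import torch
    import deepspeed

    # Construct the model with its parameters partitioned across ranks (ZeRO-3).
    with deepspeed.zero.Init():
        model = torch.nn.Linear(1024, 1024)

    # Temporarily gather a partitioned parameter; with modifier_rank=0, rank 0's
    # in-place modification is broadcast to the other ranks on context exit.
    with deepspeed.zero.GatheredParameters(model.weight, modifier_rank=0):
        if torch.distributed.get_rank() == 0:
            torch.nn.init.xavier_uniform_(model.weight)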
venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (528 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/config.cpython-310.pyc ADDED
Binary file (6.28 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/contiguous_memory_allocator.cpython-310.pyc ADDED
Binary file (7.76 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/linear.cpython-310.pyc ADDED
Binary file (5.46 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/mics.cpython-310.pyc ADDED
Binary file (16.3 kB). View file