diff --git a/.gitattributes b/.gitattributes index f4cc696bbfd3a47382b99441add3faa72ff24f04..30a76323b7e7780aaea1da254c7638e41e8349fa 100644 --- a/.gitattributes +++ b/.gitattributes @@ -138,3 +138,6 @@ venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux- venv/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..66bda96fa6bafa90c8a68f4033b665220880bcf6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .elasticity import compute_elastic_config, elasticity_enabled, ensure_immutable_elastic_config +from .utils import is_torch_elastic_compatible +from .constants import ENABLED, ENABLED_DEFAULT, ELASTICITY +if is_torch_elastic_compatible(): + from .elastic_agent import DSElasticAgent diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01d8bb77ab12478a0d971178b55f499a56a8951f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/config.py b/venv/lib/python3.10/site-packages/deepspeed/elasticity/config.py new file mode 100644 index 0000000000000000000000000000000000000000..7c6bd42cdfd93e7979e942d3b353e55744b356d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/elasticity/config.py @@ -0,0 +1,110 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import json +from .constants import * + + +class ElasticityError(Exception): + """ + Base exception for all elasticity related errors + """ + + +class ElasticityConfigError(ElasticityError): + """ + Elasticity configuration error + """ + + +class ElasticityIncompatibleWorldSize(ElasticityError): + """ + Attempting to run a world size that is incompatible with a given elastic config + """ + + +class ElasticityConfig: + """ + Elastic config object, constructed from a param dictionary that only contains elastic + config parameters, example below: + + If elasticity is enabled, user must specify (at least) max_train_batch_size + and micro_batch_sizes. 
+ + { + "enabled": true, + "max_train_batch_size": 2000, + "micro_batch_sizes": [2,4,6], + "min_gpus": 1, + "max_gpus" : 10000 + "min_time": 20 + "ignore_non_elastic_batch_info": false + "version": 0.1 + } + """ + + def __init__(self, param_dict): + self.enabled = param_dict.get(ENABLED, ENABLED_DEFAULT) + if self.enabled: + if MAX_ACCEPTABLE_BATCH_SIZE in param_dict: + self.max_acceptable_batch_size = param_dict[MAX_ACCEPTABLE_BATCH_SIZE] + else: + raise ElasticityConfigError(f"Elasticity config missing {MAX_ACCEPTABLE_BATCH_SIZE}") + if MICRO_BATCHES in param_dict: + self.micro_batches = param_dict[MICRO_BATCHES] + else: + raise ElasticityConfigError(f"Elasticity config missing {MICRO_BATCHES}") + else: + self.max_acceptable_batch_size = param_dict.get(MAX_ACCEPTABLE_BATCH_SIZE, + MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT) + self.micro_batches = param_dict.get(MICRO_BATCHES, MICRO_BATCHES_DEFAULT) + + if not isinstance(self.micro_batches, list): + raise ElasticityConfigError( + f"Elasticity expected value of {MICRO_BATCHES} to be a " + f"list of micro batches, instead is: {type(self.micro_batches)}, containing: {self.micro_batches}") + + if not all(map(lambda m: isinstance(m, int), self.micro_batches)): + raise ElasticityConfigError(f"Elasticity expected {MICRO_BATCHES} to only contain a list of integers, " + f"instead contains: f{self.micro_batches}") + + if not all(map(lambda m: m > 0, self.micro_batches)): + raise ElasticityConfigError(f"Elasticity expected {MICRO_BATCHES} to only contain positive integers, " + f"instead contains: f{self.micro_batches}") + + self.min_gpus = param_dict.get(MIN_GPUS, MIN_GPUS_DEFAULT) + self.max_gpus = param_dict.get(MAX_GPUS, MAX_GPUS_DEFAULT) + + if self.min_gpus < 1 or self.max_gpus < 1: + raise ElasticityConfigError("Elasticity min/max gpus must be > 0, " + f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}") + if self.max_gpus < self.min_gpus: + raise ElasticityConfigError("Elasticity min_gpus cannot be greater than max_gpus, " + f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}") + + self.model_parallel_size = param_dict.get(MODEL_PARALLEL_SIZE, MODEL_PARALLEL_SIZE_DEFAULT) + if self.model_parallel_size < 1: + raise ElasticityConfigError("Model-Parallel size cannot be less than 1, " + f"given model-parallel size: {self.model_parallel_size}") + + self.num_gpus_per_node = param_dict.get(NUM_GPUS_PER_NODE, NUM_GPUS_PER_NODE_DEFAULT) + if self.num_gpus_per_node < 1: + raise ElasticityConfigError("Number of GPUs per node cannot be less than 1, " + f"given number of GPUs per node: {self.num_gpus_per_node}") + + self.min_time = param_dict.get(MIN_TIME, MIN_TIME_DEFAULT) + if self.min_time < 0: + raise ElasticityConfigError(f"Elasticity min time needs to be >= 0: given {self.min_time}") + + self.version = param_dict.get(VERSION, VERSION_DEFAULT) + self.prefer_larger_batch_size = param_dict.get(PREFER_LARGER_BATCH, PREFER_LARGER_BATCH_DEFAULT) + self.ignore_non_elastic_batch_info = param_dict.get(IGNORE_NON_ELASTIC_BATCH_INFO, + IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT) + + def repr(self): + return self.__dict__ + + def __repr__(self): + return json.dumps(self.__dict__, sort_keys=True, indent=4) diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/constants.py b/venv/lib/python3.10/site-packages/deepspeed/elasticity/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..b3134c54b4d6a127a04b9f633a51226b6713be20 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/elasticity/constants.py @@ -0,0 
+1,81 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +######################################### +# Elasticity +######################################### +''' Elasticity Utility in DeepSpeed can be used to create highly elastic jobs compatible +with a large number of GPUs. For elastic jobs, DeepSpeed will provide a batch size that +can support a large number of GPUs based on the user specified parameters +''' +FORMAT = ''' +Elasticity should be enabled as: +"elasticity": { + "enabled": true, + "max_train_batch_size": 2000, + "micro_batch_sizes": [2,4,6], + "min_gpus": 1, + "max_gpus" : 10000, + "min_time": 20, + "prefer_larger_batch": true, + "ignore_non_elastic_batch_info": false, + "version": 0.1 +} +''' + +ELASTICITY = 'elasticity' + +# Current elasticity version +LATEST_ELASTICITY_VERSION = 0.2 + +ENABLED = 'enabled' +ENABLED_DEFAULT = False + +# Max acceptable train_batch_size +MAX_ACCEPTABLE_BATCH_SIZE = 'max_train_batch_size' +MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT = 2000 + +# Acceptable micro batch sizes, same as train_micro_batch_size_per_gpu +MICRO_BATCHES = 'micro_batch_sizes' +MICRO_BATCHES_DEFAULT = [2, 4, 6] + +# Min/max of GPUs to search over +MIN_GPUS = 'min_gpus' +MIN_GPUS_DEFAULT = 1 +MAX_GPUS = 'max_gpus' +MAX_GPUS_DEFAULT = 10000 + +NUM_GPUS_PER_NODE = 'num_gpus_per_node' +NUM_GPUS_PER_NODE_DEFAULT = 1 + +MODEL_PARALLEL_SIZE = "model_parallel_size" +MODEL_PARALLEL_SIZE_DEFAULT = 1 + +# Minimum running time (minutes) before the scheduler will scale us, 0 implies it's unknown +MIN_TIME = "min_time" +MIN_TIME_DEFAULT = 0 + +# When finding a suitable batch size, attempt to find one that is closest +# to the max train batch size given. +PREFER_LARGER_BATCH = 'prefer_larger_batch' +PREFER_LARGER_BATCH_DEFAULT = True + +# In order to reduce confusion, if elastic mode is enabled we +# require (via assert) that no batch info is set outside of the +# elastic config. You can turn off this assert via this config +# but keep in mind that all batch info defined outside the +# elastic mode *will be ignored*. +IGNORE_NON_ELASTIC_BATCH_INFO = 'ignore_non_elastic_batch_info' +IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT = False + +# Version of elastic logic to use +VERSION = "version" +VERSION_DEFAULT = LATEST_ELASTICITY_VERSION + +# Minimum deepspeed version to use elasticity +MINIMUM_DEEPSPEED_VERSION = "0.3.8" + +# Environment variable storing elastic config from resource scheduler +DEEPSPEED_ELASTICITY_CONFIG = "DEEPSPEED_ELASTICITY_CONFIG" diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/elastic_agent.py b/venv/lib/python3.10/site-packages/deepspeed/elasticity/elastic_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..6051529bfa24ade788c482bb7d49232353852708 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/elasticity/elastic_agent.py @@ -0,0 +1,188 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent +from typing import Any, Dict, Optional, Tuple +from datetime import datetime +from torch.distributed.elastic.agent.server.api import _get_socket_with_port +from torch.distributed.elastic.metrics import put_metric +from torch.distributed.elastic.agent.server.api import ( + RunResult, + WorkerGroup, + WorkerSpec, + WorkerState, +) +from torch.distributed import Store +import time +import os +from torch.distributed.elastic.multiprocessing import start_processes +from torch.distributed.elastic.utils import macros +import shutil +import copy +from contextlib import closing +import subprocess + +from torch.distributed.elastic.utils.logging import get_logger + +log = get_logger(__name__) + + +class DSElasticAgent(LocalElasticAgent): + + def __init__( + self, + spec: WorkerSpec, + env: Dict, + start_method="spawn", + exit_barrier_timeout: float = 300, + log_dir: Optional[str] = None, + ): + super().__init__(spec, start_method, exit_barrier_timeout, log_dir) + self.ds_env = env + + @staticmethod + def _set_master_addr_port(store: Store, + master_addr: Optional[str], + master_port: Optional[int], + local_addr: Optional[str] = None): + if master_port is None: + sock = _get_socket_with_port() + with closing(sock): + master_port = sock.getsockname()[1] + + if master_addr is None: + # master_addr = _get_fq_hostname() + result = subprocess.check_output("hostname -I", shell=True) + master_addr = result.decode('utf-8').split()[0] + + store.set("MASTER_ADDR", master_addr.encode(encoding="UTF-8")) + store.set("MASTER_PORT", str(master_port).encode(encoding="UTF-8")) + + def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]: + spec = worker_group.spec + store = worker_group.store + assert store is not None + master_addr, master_port = super()._get_master_addr_port(store) + restart_count = spec.max_restarts - self._remaining_restarts + + use_agent_store = spec.rdzv_handler.get_backend() == "static" + + args: Dict[int, Tuple] = {} + envs: Dict[int, Dict[str, str]] = {} + for worker in worker_group.workers: + local_rank = worker.local_rank + + worker_env_ds = copy.deepcopy(self.ds_env) + worker_env_elastic = { + "LOCAL_RANK": str(local_rank), + "RANK": str(worker.global_rank), + "GROUP_RANK": str(worker_group.group_rank), + "ROLE_RANK": str(worker.role_rank), + "ROLE_NAME": spec.role, + "LOCAL_WORLD_SIZE": str(spec.local_world_size), + "WORLD_SIZE": str(worker.world_size), + "GROUP_WORLD_SIZE": str(worker_group.group_world_size), + "ROLE_WORLD_SIZE": str(worker.role_world_size), + "MASTER_ADDR": master_addr, + "MASTER_PORT": str(master_port), + "TORCHELASTIC_RESTART_COUNT": str(restart_count), + "TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts), + "TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(), + "TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store), + "NCCL_ASYNC_ERROR_HANDLING": os.getenv("NCCL_ASYNC_ERROR_HANDLING", str(1)), + } + worker_env_ds.update(worker_env_elastic) + if "OMP_NUM_THREADS" in os.environ: + worker_env_ds["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"] + + envs[local_rank] = worker_env_ds + worker_args = list(spec.args) + worker_args = macros.substitute(worker_args, str(local_rank)) + args[local_rank] = tuple(worker_args) + + # scaling events do not count towards restarts (gets same attempt #) + # remove existing log dir if this restart is due to a scaling event + attempt_log_dir = os.path.join(self._log_dir, 
f"attempt_{restart_count}") + shutil.rmtree(attempt_log_dir, ignore_errors=True) + os.makedirs(attempt_log_dir) + + assert spec.entrypoint is not None + self._pcontext = start_processes( + name=spec.role, + entrypoint=spec.entrypoint, + args=args, + envs=envs, + log_dir=attempt_log_dir, + start_method=self._start_method, + redirects=spec.redirects, + tee=spec.tee, + ) + + return self._pcontext.pids() + + def _invoke_run(self, role: str = "default") -> RunResult: + # NOTE: currently only works for a single role + + spec = self._worker_group.spec + role = spec.role + + log.info(f"[{role}] starting workers for entrypoint: {spec.get_entrypoint_name()}") + + self._initialize_workers(self._worker_group) + monitor_interval = spec.monitor_interval + rdzv_handler = spec.rdzv_handler + + participants = rdzv_handler._state_holder.state.participants + + while True: + assert self._worker_group.state != WorkerState.INIT + time.sleep(monitor_interval) + run_result = self._monitor_workers(self._worker_group) + state = run_result.state + self._worker_group.state = state + + expire_time = datetime.utcnow() - (rdzv_handler._settings.keep_alive_interval * + rdzv_handler._settings.keep_alive_max_attempt) + _dead_nodes = [ + node for node, last_heartbeat in rdzv_handler._state_holder.state.last_heartbeats.items() + if last_heartbeat < expire_time + ] + + put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts) + put_metric(f"workers.{role}.{state.name.lower()}", 1) + + if state == WorkerState.SUCCEEDED: + log.info(f"[{role}] worker group successfully finished." + f" Waiting {self._exit_barrier_timeout} seconds for other agents to finish.") + self._exit_barrier() + return run_result + elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED + } or len(participants) > len(rdzv_handler._state_holder.state.participants): + if self._remaining_restarts > 0: + log.info(f"[{role}] Worker group {state.name}. " + f"{self._remaining_restarts}/{spec.max_restarts} attempts left;" + f" will restart worker group") + self._remaining_restarts -= 1 + # rdzv_handler._state_holder.state.restart = False + self._restart_workers(self._worker_group) + participants = rdzv_handler._state_holder.state.participants + + else: + self._stop_workers(self._worker_group) + self._worker_group.state = WorkerState.FAILED + self._exit_barrier() + return run_result + elif state == WorkerState.HEALTHY: + # membership changes do not count as retries + num_nodes_waiting = rdzv_handler.num_nodes_waiting() + group_rank = self._worker_group.group_rank + if num_nodes_waiting > 0: + log.info(f"[{role}] Detected {num_nodes_waiting} " + f"new nodes from group_rank={group_rank}; " + f"will restart worker group") + self._restart_workers(self._worker_group) + participants = rdzv_handler._state_holder.state.participants + else: + raise Exception(f"[{role}] Worker group in {state.name} state") diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/elasticity.py b/venv/lib/python3.10/site-packages/deepspeed/elasticity/elasticity.py new file mode 100644 index 0000000000000000000000000000000000000000..f26f13c98dfaf965597d699418d98c80f35f67b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/elasticity/elasticity.py @@ -0,0 +1,378 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import json +import numpy as np +import math +from packaging import version as pkg_version + +from .config import ElasticityConfig, ElasticityConfigError, ElasticityError, \ + ElasticityIncompatibleWorldSize +from .constants import ELASTICITY, ENABLED, ENABLED_DEFAULT, LATEST_ELASTICITY_VERSION, \ + MINIMUM_DEEPSPEED_VERSION, DEEPSPEED_ELASTICITY_CONFIG +from ..git_version_info import version as __version__ +from ..utils import logger + +# Thirty eight smallest highly composite numbers. The list should +# be enough to support up to 720K batch size. +HCN_LIST = [ + 1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680, 2520, 5040, 7560, 10080, 15120, 20160, + 25200, 27720, 45360, 50400, 55440, 83160, 110880, 166320, 221760, 277200, 332640, 498960, 554400, 665280, 720720 +] + + +def get_candidate_batch_sizes(base_list, max_acceptable_batch_size): + candidate_batch_size = [] + for base in base_list: + if base >= max_acceptable_batch_size: + candidate_batch_size.append(base) + else: + value = max_acceptable_batch_size // base + index = np.argmax(np.asarray(HCN_LIST) > value) + candidate_batch_size.append(HCN_LIST[index - 1] * base) + candidate_batch_size = list(set(candidate_batch_size)) + logger.info(f"Candidate batch size: {candidate_batch_size}") + return candidate_batch_size + + +def get_valid_gpus(batch_size, micro_batches, min_valid_gpus, max_valid_gpus): + valid_gpus = [] + for micro_batch in micro_batches: + if batch_size % micro_batch == 0: + + max_gpus = batch_size // micro_batch + if min_valid_gpus <= max_gpus <= max_valid_gpus: + valid_gpus.append(max_gpus) + + # find all factors less than max_gpus / 2 + for i in range(1, max_gpus // 2 + 1): + if i > max_valid_gpus: + break + if i < min_valid_gpus: + continue + if max_gpus % i == 0: + valid_gpus.append(i) + valid_gpus = set(valid_gpus) + valid_gpus = sorted(list(valid_gpus)) + return valid_gpus + + +def get_best_candidates(candidate_batch_sizes, micro_batches, min_gpus, max_gpus, prefer_larger): + + max_valid_gpus = 0 + valid_gpus = None + final_batch_size = int(min(micro_batches)) + + for batch_size in candidate_batch_sizes: + + current_valid_gpus = get_valid_gpus(batch_size, micro_batches, min_gpus, max_gpus) + + if (len(current_valid_gpus) > max_valid_gpus or (len(current_valid_gpus) == max_valid_gpus and + ((prefer_larger and batch_size > final_batch_size) or + (not prefer_larger and batch_size < final_batch_size)))): + max_valid_gpus = len(current_valid_gpus) + valid_gpus = current_valid_gpus + final_batch_size = batch_size + + return final_batch_size, valid_gpus + + +def _get_compatible_gpus_v01(micro_batches, + max_acceptable_batch_size, + min_gpus=None, + max_gpus=None, + prefer_larger=True): + '''We use two heuristics to compute the batch size + 1. We use the Lowest Common Multiple of the micro-batches + as the base batch size and scale it by a HCN such that the result is + the largest batch size less than the max_acceptable batch size + 2. We use each of the micro batches as a base and scale it + by a HCN such that the result is the largest batch size less than the + max_acceptable batch size. + + We then use brute force to count the number of compatible GPU count for + each of the aforementioned cases, and return the batch size with the most number of + compatible GPU counts in the min-max GPU range if provided, other wise + we return the batch size with the most number of total compatible GPU counts. 
+ + Returns: + final_batch_size + valid_gpus + ''' + min_gpus = min_gpus or 1 + max_gpus = max_gpus or max_acceptable_batch_size // min(micro_batches) + + if not all(mb <= max_acceptable_batch_size for mb in micro_batches): + raise ValueError(f"All micro batches must be less than \ + or equal to max_acceptable_batch_size: {max_acceptable_batch_size}") + + lcm = np.lcm.reduce(micro_batches) + + base_list = [] + base_list.extend(micro_batches) + base_list.append(lcm) + + candidate_batch_sizes = get_candidate_batch_sizes(base_list, max_acceptable_batch_size) + + final_batch_size, valid_gpus = get_best_candidates(candidate_batch_sizes, micro_batches, min_gpus, max_gpus, + prefer_larger) + + return final_batch_size, valid_gpus + + +def _get_compatible_gpus_v02(micro_batches, + max_acceptable_batch_size, + current_num_gpus, + min_gpus=None, + max_gpus=None, + prefer_larger=True, + num_gpus_per_node=1, + model_parallel_size=1): + ''' + Returns: + final_batch_size + valid_gpus + micro-batch size + ''' + if num_gpus_per_node % model_parallel_size != 0: + raise ElasticityError( + f"In Elasticity v0.2, number of GPUs per node:" \ + f"{num_gpus_per_node} should be divisible by " \ + f"model parallel size {model_parallel_size}") + + def get_microbatch(final_batch_size): + candidate_microbatch = None + + for micro_batch in micro_batches: + if final_batch_size // current_num_gpus % micro_batch == 0: + if candidate_microbatch is None: + candidate_microbatch = micro_batch + if prefer_larger and candidate_microbatch < micro_batch: + candidate_microbatch = micro_batch + return candidate_microbatch + + dp_size_per_node = num_gpus_per_node // model_parallel_size + + final_batch_size, valid_world_size = _get_compatible_gpus_v01( + micro_batches, + int(max_acceptable_batch_size / dp_size_per_node), + int(min_gpus / num_gpus_per_node), + int(max_gpus / num_gpus_per_node), # Passing number of max nodes as Elasticity v2 works at node level + prefer_larger=prefer_larger) + + final_batch_size = int(final_batch_size) * dp_size_per_node + valid_dp_world_size = [i * dp_size_per_node for i in valid_world_size] + if current_num_gpus // model_parallel_size in valid_dp_world_size: + candidate_microbatch = get_microbatch(final_batch_size) + return final_batch_size, valid_dp_world_size, candidate_microbatch + + current_dp_size = (current_num_gpus / num_gpus_per_node) * dp_size_per_node + candidate_batch_sizes = [] + for micro_batch in micro_batches: + min_batch_size = micro_batch * current_dp_size + + factor = math.floor(max_acceptable_batch_size / float(min_batch_size)) + candidate_batch_sizes.append(factor * min_batch_size) + + used_microbatch = None + if prefer_larger: + candidate_batch_size = max(candidate_batch_sizes) + else: + candidate_batch_size = min(candidate_batch_sizes) + + candidate_microbatch = get_microbatch(candidate_batch_size) + + return candidate_batch_size, [int(current_dp_size)], candidate_microbatch + + +def _compatible_ds_version_check(target_deepspeed_version: str): + min_version = pkg_version.parse(MINIMUM_DEEPSPEED_VERSION) + target_version = pkg_version.parse(target_deepspeed_version) + + err_str = f"Target deepspeed version of {target_deepspeed_version} is not compatible " \ + f"with minimum version {MINIMUM_DEEPSPEED_VERSION} supporting elasticity." 
+ if target_version < min_version: + raise ElasticityError(err_str) + return True + + +def elasticity_enabled(ds_config: dict): + if ELASTICITY not in ds_config: + return False + return ds_config[ELASTICITY].get(ENABLED, ENABLED_DEFAULT) + + +def ensure_immutable_elastic_config(runtime_elastic_config_dict: dict): + """ + Ensure the resource scheduler saw the same elastic config we are using at runtime + """ + if DEEPSPEED_ELASTICITY_CONFIG in os.environ: + scheduler_elastic_config_dict = json.loads(os.environ[DEEPSPEED_ELASTICITY_CONFIG]) + scheduler_elastic_config = ElasticityConfig(scheduler_elastic_config_dict) + runtime_elastic_config = ElasticityConfig(runtime_elastic_config_dict) + err_str = "Elastic config '{}={}' seen by resource scheduler does not match config passed to runtime {}={}" + if runtime_elastic_config.max_acceptable_batch_size != scheduler_elastic_config.max_acceptable_batch_size: + raise ElasticityConfigError( + err_str.format('max_acceptable_batch_size', scheduler_elastic_config.max_acceptable_batch_size, + 'max_acceptable_batch_size', runtime_elastic_config.max_acceptable_batch_size)) + if runtime_elastic_config.micro_batches != scheduler_elastic_config.micro_batches: + raise ElasticityConfigError( + err_str.format('micro_batches', scheduler_elastic_config.micro_batches, 'micro_batches', + runtime_elastic_config.micro_batches)) + if runtime_elastic_config.version != scheduler_elastic_config.version: + raise ElasticityConfigError( + err_str.format('version', scheduler_elastic_config.version, 'version', runtime_elastic_config.version)) + else: + logger.warning("Unable to find DEEPSPEED_ELASTICITY_CONFIG environment variable, cannot " \ + "guarantee resource scheduler will scale this job using compatible GPU counts.") + + +def compute_elastic_config(ds_config: dict, target_deepspeed_version: str, world_size=0, return_microbatch=False): + """Core deepspeed elasticity API. Given an elastic config (similar to the example below) + DeepSpeed will compute a total train batch size corresponding valid GPU count list that + provides a high level of elasticity. Elasticity in this case means we are safe to scale + the training job up/down across the GPU count list *without* any negative impacts on + training convergence. This is achievable primarily due to DeepSpeed's gradient accumulation + feature which allows us to decompose a global training batch size into: + micro-batch-size * gradient-accumulation-steps * world-size. + + "elasticity": { + "enabled": true, + "max_train_batch_size": 2000, + "micro_batch_sizes": [2,4,6], + "min_gpus": 1, + "max_gpus" : 10000 + "min_time": 20 + "version": 0.1 + } + + Intended to be called both by scheduling infrastructure and deepspeed runtime. + For the same `ds_config` we should return deterministic results. + + Args: + ds_config (dict): DeepSpeed config dictionary/json + target_deepspeed_version (str): When called from scheduling + infrastructure we want to ensure that the target deepspeed version is + compatible with the elasticity version used in the backend. + world_size (int, optional): Intended/current DP world size, will do some sanity + checks to ensure world size is actually valid with the config. + return_microbatch (bool, optional): whether to return micro batch size or not. 
+ + Raises: + ElasticityConfigError: Missing required elasticity config or elasticity disabled + ElasticityError: If target deepspeed version is not compatible with current version + + Returns: + final_batch_size (int): total batch size used for training + valid_gpus (list(int)): list of valid GPU counts with this config + micro_batch_size (int, optional): if world_size is provided will return + specific micro batch size + """ + if not isinstance(ds_config, dict): + raise ValueError("Expected ds_config to be a dictionary but received " \ + f"a {type(ds_config)}, containing: {ds_config}") + + if ELASTICITY not in ds_config: + raise ElasticityConfigError(f"'{ELASTICITY}' is missing from config json," \ + " please add it if running an elastic training job.") + + elastic_config_dict = ds_config[ELASTICITY] + if not elastic_config_dict.get(ENABLED, ENABLED_DEFAULT): + raise ElasticityConfigError("Elasticity is disabled, please enable it " \ + "('enabled':true) if running an elastic training job.") + + elastic_config = ElasticityConfig(elastic_config_dict) + model_parallel_size = elastic_config.model_parallel_size + num_gpus_per_node = elastic_config.num_gpus_per_node + + if model_parallel_size > 1 and float(elastic_config.version) != 0.2: + raise ElasticityConfigError(f"Elasticity V{elastic_config.version} " \ + f"does not support model-parallel training. Given model-parallel size: " \ + f"{model_parallel_size}") + + if float(elastic_config.version) > LATEST_ELASTICITY_VERSION: + raise ElasticityConfigError("Attempting to run elasticity version " \ + f"{elastic_config.version} but runtime only supports up " \ + f"to {LATEST_ELASTICITY_VERSION}") + + # Ensure target deepspeed version works with intended elasticity version + if not _compatible_ds_version_check(target_deepspeed_version): + raise ElasticityError("Unable to run elasticity on target deepspeed version of" \ + f" {target_deepspeed_version}, currently {__version__}") + + if float(elastic_config.version) == 0.1: + final_batch_size, valid_gpus = _get_compatible_gpus_v01( + micro_batches=elastic_config.micro_batches, + max_acceptable_batch_size=elastic_config.max_acceptable_batch_size, + min_gpus=elastic_config.min_gpus, + max_gpus=elastic_config.max_gpus, + prefer_larger=elastic_config.prefer_larger_batch_size) + # ensure batch size is int dtype + final_batch_size = int(final_batch_size) + elif float(elastic_config.version) == 0.2: + if world_size != 0: + current_num_gpus = world_size + else: + if "WORLD_SIZE" in os.environ and \ + os.getenv('WORLD_SIZE').isnumeric(): + current_num_gpus = int(os.getenv('WORLD_SIZE')) + else: + WORLD_SIZE = os.getenv('WORLD_SIZE') + raise ElasticityConfigError( + 'Elasticity V 0.2 needs WORLD_SIZE '\ + 'to compute valid batch size. '\ + 'Either give it as argument to function compute_elastic_config '\ + 'or set it as an environment variable. 
'\ + f'Value of WORLD_SIZE as environment variable is {WORLD_SIZE}') + + final_batch_size, valid_gpus, candidate_microbatch_size = _get_compatible_gpus_v02( + micro_batches=elastic_config.micro_batches, + max_acceptable_batch_size=elastic_config.max_acceptable_batch_size, + current_num_gpus=current_num_gpus, + min_gpus=elastic_config.min_gpus, + max_gpus=elastic_config.max_gpus, + prefer_larger=elastic_config.prefer_larger_batch_size, + num_gpus_per_node=num_gpus_per_node, + model_parallel_size=model_parallel_size) + # ensure batch size is int dtype + final_batch_size = int(final_batch_size) + else: + raise NotImplementedError(f"Unable to find elastic logic for version: {elastic_config.version}") + + logger.info(f"Valid World Size (GPUs / Model Parallel Size): {valid_gpus}") + + if world_size > 0: + if world_size not in valid_gpus: + raise ElasticityIncompatibleWorldSize(f"World size ({world_size}) is not valid " \ + f"with the current list of valid GPU counts: {valid_gpus}") + + # Pick largest valid micro batch size + micro_batch_size = None + for mbsz in sorted(list(set(elastic_config.micro_batches)), reverse=True): + if final_batch_size // world_size % mbsz == 0: + micro_batch_size = mbsz + break + assert micro_batch_size is not None, "Unable to find divisible micro batch size" \ + f" world_size={world_size}, final_batch_size={final_batch_size}, and " \ + f" micro_batches={elastic_config.micro_batches}." + return final_batch_size, valid_gpus, micro_batch_size + + if return_microbatch: + # Pick a valid micro batch size + if float(elastic_config.version) == 0.2: + return final_batch_size, valid_gpus, candidate_microbatch_size + else: + micro_batch_size = None + for mbsz in sorted(list(set(elastic_config.micro_batches)), reverse=True): + if final_batch_size // world_size % mbsz == 0: + micro_batch_size = mbsz + break + assert micro_batch_size is not None, "Unable to find divisible micro batch size" \ + f" world_size={world_size}, final_batch_size={final_batch_size}, and " \ + f" micro_batches={elastic_config.micro_batches}." + return final_batch_size, valid_gpus, micro_batch_size + + return final_batch_size, valid_gpus diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/utils.py b/venv/lib/python3.10/site-packages/deepspeed/elasticity/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..78ae0352cf6eaeab95eed20e9c8259a843f25b93 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/elasticity/utils.py @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.utils.torch import required_torch_version + + +def is_torch_elastic_compatible(): + ''' + Helper to lookup torch version. 
Elastic training is + introduced in 1.11.x + ''' + return required_torch_version(min_version=1.11) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/base_optimizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/base_optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..313679c0d71c8f5050819a3df48d267c45d53567 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/base_optimizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/compiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/compiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c9a24c35f6df0820eb477fdaede3b32b7afa6e8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/compiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bde1b5b54fde2e8cc66c65b285a7f8f358000f1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/config_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/config_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..900ee928617751374cf4093528c5faaf06ed4e02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/config_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf58d5aed99ae362258025b3e89d571aff41f477 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/dataloader.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/dataloader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc2a92679bb9969a493f4b180bd1fe0004651648 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/dataloader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/eigenvalue.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/eigenvalue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05c88a84b6e561f86533804127e2cc6ef5665998 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/eigenvalue.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8696f8e692efdd2c0493df67cdf680de4c81aa6e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/engine.cpython-310.pyc 
differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/hybrid_engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/hybrid_engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93deaed98c9853c5ac2593e1773dfa4394f308f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/hybrid_engine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/lr_schedules.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/lr_schedules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6932b474f4ee12b11875be2fe68d48b92734dc79 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/lr_schedules.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/progressive_layer_drop.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/progressive_layer_drop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83f48fbafe679b787faf981acbc9f3f18ceb22da Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/progressive_layer_drop.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/quantize.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/quantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23b0b7f1a312e7c42fc3d243028921d99c8024a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/quantize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/sparse_tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/sparse_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9afe88cec3dd7f42e420facf7486b029b4e85ae0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/sparse_tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/state_dict_factory.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/state_dict_factory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..023e7e1ab90ebc57fd99650a2ef0f9321ae2ee35 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/state_dict_factory.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84a239e73c6841c12806e9b5f0d44bae5ad5fb6a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/weight_quantizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/weight_quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88431bc628128142d1a2f249de96d50926fc72bd Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/weight_quantizer.cpython-310.pyc 
differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16e1d80a958e7b786f63ab612576f24a237f84d3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/checkpointing.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/checkpointing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea6ec2bb1be9bfe0efe02addf7034de0322607ad Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/checkpointing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50915c298be190b4a646a7393c1746c4c84c451e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bf93dfa981ff2eb257cf4eb2ee5fe071e3eeac1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/checkpoint_engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/checkpoint_engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65236c5bc3a9bddade042005106bd170e40c5d76 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/checkpoint_engine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/nebula_checkpoint_engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/nebula_checkpoint_engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..912dffa7426929f87f1e55775e8ce427719db283 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/nebula_checkpoint_engine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/torch_checkpoint_engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/torch_checkpoint_engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c659e8b0ffacd44abd6107f40e3af36062bc115b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/__pycache__/torch_checkpoint_engine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/checkpoint_engine.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/checkpoint_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..a341dffdf692cab4b49a0b7a5e6c7bb8c2658672 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/checkpoint_engine.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os + + +class CheckpointEngine(object): + + # init checkpoint engine for save/load + def __init__(self, config_params=None): + pass + + def create(self, tag): + # create checkpoint on give tag for save/load. + pass + + def makedirs(self, path, exist_ok=False): + os.makedirs(path, exist_ok=exist_ok) + + def save(self, state_dict, path: str): + pass + + def load(self, path: str, map_location=None): + pass + + def commit(self, tag): + # to tell checkpoint services if all files are ready. 
+ pass diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..e26e3243c4b5d9bb0daa12734a275c610442757c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py @@ -0,0 +1,107 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import torch +import torch_nebula + +from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \ + CheckpointEngine +from deepspeed.utils import logger, log_dist +from deepspeed.nebula.constants import * + + +def _get_tag_from_path(path): + return os.path.basename(os.path.dirname(path)) + + +class NebulaCheckpointEngine(CheckpointEngine): + + def __init__(self, config_params=None): + super().__init__(config_params) + self.checkpoint = None + self.tag_flag = None + self.enable_nebula_load = config_params.enable_nebula_load + self.nebula_load_path = config_params.load_path + if self.nebula_load_path is None: + self.nebula_load_path = config_params.persistent_storage_path + + nebula_config_params = { + NEBULA_PERSISTENT_STORAGE_PATH: config_params.persistent_storage_path, + NEBULA_PERSISTENT_TIME_INTERVAL: config_params.persistent_time_interval, + NEBULA_NUM_OF_VERSION_IN_RETENTION: config_params.num_of_version_in_retention, + } + torch_nebula.init(**nebula_config_params) + + def create(self, tag): + log_dist(f"[Nebula] Start Checkpoint for tag:{tag}", ranks=[0]) + # -2 means: customer needs to explicitly tell nebula + # current checkpoint is complete by commit method. + self.checkpoint = torch_nebula.Checkpoint(tag, -2) + + def save(self, state_dict, path: str): + log_dist(f"[Nebula] Create dummy files for loading.") + torch.save("", path) + + tag = _get_tag_from_path(path) + partition_name = os.path.basename(path) + logger.info(f"[Nebula] Saving {partition_name} under tag {tag}...") + self.checkpoint.save(partition_name, state_dict) + logger.info(f"[Nebula] Saved {partition_name} under tag {tag}.") + return None + + def load(self, path: str, map_location=None): + tag = _get_tag_from_path(path) + first_load_flag = self.tag_flag is None or self.tag_flag == tag + if not self.enable_nebula_load and first_load_flag: + self.tag_flag = tag + logger.info(f"[Nebula] Disable nebula load. Loading checkpoint from {path} ...") + partition = torch.load(path, map_location=map_location) + logger.info(f"[Nebula] Disable nebula load. Loaded checkpoint from {path} .") + return partition + + partition_name = os.path.basename(path) + logger.info(f"[Nebula] Loading {path} under tag {tag} from nebula path {self.nebula_load_path}...") + + checkpoint = None + if tag in (None, 'latest', 'latest_universal'): + # In some cases, there is the inconsistent tag between deepspeed metadata (latest file) + # and nebula metadata, will lead to the failure on loading with deepspeed tag. Then we + # will try to load the valid latest checkpoint from nebula(tier3 > tier1). So, in summary + # when met failure loading for given tag, the loading priority would be like: + # nebula tier3 latest > nebula tier1 latest. 
+ checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path) + else: + checkpoint = torch_nebula.get_checkpoint(tag=tag, persist_path=self.nebula_load_path) + + if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''): + logger.info( + f"Unable to find valid checkpoint tag:{tag} from Nebula, try to get latest checkpoint again from nebula {self.nebula_load_path} path!" + ) + # nebula tier3 latest + checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path) + if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''): + logger.info( + f"Unable to find latest checkpoint from Nebula tier3, try to get latest checkpoint again from nebula tier1 path!" + ) + # nebula tier1 latest + checkpoint = torch_nebula.get_latest_checkpoint() + logger.warning(f"Unable to find valid checkpoint from Nebula under tag:{tag}.") + return None + + tag = checkpoint.tag + self.tag_flag = -1 + partition = checkpoint.load(partition_name, map_location=map_location) + logger.info(f"[Nebula] Loaded {path} under tag {tag} from {self.nebula_load_path}.") + return partition + + def commit(self, tag): + # nebula commit will be call when all files under give tag are ready to be persisted in the async way. + logger.info(f"[Nebula] all files for {tag} are saved in tier1. It is ready to start persisting") + commit_rls = self.checkpoint.commit() + if not commit_rls: + logger.error(f"[Nebula] failed to commit the checkpoint, please check the log.") + return False + return commit_rls diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..5cd44864bb2ea2fc7be3e903fa61044ed4a8e5ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py @@ -0,0 +1,34 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed.utils import logger, log_dist +from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \ + CheckpointEngine + + +class TorchCheckpointEngine(CheckpointEngine): + + def __init__(self, config_params=None): + super().__init__(config_params) + + def create(self, tag): + log_dist(f"[Torch] Checkpoint {tag} is about to be saved!", ranks=[0]) + + def save(self, state_dict, path: str): + logger.info(f"[Torch] Saving {path}...") + torch.save(state_dict, path) + logger.info(f"[Torch] Saved {path}.") + return None + + def load(self, path: str, map_location=None): + logger.info(f"[Torch] Loading checkpoint from {path}...") + partition = torch.load(path, map_location=map_location) + logger.info(f"[Torch] Loaded checkpoint from {path}.") + return partition + + def commit(self, tag): + logger.info(f"[Torch] Checkpoint {tag} is ready now!") + return True diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c38d7ec43fe36922b45674e814f8ef9838872a44 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/cupy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/cupy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43c60df67e4828503ab6fe2636142b2b3152ac17 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/__pycache__/cupy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/cupy.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/cupy.py new file mode 100644 index 0000000000000000000000000000000000000000..b959a9c2037203e3306755d11078e042e0a1b2be --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/compression/cupy.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import cupy +from torch.utils.dlpack import to_dlpack +from torch.utils.dlpack import from_dlpack + + +class CupyBackend(object): + + def __init__(self): + pass + + def torch2cupy(self, tensor): + return cupy.fromDlpack(to_dlpack(tensor)) + + def cupy2torch(self, cupy_tensor): + return from_dlpack(cupy_tensor.toDlpack()) + + def compress_by_chunk(self, cupy_bool_tensor, num_chunks): + packed_sign = cupy.packbits(cupy_bool_tensor) + sign_list_packed = cupy.split(packed_sign, num_chunks) + cupy.cuda.get_current_stream().synchronize() + return sign_list_packed diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cf58eb1801af7cc02f3cc2057a5c843cd078b575 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .module import PipelineModule, LayerSpec, TiedLayerSpec +from .topology import ProcessTopology diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/engine.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/engine.py new file mode 100644 index 0000000000000000000000000000000000000000..1dda7f1aad32fbfca2e1fdbb3e462368a869f578 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/engine.py @@ -0,0 +1,1437 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from types import MethodType +from collections import OrderedDict + +import torch +from deepspeed import comm as dist + +from deepspeed.utils import logger +from deepspeed.utils.timer import ThroughputTimer +from deepspeed.accelerator import get_accelerator +from deepspeed.runtime.bf16_optimizer import BF16_Optimizer + +from ..engine import DeepSpeedEngine, MEMORY_OPT_ALLREDUCE_SIZE +from deepspeed.utils.timer import FORWARD_MICRO_TIMER, FORWARD_GLOBAL_TIMER, BACKWARD_MICRO_TIMER, \ + BACKWARD_GLOBAL_TIMER, BACKWARD_INNER_MICRO_TIMER, BACKWARD_INNER_GLOBAL_TIMER, \ + BACKWARD_REDUCE_MICRO_TIMER, BACKWARD_REDUCE_GLOBAL_TIMER, \ + STEP_MICRO_TIMER, STEP_GLOBAL_TIMER + +from ..utils import PartitionedTensor +from ..dataloader import RepeatingLoader +from ..zero.config import ZeroStageEnum +from ..activation_checkpointing import checkpointing as ds_checkpointing + +from .module import PipelineModule, PipelineError +from . import p2p +from . import schedule + +TARGET_ID = -2 +LOG_STAGE = -2 +DATA_PARALLEL_ID = -2 + +BATCH_INPUT_TIMER = 'batch_input' +TRAIN_BATCH_TIMER = 'train_batch' +PIPE_SEND_OUTPUT_TIMER = 'pipe_send_output' +PIPE_SEND_GRAD_TIMER = 'pipe_send_grad' +PIPE_RECV_INPUT_TIMER = 'pipe_recv_input' +PIPE_RECV_GRAD_TIMER = 'pipe_recv_grad' + + +def is_even(number): + return number % 2 == 0 + + +mem_alloced = 0 +mem_cached = 0 + + +def _tensor_bytes(tensor): + return tensor.numel() * tensor.element_size() + + +class PipelineEngine(DeepSpeedEngine): + """ A training engine hybrid pipeline, data, and model parallel training. + + This engine is created by ``deepspeed.initialize()`` when a :class:`PipelineModule` + is provided. + """ + ID_TO_DTYPE = [ + torch.float32, torch.float64, torch.complex64, torch.complex128, torch.float16, torch.bfloat16, torch.uint8, + torch.int8, torch.int16, torch.int32, torch.int64, torch.bool + ] + DTYPE_TO_ID = {dtype: id_ for id_, dtype in enumerate(ID_TO_DTYPE)} + + def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): + super().__init__(*super_args, **super_kwargs) + assert isinstance(self.module, PipelineModule) \ + or (hasattr(self.module, 'wrapped') and isinstance(self.module.wrapped, PipelineModule)), \ + "model must base PipelineModule" + + assert self.zero_optimization_stage( + ) < ZeroStageEnum.gradients, "ZeRO-2 and ZeRO-3 are incompatible with pipeline parallelism" + + # We schedule the all-reduces, so disable it in super().backward() + self.enable_backward_allreduce = False + self.has_bool_tensors = has_bool_tensors + self.eval_return_logits = False + self.outputs = None + # BF16 Optimizer is hardcoded for fp32 gradient accumulation + self.using_bf16_optimizer = type(self.optimizer) == BF16_Optimizer + + # used to disable the pipeline all-reduce when used with 1-bit Adam/1-bit LAMB + self.pipeline_enable_backward_allreduce = True + + if self.elasticity_enabled(): + if not self.is_elastic_model_parallel_supported(): + assert not self.elasticity_enabled(), "Elasticity is not currently supported" \ + " with pipeline parallelism." 
+ + # pipeline step for logging + self.log_batch_step_id = -1 + + self.micro_batch_size = self.train_micro_batch_size_per_gpu() + self.micro_batches = self.gradient_accumulation_steps() + + # Set Grid and Communication Groups + self.grid = self.module._grid + if self.grid.get_global_rank() == 0: + logger.info(f'CONFIG: micro_batches={self.micro_batches} ' + f'micro_batch_size={self.micro_batch_size}') + + self.global_rank = self.grid.get_global_rank() + + assert self.dp_world_size == self.grid.data_parallel_size + assert self.train_batch_size() == \ + self.micro_batch_size * self.micro_batches * self.grid.data_parallel_size + + # Set Stage Inf + self.num_stages = self.grid.pipe_parallel_size + self.stage_id = self.grid.get_stage_id() + self.prev_stage = self.stage_id - 1 + self.next_stage = self.stage_id + 1 + + self.data_iterator = None + self.batch_fn = None + + self._force_grad_boundary = False + + self.batch_timer = ThroughputTimer(batch_size=self.train_batch_size(), + logging_fn=self.tput_log, + monitor_memory=False, + steps_per_output=self.steps_per_print()) + + # PipelineEngine needs to handle data loading specially due to only the first + # and last stages loading inputs/labels. We construct a sampler that uses + if self.training_data: + self._build_data_iter(self.training_data) + + self.is_pipe_parallel = self.grid.pipe_parallel_size > 1 + self.is_data_parallel = self.grid.data_parallel_size > 1 + self.is_model_parallel = self.grid.model_parallel_size > 1 + + # Partition input/output buffers + # XXX temporarily disable while I revert some partition hacks. + assert isinstance(self._config.pipeline['pipe_partitioned'], bool) + assert isinstance(self._config.pipeline['grad_partitioned'], bool) + self.is_pipe_partitioned = self.is_model_parallel and self._config.pipeline['pipe_partitioned'] + self.is_grad_partitioned = self.is_model_parallel and self._config.pipeline['grad_partitioned'] + logger.info(f'is_pipe_partitioned= {self.is_pipe_partitioned} ' + f'is_grad_partitioned= {self.is_grad_partitioned}') + + model_parameters = filter(lambda p: p.requires_grad, self.module.parameters()) + num_params = sum([p.numel() for p in model_parameters]) + unique_params = num_params + # Subtract tied parameters if we don't own them + if self.module.tied_comms: + tied_params = 0 + for key, d in self.module.tied_comms.items(): + if self.global_rank != min(d['ranks']): + tied_params += sum(p.numel() for p in d['module'].parameters()) + unique_params -= tied_params + params_tensor = torch.LongTensor(data=[num_params, unique_params]).to(self.device) + dist.all_reduce(params_tensor, group=self.grid.get_model_parallel_group()) + params_tensor = params_tensor.tolist() + total_params = params_tensor[0] + unique_params = params_tensor[1] + if self.grid.data_parallel_id == 0: + logger.info(f'RANK={self.global_rank} ' + f'STAGE={self.stage_id} ' + f'LAYERS={self.module._local_stop - self.module._local_start} ' + f'[{self.module._local_start}, {self.module._local_stop}) ' + f'STAGE_PARAMS={num_params} ({num_params/1e6:0.3f}M) ' + f'TOTAL_PARAMS={total_params} ({total_params/1e6:0.3f}M) ' + f'UNIQUE_PARAMS={unique_params} ({unique_params/1e6:0.3f}M)') + + #initialize peer-2-peer communication and allreduce groups + if self.is_pipe_parallel: + p2p.init_process_groups(self.grid) + + # Pipeline buffers + self.num_pipe_buffers = 0 + self.pipe_buffers = { + 'inputs': [], # batch input and received activations + 'labels': [], # labels from batch input + 'outputs': [], # activations + 'output_tensors': [], # tensor 
object to preserve backward graph + } + self.pipe_recv_buf = None + self.grad_layer = None + + self.meta_buffer = None + + self.first_output_send = True + self.first_gradient_send = True + self.pipe_partition_input_meta_cache = None + self.pipe_partition_output_meta_cache = None + self.pipe_partition_grad_meta_cache = None + self.grad_partition_grad_layer_meta_cache = None + + #stores the loss for the current micro batch being processed + self.loss = torch.tensor(0.0).to(self.device) + + #stores the loss for the entire batch + self.total_loss = None + self.total_additional_losses = None + self.agg_loss = torch.tensor(0.0, requires_grad=False).to(self.device) + self.dp_group_loss = torch.tensor(0.0, requires_grad=False).to(self.device) + + # stores aggregated-DP train final loss and aggregated-DP additional losses, if any + # additional losses are stored as dict: {loss-name: agg-loss} + self.agg_train_loss = None + self.agg_additional_losses = None + + if self._config.pipeline['activation_checkpoint_interval'] > 0: + self.module.activation_checkpoint_interval = self._config.pipeline['activation_checkpoint_interval'] + # set use_reentrant default to True. + if self._config.pipeline.get('use_reentrant') is None: + self._config.pipeline['use_reentrant'] = True + if self._config.pipeline['use_reentrant'] is False: + # set activation_checkpoint_func to non_reentrant_checkpoint func. + self.module.activation_checkpoint_func = ds_checkpointing.non_reentrant_checkpoint + if self.grid.get_global_rank() == 0: + logger.info(f'CONFIG: activation_checkpoint_func=non_reentrant_checkpoint') + + self.module.checkpoint_parallel_write_pipeline = self._config.checkpoint_parallel_write_pipeline + + if self.is_last_stage(): + self.loss_model = self.module.loss_fn + + self.has_attention_mask = self.module.__class__.__name__ == 'GPT2ModelPipe' + # Initialize pipeline communicators. Just send a 0. + if is_even(self.stage_id): + if not self.is_last_stage(): + p2p.send(self.loss, self.next_stage) + if not self.is_first_stage(): + p2p.recv(self.loss, self.prev_stage) + else: + if not self.is_first_stage(): + p2p.recv(self.loss, self.prev_stage) + if not self.is_last_stage(): + p2p.send(self.loss, self.next_stage) + + # XXX look into timer reporting timing + # Initialize some timers because of early weirdness. + if self.wall_clock_breakdown(): + self.timers(FORWARD_MICRO_TIMER).start() + self.timers(FORWARD_MICRO_TIMER).stop() + self.timers(BACKWARD_MICRO_TIMER).start() + self.timers(BACKWARD_MICRO_TIMER).stop() + self.timers(BACKWARD_INNER_MICRO_TIMER).start() + self.timers(BACKWARD_INNER_MICRO_TIMER).stop() + self.timers(BACKWARD_REDUCE_MICRO_TIMER).start() + self.timers(BACKWARD_REDUCE_MICRO_TIMER).stop() + self.timers(BACKWARD_REDUCE_GLOBAL_TIMER).start() + self.timers(BACKWARD_REDUCE_GLOBAL_TIMER).stop() + self.timers(STEP_MICRO_TIMER).start() + self.timers(STEP_MICRO_TIMER).stop() + + def set_has_attention_mask(self, value): + assert isinstance(value, bool) + self.has_attention_mask = value + + def _build_data_iter(self, dataset): + sampler = torch.utils.data.distributed.DistributedSampler(dataset, + num_replicas=self.dp_world_size, + rank=self.mpu.get_data_parallel_rank(), + shuffle=False) + # Build a loader and make it repeating. 
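The assertion earlier in ``__init__`` pins down how the global batch size relates to the engine's micro-batching. The numbers below are illustrative only, not taken from any real configuration, but they make the bookkeeping concrete:

.. code-block:: python

    # train_batch_size = micro_batch_size * gradient_accumulation_steps * data_parallel_size
    micro_batch_size   = 4    # train_micro_batch_size_per_gpu
    micro_batches      = 8    # gradient_accumulation_steps
    data_parallel_size = 16   # number of pipeline replicas

    assert micro_batch_size * micro_batches * data_parallel_size == 512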
+ pipe_dataloader = self.deepspeed_io(dataset, data_sampler=sampler) + pipe_dataloader = RepeatingLoader(pipe_dataloader) + self.set_dataloader(pipe_dataloader) + + def _exec_reduce_tied_grads(self): + # We need to run this first to write to self.averaged_gradients; + # since this class turns `enable_backward_allreduce` off, + # `self.overlapping_partition_gradients_reduce_epilogue()` defined in the DeepSpeedEngine + # never actually runs. I suspect this is because of efficiency problems; get_flat_partition in + # stage2.py might do something expensive; someone will have to look into that later. But + # in the meantime, this fixes ZeRO2 + Pipelining enough to run a demo. Further profiling + # needed to decide if it actually breaks everything. + # (see https://github.com/EleutherAI/gpt-neox/issues/62#issuecomment-761471944) + if self.zero_optimization_partition_gradients(): + self.optimizer.overlapping_partition_gradients_reduce_epilogue() + + weight_group_list = self.module.get_tied_weights_and_groups() + for weight, group in weight_group_list: + grad = weight._hp_grad if self.using_bf16_optimizer else weight.grad + dist.all_reduce(grad, group=group) + + def _exec_reduce_grads(self): + self._force_grad_boundary = True + if self.pipeline_enable_backward_allreduce: + if self.using_bf16_optimizer: + # PP+BF16 work for ZeRO Stage 1 + self._bf16_reduce_grads() + else: + self.allreduce_gradients(bucket_size=MEMORY_OPT_ALLREDUCE_SIZE) + self._force_grad_boundary = False + + def _bf16_reduce_grads(self): + self.buffered_allreduce_fallback(grads=None, elements_per_buffer=MEMORY_OPT_ALLREDUCE_SIZE) + + def _reserve_pipe_buffers(self, num_buffers): + """Ensure that each pipeline buffer has at least ``num_buffers`` slots. + + This method only reserves slots and does not allocate tensors. + + Args: + num_buffers (int): The number of buffers to reserve. + """ + if self.num_pipe_buffers >= num_buffers: + return + + num_added = num_buffers - self.num_pipe_buffers + for key in self.pipe_buffers: + self.pipe_buffers[key].extend([None] * num_added) + self.num_pipe_buffers = num_buffers + + def reset_activation_shape(self): + """Reset the buffers when the shape of activation and gradient change. + For example, for curriculum learning that changes the seqlen of each + sample, we need to call this whenever the seqlen is going to change. + """ + self.first_output_send = True + self.pipe_recv_buf = None + self.grad_layer = None + self.meta_buffer = None + + self.pipe_partition_input_meta_cache = None + self.pipe_partition_output_meta_cache = None + self.pipe_partition_grad_meta_cache = None + self.grad_partition_grad_layer_meta_cache = None + + def train_batch(self, data_iter=None): + """Progress the pipeline to train the next batch of data. The engine will ingest + ``self.train_batch_size()`` total samples collectively across all workers. + + + An iterator that over training data should be provided as an argument + unless ``deepspeed.initialize()`` was provided a training set. In that event, + the training data will automatically be read. + + + .. warning:: + A total of ``self.gradient_accumulation_steps()`` entries will be pulled + from ``data_iter`` by each pipeline. There must be sufficient + data left in ``data_iter`` or else a ``StopIteration`` will halt training. + + DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader` + that wraps data loaders to automatically restart upon a ``StopIteration``. + + Args: + data_iter (Iterator, optional): Iterator of training data. 
+ + Returns: + The arithmetic mean of the losses computed this batch. + """ + if not torch._C.is_grad_enabled(): + raise RuntimeError(f'train_batch() requires gradients enabled. Use eval_batch() instead.') + + # Curriculum learning could change activation shape + if self.curriculum_enabled_legacy(): + new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \ + self.global_steps + 1) + if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step: + self.reset_activation_shape() + self.curriculum_scheduler_legacy.first_step = False + elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \ + self.global_steps): + self.reset_activation_shape() + + if data_iter is not None: + self.set_dataiterator(data_iter) + + self.module.train() + self.total_loss = None + self.total_additional_losses = None + self._compute_loss = True + + # Do the work + self.timers(TRAIN_BATCH_TIMER).start() + sched = schedule.TrainSchedule(micro_batches=self.micro_batches, + stages=self.num_stages, + stage_id=self.stage_id) + self._exec_schedule(sched) + + with torch.no_grad(): + self.agg_train_loss = self._aggregate_total_loss() + + self.timers(TRAIN_BATCH_TIMER).stop() + + if self.global_steps % self.steps_per_print() == 0: + if self.global_rank == 0: + elapsed = self.timers(TRAIN_BATCH_TIMER).elapsed(reset=True) / 1000.0 + iter_time = elapsed / self.steps_per_print() + tput = self.train_batch_size() / iter_time + log_str = f'steps: {self.global_steps} loss: {self.agg_train_loss:0.4f} ' + if self.agg_additional_losses is not None: + for loss_name, loss_value in self.agg_additional_losses.items(): + log_str += f'{loss_name}: {loss_value.item():0.4f} ' + log_str += f'iter time (s): {iter_time:0.3f} samples/sec: {tput:0.3f}' + print(log_str) + else: + self.timers(TRAIN_BATCH_TIMER).elapsed(reset=True) + + # Monitoring + if self.global_rank == 0 and self.monitor.enabled: + self.summary_events = [(f'Train/Samples/train_loss', self.agg_train_loss.mean().item(), + self.global_samples)] + self.monitor.write_events(self.summary_events) + + if self.wall_clock_breakdown() and self.global_steps % self.steps_per_print() == 0: + self.timers.log([ + PIPE_SEND_OUTPUT_TIMER, + PIPE_SEND_GRAD_TIMER, + PIPE_RECV_INPUT_TIMER, + PIPE_RECV_GRAD_TIMER, + ]) + + # TODO: should return precisely what loss returned and allow others to be queried? + return self.agg_train_loss + + def eval_batch(self, + data_iter, + return_logits=False, + compute_loss=True, + reduce_output='avg', + bcast_loss=True, + num_micro_batches=None): + """Evaluate the pipeline on a batch of data from ``data_iter``. The + engine will evaluate ``self.train_batch_size()`` total samples + collectively across all workers. + + This method is equivalent to: + + .. code-block:: python + + module.eval() + with torch.no_grad(): + output = module(batch) + + .. warning:: + A total of ``self.gradient_accumulation_steps()`` entries will be pulled + from ``data_iter`` by each pipeline. There must be sufficient + data left in ``data_iter`` or else a ``StopIteration`` will halt training. + + DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader` + that wraps data loaders to automatically restart upon a ``StopIteration``. + + Args: + data_iter (Iterator): Iterator of data to evaluate. + + Returns: + The arithmetic mean of the losses computed this batch. 
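A sketch of how ``train_batch()`` is typically driven end to end. ``layers``, ``loss_fn``, ``train_ds`` and ``ds_config`` are placeholders for user-supplied objects, and the config is assumed to define an optimizer; only ``deepspeed.initialize()``, ``PipelineModule`` and ``train_batch()`` are taken from the sources above.

.. code-block:: python

    import deepspeed
    from deepspeed.runtime.pipe import PipelineModule  # exported by the pipe __init__ above

    net = PipelineModule(layers=layers, loss_fn=loss_fn, num_stages=2)
    engine, _, _, _ = deepspeed.initialize(
        model=net,
        model_parameters=[p for p in net.parameters() if p.requires_grad],
        config=ds_config,
        training_data=train_ds)

    for step in range(1000):
        # Each call consumes gradient_accumulation_steps() micro-batches and runs
        # forward, backward and the optimizer step according to the pipeline schedule.
        loss = engine.train_batch()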
+ """ + self.eval_return_logits = return_logits + self.module.eval() + + # Curriculum learning could change activation shape + if self.curriculum_enabled_legacy(): + new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \ + self.global_steps + 1) + if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step: + self.reset_activation_shape() + self.curriculum_scheduler_legacy.first_step = False + elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \ + self.global_steps): + self.reset_activation_shape() + + eval_output = None + + self._compute_loss = compute_loss + + # Use the provided data iterator + train_iterator = self.data_iterator + self.set_dataiterator(data_iter) + + # set the number micro batches in case the user chose value than training + micro_batches = self.micro_batches if num_micro_batches is None else num_micro_batches + + # Do the work + sched = schedule.InferenceSchedule(micro_batches=self.micro_batches, + stages=self.num_stages, + stage_id=self.stage_id) + + # prevent dead-lock with multiple evals sequence + dist.barrier() + + with torch.no_grad(): + self._exec_schedule(sched) + + if self.is_last_stage(): + eval_output = self._reduce_outputs(self.fwd_outputs, reduce=reduce_output, micro_batches=micro_batches) + + if compute_loss and (bcast_loss or self.monitor.enabled): + eval_output = self._bcast_pipe_scalar(eval_output) + + if self.global_rank == 0 and self.monitor.enabled: + self.summary_events = [(f'Train/Samples/eval_loss', eval_output.mean().item(), self.global_samples)] + self.monitor.write_events(self.summary_events) + + # Restore the training iterator + self.set_dataiterator(train_iterator) + + # Reset any buffers that may have been populated during the forward passes. + #ds_checkpointing.reset() + self.eval_return_logits = False + if return_logits: + outputs = self.outputs + self.outputs = None + return eval_output, outputs + return eval_output + + def set_train_batch_size(self, train_batch_size): + """Adjust the global batch size by increasing or decreasing the number of + micro-batches (i.e., gradient accumulation steps). The size of each micro-batch + (i.e., ``train_micro_batch_size_per_gpu``) is not changed. + Args: + train_batch_size (int): The new global batch size for training. + Raises: + ValueError: if ``train_batch_size`` is not divisible by the + configured micro-batch size and data parallelism. 
+ """ + super().set_train_batch_size(train_batch_size) + self.micro_batches = self.gradient_accumulation_steps() + + def is_first_stage(self): + """True if this process is in the first stage in the pipeline.""" + return self.stage_id == 0 + + def is_last_stage(self): + """True if this process is in the last stage in the pipeline.""" + return self.stage_id == self.num_stages - 1 + + def _reduce_outputs(self, outputs, reduce='avg', reduce_dp=True, micro_batches=None): + if reduce is None: + return outputs + + if reduce.lower() == 'avg': + # first sum over all microbatches + if torch.is_tensor(outputs[0]): + reduced = sum(outputs) + else: + assert isinstance(outputs, (list, tuple)) + reduced = [torch.zeros_like(o) for o in outputs[0]] + for idx, out in outputs: + reduced[idx] += out + + # Average over the microbatches + reduced = self._scale_loss_by_gas(reduced, eval_micro_batches=micro_batches) + + # Average over DP groups + if reduce_dp and self.is_data_parallel: + if torch.is_tensor(reduced): + dist.all_reduce(reduced, group=self.mpu.get_data_parallel_group()) + reduced /= self.dp_world_size + else: + for idx in range(len(reduced)): + dist.all_reduce(reduced[idx], group=self.mpu.get_data_parallel_group()) + reduced[idx] /= self.dp_world_size + + return reduced + else: + raise NotImplementedError(f'reduction type {reduce} not supported.') + + def _bcast_pipe_scalar(self, data, src_rank=None, dtype=torch.float32): + # Default to last stage (e.g., for broadcasting loss) + if src_rank is None: + src_rank = self.grid.stage_to_global(self.num_stages - 1) + assert src_rank in self.grid.pp_group + + if self.global_rank == src_rank: + result = data.clone().detach().type(dtype).to(self.device) + else: + result = torch.Tensor([0.]).type(dtype).to(self.device) + + dist.broadcast(tensor=result, src=src_rank, group=self.mpu.get_pipe_parallel_group()) + + return result + + def _aggregate_total_loss(self): + # Scale loss, average among DP ranks, and bcast loss to the rest of my DP group + if self.is_last_stage(): + # Scale loss and additional losses, if any + loss = self._scale_loss_by_gas(self.total_loss) + self.agg_additional_losses = self.total_additional_losses + if self.agg_additional_losses is not None: + self.agg_additional_losses = OrderedDict({ + loss_name: self._scale_loss_by_gas(_loss.clone().detach()) + for loss_name, _loss in self.agg_additional_losses.items() + }) + + self.dp_group_loss = loss.clone().detach() + agg_loss = self.dp_group_loss.clone().detach() + #print(f'RANK={self.global_rank} bcast SENDER src={self.global_rank} group={self.grid.pp_group}', flush=True) + + # Average loss across all data-parallel groups + if self.is_data_parallel: + if self.agg_additional_losses is None: + dist.all_reduce(agg_loss, group=self.mpu.get_data_parallel_group()) + agg_loss /= self.dp_world_size + else: + # use a single reduce op for agg_loss and additional losses, if any + assert '__train_loss__' not in self.agg_additional_losses.keys() + tensors = OrderedDict({'__train_loss__': agg_loss}) + tensors.update(self.agg_additional_losses.items()) + flat_tensor = torch.cat([t.clone().reshape(-1).detach() for t in tensors.values()]) + dist.all_reduce(flat_tensor, group=self.mpu.get_data_parallel_group()) + flat_tensor /= self.dp_world_size + offset = 0 + reduced_tensor = {} + for name, t in tensors.items(): + n_elem = t.numel() + reduced_tensor[name] = flat_tensor[offset:offset + n_elem].clone().detach().reshape(t.shape) + offset += n_elem + agg_loss = reduced_tensor['__train_loss__'] + 
self.agg_additional_losses = OrderedDict( + {name: reduced_tensor[name] + for name in self.agg_additional_losses.keys()}) + + assert self.global_rank in self.grid.pp_group + losses = [self.dp_group_loss, agg_loss] + if self.agg_additional_losses is not None: + losses += list(self.agg_additional_losses.values()) + losses = torch.stack(losses).float() + if self.is_pipe_parallel: + dist.broadcast(tensor=losses, src=self.global_rank, group=self.mpu.get_pipe_parallel_group()) + else: + # Get loss from last stage + src_rank = self.grid.stage_to_global(self.num_stages - 1) + assert src_rank in self.grid.pp_group + # losses to reduce are: dp_group_loss, agg_loss, model additional losses + # therefore: 2 + n_additional_losses + additional_losses = self.module.get_additional_losses() + n_additional_losses = 0 if additional_losses is None else len(additional_losses) + losses = torch.Tensor([0.] * (2 + n_additional_losses)).to(self.device) + dist.broadcast(tensor=losses, src=src_rank, group=self.grid.get_pipe_parallel_group()) + self.dp_group_loss = losses[0].clone().detach() + agg_loss = losses[1].clone().detach() + if additional_losses is not None: + self.agg_additional_losses = OrderedDict( + {name: losses[2 + i].clone().detach() + for i, name in enumerate(additional_losses.keys())}) + return agg_loss + + def set_dataloader(self, loader): + """""" + if self.is_first_stage() or self.is_last_stage(): + self.training_dataloader = loader + self.data_iterator = iter(self.training_dataloader) + + def set_dataiterator(self, iterator): + """ Store an iterator to sample for training data. """ + if self.is_first_stage() or self.is_last_stage(): + self.training_dataloader = None + self.data_iterator = iterator + + def set_batch_fn(self, fn): + """Execute a post-processing function on input data. + + Args: + fn (function): The function to run. + """ + self.batch_fn = fn + + def is_gradient_accumulation_boundary(self): + """True if the engine is executing a gradient reduction or optimizer step instruction. + + This is overridden from :class:`DeepSpeedEngine` to force reductions + and steps when the pipeline engine is instructed to do so. + + Returns: + bool: whether reductions and optimizer steps should occur. + """ + return self._force_grad_boundary + + def log_for_device(self, *msg): + if LOG_STAGE == self.stage_id or LOG_STAGE == -1: + if DATA_PARALLEL_ID == self.grid.data_parallel_id or DATA_PARALLEL_ID == -1: + print( + f'RANK={dist.get_rank()} ' + f'PIPE-ID={self.stage_id} ' + f'DATA-ID={self.grid.data_parallel_id} ' + f'MBATCH-ID={self.microbatch_id} ' + f'STEP-ID={self.log_batch_step_id} ' + '::', + *msg, + flush=True) + + def tput_log(self, *msg): + if self.global_rank == 0 and self.global_steps % self.steps_per_print() == 0: + print(*msg) + + def _next_batch(self): + # If using 3D parallelism, only some first-stage ranks may do IO + batch = None + if self.data_iterator is not None: + batch = next(self.data_iterator) + + # Any post-processing, like broadcasting across a slice-parallel group. 
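The ``batch_fn`` hook registered through ``set_batch_fn()`` above is applied to every batch right here, before the data is split between the first and last stages. A sketch with a hypothetical dict-shaped dataloader batch (the keys are assumptions, not a DeepSpeed API):

.. code-block:: python

    # Hypothetical reshaping of raw dataloader batches into the (inputs, labels)
    # pair the pipeline expects.
    def to_pipe_format(batch):
        return (batch["input_ids"], batch["labels"])

    engine.set_batch_fn(to_pipe_format)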
+ if self.batch_fn: + batch = self.batch_fn(batch) + + return batch + + def _exec_forward_pass(self, buffer_id): + self.tput_timer.start() + self.mem_status('BEFORE FWD', reset_max=True) + + if isinstance(self.pipe_buffers['inputs'][buffer_id], tuple): + inputs = tuple(t.clone() for t in self.pipe_buffers['inputs'][buffer_id]) + else: + inputs = self.pipe_buffers['inputs'][buffer_id].clone() + + # collect the partitioned input from the previous stage + if self.is_pipe_partitioned and not self.is_first_stage(): + if self.pipe_partition_input_meta_cache is None: + self.pipe_partition_input_meta_cache = inputs[0].to('cpu') + part_input = PartitionedTensor.from_meta(meta=self.pipe_partition_input_meta_cache, + local_part=inputs[1], + group=self.grid.get_slice_parallel_group()) + + inputs = (part_input.full(), *inputs[2:]) + inputs[0].requires_grad = True + # skip mask + #inputs[1].requires_grad = True + part_input = None + inputs = inputs[0] if len(inputs) == 1 else inputs + self.pipe_buffers['inputs'][buffer_id] = inputs + + # inputs has no gradient because it is from a cloned tensor + outputs = super().forward(inputs) + + # Reset activation checkpointing buffers. + # Need to call this between evaluation iterations + if not self.module.training: + ds_checkpointing.reset() + + # Partition the outputs if we are not the last stage + if self.is_pipe_partitioned and not self.is_last_stage(): + if isinstance(outputs, tuple): + first_output = outputs[0] + # TODO: Improve pipe partitioning to pass multiple tensors that require grads + assert all([torch.is_tensor(elt) and elt.requires_grad is False for elt in outputs[1:]]) + outputs_tail = outputs[1:] + elif torch.is_tensor(outputs): + first_output = outputs + outputs_tail = [] + else: + raise ValueError("expecting a tensor or a tuple of tensors") + part = PartitionedTensor(tensor=first_output, group=self.grid.get_slice_parallel_group()) + # Clear the large output data, but save the computation graph + first_output.data = torch.zeros(1) + self.pipe_buffers['output_tensors'][buffer_id] = first_output + # Inject the partitioned tensor into the output before sending + outputs = (part.to_meta(), part.data(), *outputs_tail) + part = None + + self.pipe_buffers['outputs'][buffer_id] = outputs + + # Optionally compute loss on the last device + if self.is_last_stage(): + if self._compute_loss and self.module.loss_fn is not None: + labels = self.pipe_buffers['labels'][buffer_id] + self.loss = self.module.loss_fn(outputs, labels) + else: + # Some models just return loss from forward() + self.loss = outputs + if self.eval_return_logits: + self.outputs = outputs + + if isinstance(self.loss, torch.Tensor): + self.fwd_outputs.append(self.loss.detach()) + else: + self.fwd_outputs.append([l.detach() for l in self.loss]) + + def add_to_total_loss(_total_loss, _loss): + if isinstance(_loss, torch.Tensor): + if _total_loss is None: + _total_loss = torch.zeros_like(_loss) + _total_loss += _loss.detach() + else: + if _total_loss is None: + _total_loss = [torch.zeros_like(_l) for _l in _loss] + for _idx, _l in enumerate(_loss): + _total_loss[_idx] += _l.detach() + return _total_loss + + self.total_loss = add_to_total_loss(self.total_loss, self.loss) + + # aggregate additional losses across gradient accumulation steps + additional_losses = self.module.get_additional_losses() + if additional_losses is not None: + if self.total_additional_losses is None: + self.total_additional_losses = OrderedDict() + for name, loss in additional_losses.items(): + total = 
self.total_additional_losses[name] if name in self.total_additional_losses else None + self.total_additional_losses[name] = add_to_total_loss(total, loss) + + def _exec_backward_pass(self, buffer_id): + assert self.optimizer is not None, "must provide optimizer during " \ + "init in order to use backward" + + self.mem_status('BEFORE BWD', reset_max=True) + + # The last stage just runs backward on the loss using DeepSpeed's typical + # mechanisms. + if self.is_last_stage(): + super().backward(self.loss) + self.mem_status('AFTER BWD') + return + + outputs = self.pipe_buffers['outputs'][buffer_id] + + if self.wall_clock_breakdown(): + self.timers(BACKWARD_MICRO_TIMER).start() + self.timers(BACKWARD_GLOBAL_TIMER).start() + self.timers(BACKWARD_INNER_MICRO_TIMER).start() + self.timers(BACKWARD_INNER_GLOBAL_TIMER).start() + + # Reconstruct if we previously partitioned the output. We must be + # careful to also restore the computational graph of the tensors we partitioned. + if self.is_pipe_partitioned: + if self.is_grad_partitioned: + if self.pipe_partition_output_meta_cache is None: + self.pipe_partition_output_meta_cache = outputs[0].to('cpu') + part_output = PartitionedTensor.from_meta(meta=self.pipe_partition_output_meta_cache, + local_part=outputs[1], + group=self.grid.get_slice_parallel_group()) + self.pipe_buffers['output_tensors'][buffer_id].data = part_output.full() + outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[2:]) + else: + # Already restored from partition + self.pipe_buffers['output_tensors'][buffer_id].data = outputs[0] + outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[1:]) + + grad_tensors = self.grad_layer + if self.is_grad_partitioned: + #print(f'RANK={self.global_rank} BEFORE-BWD restoring grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}') + if self.grad_partition_grad_layer_meta_cache is None: + self.grad_partition_grad_layer_meta_cache = self.grad_layer[0].to('cpu') + part_grad = PartitionedTensor.from_meta(meta=self.grad_partition_grad_layer_meta_cache, + local_part=self.grad_layer[1], + group=self.grid.get_slice_parallel_group()) + grad_tensors = (part_grad.full(), *grad_tensors[2:]) + part_grad = None + #print(f'RANK={self.global_rank} BEFORE-BWD restored grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}') + + if self.using_bf16_optimizer and not self.is_last_stage(): + # manually call because we don't call optimizer.backward() + self.optimizer.clear_lp_grads() + + # This handles either a single tensor or tuple of tensors. 
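For intermediate stages the backward pass is seeded with the gradient received from the next stage, using ``torch.autograd.backward`` with explicit ``grad_tensors`` exactly as in the call just below. A standalone illustration of that call pattern:

.. code-block:: python

    import torch

    x = torch.randn(4, 3, requires_grad=True)
    y = x * 2                                   # stand-in for this stage's forward output
    grad_from_next_stage = torch.ones_like(y)   # what RecvGrad would have delivered
    torch.autograd.backward(tensors=(y,), grad_tensors=(grad_from_next_stage,))
    assert torch.allclose(x.grad, torch.full_like(x, 2.0))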
+ if isinstance(outputs, tuple): + out_tensors = [t for t in outputs if t.is_floating_point()] + assert len(out_tensors) == len(grad_tensors) + torch.autograd.backward(tensors=out_tensors, grad_tensors=grad_tensors) + else: + torch.autograd.backward(tensors=(outputs, ), grad_tensors=(grad_tensors, )) + + if self.using_bf16_optimizer and not self.is_last_stage(): + # manually call because we don't call optimizer.backward() + self.optimizer.update_hp_grads(clear_lp_grads=False) + + # Free up the memory from the output of forward() + self.pipe_buffers['output_tensors'][buffer_id] = None + self.pipe_buffers['outputs'][buffer_id] = None + grad_tensors = None + + if self.wall_clock_breakdown(): + self.timers(BACKWARD_INNER_MICRO_TIMER).stop() + self.timers(BACKWARD_INNER_GLOBAL_TIMER).stop() + self.timers(BACKWARD_MICRO_TIMER).stop() + self.timers(BACKWARD_GLOBAL_TIMER).stop() + + self.mem_status('AFTER BWD') + + def _exec_load_micro_batch(self, buffer_id): + if self.wall_clock_breakdown(): + self.timers(BATCH_INPUT_TIMER).start() + + batch = self._next_batch() + + if self.is_first_stage(): + loaded = None + if torch.is_tensor(batch[0]): + loaded = batch[0].clone().to(self.device).detach() + if self._config.pipeline['activation_checkpoint_interval'] > 0 and self._config.pipeline[ + 'use_reentrant']: + loaded.requires_grad = loaded.is_floating_point() + else: + assert isinstance(batch[0], (tuple, list)) + # Assume list or tuple + loaded = [] + for x in batch[0]: + assert torch.is_tensor(x) + mine = x.clone().detach().to(self.device) + if self._config.pipeline['activation_checkpoint_interval'] > 0 and self._config.pipeline[ + 'use_reentrant']: + mine.requires_grad = mine.is_floating_point() + loaded.append(mine) + loaded = tuple(loaded) + + self.pipe_buffers['inputs'][buffer_id] = loaded + + if self.is_last_stage(): + loaded = batch[1] + if torch.is_tensor(batch[1]): + loaded = batch[1].to(self.device) + # XXX: torch 1.6.0 DataLoader will auto convert tuple to list + elif isinstance(batch[1], (tuple, list)): + loaded = [] + for x in batch[1]: + assert torch.is_tensor(x) + x = x.to(self.device).detach() + loaded.append(x) + loaded = tuple(loaded) + + self.pipe_buffers['labels'][buffer_id] = loaded + + if self.wall_clock_breakdown(): + self.timers(BATCH_INPUT_TIMER).stop() + + def _send_tensor_meta(self, buffer, recv_stage): + """ Communicate metadata about upcoming p2p transfers. 
+ + Metadata is communicated in this order: + * type (0: tensor, 1: list) + * num_tensors if type=list + foreach tensor in buffer: + * ndims + * shape + """ + send_bytes = 0 + if isinstance(buffer, torch.Tensor): + type_tensor = torch.LongTensor(data=[0]).to(self.device) + p2p.send(type_tensor, recv_stage) + send_shape = torch.LongTensor(data=buffer.size()).to(self.device) + send_ndims = torch.LongTensor(data=[len(buffer.size())]).to(self.device) + p2p.send(send_ndims, recv_stage) + p2p.send(send_shape, recv_stage) + send_bytes += _tensor_bytes(buffer) + elif isinstance(buffer, list): + assert (False) + type_tensor = torch.LongTensor(data=[1]).to(self.device) + p2p.send(type_tensor, recv_stage) + count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device) + p2p.send(count_tensor, recv_stage) + for tensor in buffer: + assert isinstance(tensor, torch.Tensor) + send_shape = torch.LongTensor(data=tensor.size()).to(self.device) + send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device) + p2p.send(send_ndims, recv_stage) + p2p.send(send_shape, recv_stage) + send_bytes += _tensor_bytes(tensor) + elif isinstance(buffer, tuple): + type_tensor = torch.LongTensor(data=[2]).to(self.device) + p2p.send(type_tensor, recv_stage) + count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device) + p2p.send(count_tensor, recv_stage) + for idx, tensor in enumerate(buffer): + assert isinstance(tensor, torch.Tensor) + send_shape = torch.LongTensor(data=tensor.size()).to(self.device) + send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device) + send_dtype = torch.LongTensor(data=[self.DTYPE_TO_ID[tensor.dtype]]).to(self.device) + p2p.send(send_dtype, recv_stage) + p2p.send(send_ndims, recv_stage) + p2p.send(send_shape, recv_stage) + # Useful for performance debugging. + ''' + new_bytes = _tensor_bytes(tensor) + send_bytes += _tensor_bytes(tensor) + # Useful for performance debugging. + if self.grid.data_parallel_id == 0: + print( + f'STAGE={self.stage_id} pipe-send-volume[{idx}]: shape={send_shape} {new_bytes/1024**2:0.2f}MB' + ) + ''' + else: + raise NotImplementedError(f'Could not send meta type {type(buffer)}') + + # Useful for performance debugging. + ''' + if self.grid.data_parallel_id == 0: + print(f'STAGE={self.stage_id} pipe-send-volume: {send_bytes/1024**2:0.2f}MB') + ''' + + def _recv_tensor_meta(self, send_stage): + """Receive metadata about upcoming p2p transfers and return allocated buffers. + + Metadata is communicated in this order: + * type (0: tensor, 1: list) + * num_tensors if type=list + foreach tensor in buffer: + * ndims + * shape + + Returns: + Allocated buffer for receiving from send_stage. + """ + + type_tensor = torch.LongTensor(data=[0]).to(self.device) + p2p.recv(type_tensor, send_stage) + recv_type = type_tensor.item() + + # A single tensor will be sent. 
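For tuples (type code 2) the sender transmits, per tensor, a dtype id, the number of dimensions and the shape, each as a ``LongTensor``; the receiver uses them to pre-allocate matching buffers. The sketch below writes the same information into a plain dict instead of p2p messages, purely to make the wire format visible:

.. code-block:: python

    import torch

    t = torch.empty(8, 128, dtype=torch.float16)
    meta = {
        "type":  2,              # 0 = tensor, 1 = list, 2 = tuple
        "dtype": 4,              # index of torch.float16 in ID_TO_DTYPE
        "ndims": t.dim(),
        "shape": list(t.size()),
    }
    # The receiver allocates a buffer of this shape/dtype before the payload arrives.
    recv_buf = torch.zeros(meta["shape"], dtype=torch.float16)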
+ if recv_type == 0: + recv_ndims = torch.LongTensor(data=[0]).to(self.device) + p2p.recv(recv_ndims, send_stage) + recv_ndims = recv_ndims.item() + recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device) + p2p.recv(recv_shape, send_stage) + recv_shape = recv_shape.tolist() + return self._allocate_buffer(recv_shape, num_buffers=1)[0] + + # List or tuple of tensors + elif recv_type == 1 or recv_type == 2: + count_tensor = torch.LongTensor(data=[0]).to(self.device) + p2p.recv(count_tensor, send_stage) + num_tensors = count_tensor.item() + recv_shapes_and_dtypes = [] + for idx in range(num_tensors): + recv_dtype = torch.LongTensor(data=[0]).to(self.device) + p2p.recv(recv_dtype, send_stage) + recv_dtype = self.ID_TO_DTYPE[recv_dtype.item()] + recv_ndims = torch.LongTensor(data=[0]).to(self.device) + p2p.recv(recv_ndims, send_stage) + recv_ndims = recv_ndims.item() + recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device) + p2p.recv(recv_shape, send_stage) + recv_shapes_and_dtypes.append((recv_shape.tolist(), recv_dtype)) + + buffers = self._allocate_buffers(recv_shapes_and_dtypes, num_buffers=1)[0] + # Convert to tuples if requested. + if recv_type == 2: + buffers = tuple(buffers) + return buffers + + else: + raise NotImplementedError(f'Could not receive type {type(recv_type)}') + + def _exec_send_activations(self, buffer_id): + if self.wall_clock_breakdown(): + self.timers(PIPE_SEND_OUTPUT_TIMER).start() + + outputs = self.pipe_buffers['outputs'][buffer_id] + + # NCCL does not like to send torch.BoolTensor types, so cast the mask to half(). + # We could do char, but with half() we can eventually flatten with other fp16 + # messages (TODO) + if self.has_attention_mask or self.has_bool_tensors: + outputs = list(outputs) + outputs[-1] = outputs[-1].half() + outputs = tuple(outputs) + + if self.first_output_send: + self.first_output_send = False + self._send_tensor_meta(outputs, self.next_stage) + + if isinstance(outputs, torch.Tensor): + p2p.send(outputs, self.next_stage) + elif isinstance(outputs, tuple): + for idx, buffer in enumerate(outputs): + p2p.send(buffer, self.next_stage) + else: + raise NotImplementedError('Could not send output of type ' + f'{type(outputs)}') + + # Restore the boolean tensor + if self.has_attention_mask or self.has_bool_tensors: + outputs = list(outputs) + outputs[-1] = outputs[-1].bool() + outputs = tuple(outputs) + + if self.wall_clock_breakdown(): + self.timers(PIPE_SEND_OUTPUT_TIMER).stop() + + def _exec_send_grads(self, buffer_id): + if self.wall_clock_breakdown(): + self.timers(PIPE_SEND_GRAD_TIMER).start() + + inputs = self.pipe_buffers['inputs'][buffer_id] + + # Partition the gradient + if self.is_grad_partitioned: + if isinstance(inputs, tuple): + first_input = inputs[0] + assert all([torch.is_tensor(elt) for elt in inputs[1:]]) + inputs_grad_tail = [elt.grad for elt in inputs[1:]] + elif torch.is_tensor(inputs): + first_input = inputs + inputs_grad_tail = [] + else: + raise ValueError("expecting a tensor or a tuple of tensors") + assert torch.is_tensor(first_input) + part = PartitionedTensor(tensor=first_input.grad, group=self.grid.get_slice_parallel_group()) + + inputs = (part.to_meta(), part.data(), *inputs_grad_tail) + + # XXX Terrible hack + # Drop the attention mask from the input buffer here. It does not have + # a grad that needs to be communicated. We free the buffer immediately + # after, so no need to restore it. The receiver also has a hack that skips + # the recv. This is because NCCL does not let us send torch.BoolTensor :-(. 
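The bool-to-half cast discussed in the comments exists because NCCL point-to-point ops do not accept ``torch.bool`` tensors; the mask survives the round trip unchanged:

.. code-block:: python

    import torch

    mask = torch.tensor([True, False, True])
    wire = mask.half()        # what actually goes through p2p.send()
    restored = wire.bool()    # un-cast on the receiving side
    assert torch.equal(mask, restored)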
+ if self.has_attention_mask or self.has_bool_tensors: + inputs = list(inputs) + inputs.pop() + inputs = tuple(inputs) + + if isinstance(inputs, torch.Tensor): + assert inputs.grad is not None + p2p.send(inputs.grad, self.prev_stage) + else: + # XXX terrible hacky branch + if self.is_grad_partitioned: + # First two sends are partitioned gradient + p2p.send(inputs[0], self.prev_stage) + p2p.send(inputs[1], self.prev_stage) + else: + for idx, buffer in enumerate(inputs): + # Skip tensors that will not produce a grad + if not buffer.is_floating_point(): + assert buffer.grad is None + continue + assert buffer.grad is not None + p2p.send(buffer.grad, self.prev_stage) + + # We can free up the input buffer now + self.pipe_buffers['inputs'][buffer_id] = None + + if self.wall_clock_breakdown(): + self.timers(PIPE_SEND_GRAD_TIMER).stop() + + def _exec_recv_activations(self, buffer_id): + if self.wall_clock_breakdown(): + self.timers(PIPE_RECV_INPUT_TIMER).start() + + recvd = None + + # Allocate the buffer if necessary + if self.pipe_recv_buf is None: + self.pipe_recv_buf = self._recv_tensor_meta(self.prev_stage) + + if isinstance(self.pipe_recv_buf, torch.Tensor): + p2p.recv(self.pipe_recv_buf, self.prev_stage) + recvd = self.pipe_recv_buf.clone().detach() + recvd.requires_grad = recvd.is_floating_point() + else: + assert isinstance(self.pipe_recv_buf, tuple) + recvd = [None] * len(self.pipe_recv_buf) + for idx, buffer in enumerate(self.pipe_recv_buf): + assert torch.is_tensor(buffer) + # XXX hardcode meta type + if self.is_pipe_partitioned and idx == 0 and buffer.dtype != torch.long: + if self.meta_buffer is None: + self.meta_buffer = torch.zeros(buffer.size(), dtype=torch.long, device=self.device) + buffer = self.meta_buffer + + p2p.recv(buffer, self.prev_stage) + recvd[idx] = buffer.clone().detach() + + # NCCL does not like to send torch.BoolTensor types, so un-cast the + # attention mask + if self.has_attention_mask or self.has_bool_tensors: + recvd[-1] = recvd[-1].bool() + + recvd = tuple(recvd) + + for buffer in recvd: + buffer.requires_grad = buffer.is_floating_point() + + self.pipe_buffers['inputs'][buffer_id] = recvd + + if self.wall_clock_breakdown(): + self.timers(PIPE_RECV_INPUT_TIMER).stop() + + def _exec_recv_grads(self, buffer_id): + if self.wall_clock_breakdown(): + self.timers(PIPE_RECV_GRAD_TIMER).start() + + outputs = self.pipe_buffers['outputs'][buffer_id] + # XXX these shapes are hardcoded for Megatron + # Restore partitioned output if it was partitioned and we are sending full gradients + if self.is_pipe_partitioned and not self.is_grad_partitioned: + if self.pipe_partition_grad_meta_cache is None: + self.pipe_partition_grad_meta_cache = outputs[0].to('cpu') + part_output = PartitionedTensor.from_meta(meta=self.pipe_partition_grad_meta_cache, + local_part=outputs[1], + group=self.grid.get_slice_parallel_group()) + outputs[0].data = part_output.full() + outputs = (outputs[0], *outputs[2:]) + # save for backward + self.pipe_buffers['outputs'][buffer_id] = outputs + + # Allocate gradient if necessary + if self.grad_layer is None: + if isinstance(outputs, torch.Tensor): + s = list(outputs.size()) + self.grad_layer = self._allocate_buffer(s, dtype=outputs.dtype, num_buffers=1)[0] + else: + # XXX This is a HACK + # When we exchange activations/gradients, the two pipe stages + # need to issue the send/recv with the same buffer sizes or + # else there is a deadlock. The is_floating_point() filter is + # used to avoid sending gradients for tensors that do not + # produce gradients. 
When TP>1, we partition the first + # activations/gradients across TP ranks to save communication + # volume and memory. That partitioned tensor is represented as + # two tensors: a 1/TPth chunk of the original data and also a + # small LongTensor storing the metadata used to reconstruct on + # the other side. When combined, the floating point filter also + # filtered out the metadata tensor. This quick (hacky) fix just + # branches on is_grad_partitioned so we don't filter out the + # metadata tensor. + if self.is_grad_partitioned: + sizes_and_dtypes = [(list(t.size()), t.dtype) + for t in outputs[:2]] + [(list(t.size()), t.dtype) + for t in outputs[2:] if t.is_floating_point()] + else: + sizes_and_dtypes = [(list(t.size()), t.dtype) for t in outputs if t.is_floating_point()] + self.grad_layer = self._allocate_buffers(sizes_and_dtypes, num_buffers=1)[0] + + if isinstance(self.grad_layer, torch.Tensor): + p2p.recv(self.grad_layer, self.next_stage) + else: + assert isinstance(outputs, tuple) + for idx, buffer in enumerate(self.grad_layer): + # XXX GPT-2 hack + if self.is_grad_partitioned and idx == 0 and buffer.dtype != torch.long: + buffer.data = torch.zeros(buffer.size(), dtype=torch.long, device=self.device) + p2p.recv(buffer, self.next_stage) + + if self.wall_clock_breakdown(): + self.timers(PIPE_RECV_GRAD_TIMER).stop() + + def _exec_optimizer_step(self, lr_kwargs=None): + if self.wall_clock_breakdown(): + self.timers(STEP_MICRO_TIMER).start() + self.timers(STEP_GLOBAL_TIMER).start() + self.mem_status('BEFORE STEP', reset_max=True) + + self._force_grad_boundary = True + self._take_model_step(lr_kwargs) + self._force_grad_boundary = False + + self.mem_status('AFTER STEP') + + if self.global_rank == 0 and self.monitor.enabled: + self.summary_events = [(f'Train/Samples/lr', self.get_lr()[0], self.global_samples)] + if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'): + self.summary_events.append( + (f'Train/Samples/loss_scale', self.optimizer.cur_scale, self.global_samples)) + self.monitor.write_events(self.summary_events) + + if self.wall_clock_breakdown(): + self.timers(STEP_MICRO_TIMER).stop() + self.timers(STEP_GLOBAL_TIMER).stop() + if self.global_steps % self.steps_per_print() == 0: + self.timers.log([ + BATCH_INPUT_TIMER, + FORWARD_MICRO_TIMER, + BACKWARD_MICRO_TIMER, + BACKWARD_INNER_MICRO_TIMER, + BACKWARD_REDUCE_MICRO_TIMER, + STEP_MICRO_TIMER, + ]) + if self.global_steps % self.steps_per_print() == 0: + self.timers.log([ + FORWARD_GLOBAL_TIMER, + BACKWARD_GLOBAL_TIMER, + BACKWARD_INNER_GLOBAL_TIMER, + BACKWARD_REDUCE_GLOBAL_TIMER, + STEP_GLOBAL_TIMER, + ]) + + def _allocate_zeros(self, shape, **kwargs): + """ Allocate a tensor of zeros on the engine's device. + + Arguments: + shape: the shape of the tensor to allocate + kwargs: passed to torch.zeros() + + Returns: + A tensor from torch.zeros() allocated on self.device. 
+ """ + if "dtype" not in kwargs: + if self.fp16_enabled(): + kwargs["dtype"] = torch.half + if self.bfloat16_enabled(): + kwargs["dtype"] = torch.bfloat16 + + return torch.zeros(shape, device=self.device, **kwargs) + + def _allocate_buffer(self, shape, num_buffers=-1, **kwargs): + buffers = [] + if num_buffers == -1: + num_buffers = self.num_pipe_buffers + for count in range(num_buffers): + buffers.append(self._allocate_zeros(shape, **kwargs)) + return buffers + + def _allocate_buffers(self, shapes_and_dtypes, requires_grad=False, num_buffers=-1): + buffers = [] + if num_buffers == -1: + num_buffers = self.num_pipe_buffers + for count in range(num_buffers): + buffer = [] + for shape, dtype in shapes_and_dtypes: + buffer.append(self._allocate_zeros(shape, dtype=dtype, requires_grad=requires_grad)) + buffers.append(buffer) + return buffers + + def forward(self, *args, **kwargs): + """Disabled for pipeline parallel training. See ``train_batch()``. """ + raise PipelineError("Only train_batch() is accessible in pipeline mode.") + + def backward(self, *args, **kwargs): + """Disabled for pipeline parallel training. See ``train_batch()``. """ + raise PipelineError("Only train_batch() is accessible in pipeline mode.") + + def step(self, *args, **kwargs): + """Disabled for pipeline parallel training. See ``train_batch()``. """ + raise PipelineError("Only train_batch() is accessible in pipeline mode.") + + def mem_status(self, msg, print_rank=-1, reset_max=False): + return + global mem_alloced, mem_cached + if not self.global_steps == 0 or not self.global_steps == 9: + #return + pass + if self.mpu.get_data_parallel_rank() != 0: + return + + if self.global_rank != 0: + return + + rank = self.global_rank + if print_rank != -1 and rank != print_rank: + return + + get_accelerator().synchronize() + + if reset_max: + get_accelerator().reset_max_memory_cached() + get_accelerator().reset_max_memory_allocated() + + new_alloced = get_accelerator().memory_allocated() + new_cached = get_accelerator().memory_cached() + + delta_alloced = new_alloced - mem_alloced + delta_cached = new_cached - mem_cached + + mem_cached = new_cached + mem_alloced = new_alloced + + max_alloced = get_accelerator().max_memory_allocated() + max_cached = get_accelerator().max_memory_cached() + + # convert to GB for printing + new_alloced /= 1024**3 + new_cached /= 1024**3 + delta_alloced /= 1024**3 + delta_cached /= 1024**3 + max_alloced /= 1024**3 + max_cached /= 1024**3 + + print( + f'RANK={rank} STAGE={self.stage_id} STEP={self.global_steps} MEMSTATS', msg, + f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) ' + f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)') + + def module_state_dict(self, exclude_frozen_parameters=False): + """Override hack to save a pipe model and return the directory path of the save. + + This method should only be called by DeepSpeed's ``save_checkpoint()``. The + recommended way of saving a ``PipelineModule`` outside of ``save_checkpoint()`` + is ``save_state_dict()``. 
+ + Returns: + None + """ + assert isinstance(self.module, PipelineModule) + assert self._curr_ckpt_path is not None, \ + "PipelineEngine expects module_state_dict() to be called from save_checkpoint()" + + self.module.save_state_dict(self._curr_ckpt_path, + checkpoint_engine=self.checkpoint_engine, + exclude_frozen_params=exclude_frozen_parameters) + return None + + def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None, fetch_z3_params=False): + """Override hack to instead use a directory path. + + This is important because pipeline models checkpoint by layer instead of rank. + + If ``state_dict`` is not ``None`` or a ``str``, we revert to ``super()`` expecting a ``dict``. + + Args: + state_dict (str, None): unused + strict (bool, optional): Strict state loading. Defaults to True. + """ + assert custom_load_fn is None, "custom_load_fn not supported w. pipeline parallelism" + state_dict = checkpoint if self.has_moe_layers else checkpoint['module'] + if (state_dict is not None) and (not isinstance(state_dict, str)): + super().load_module_state_dict(state_dict, strict) + return + + self.module.load_state_dir(load_dir=self._curr_ckpt_path, + strict=strict, + checkpoint_engine=self.checkpoint_engine) + + # A map of PipeInstruction types to methods. Each method will be executed with the + # kwargs provided to the PipeInstruction from the scheduler. + _INSTRUCTION_MAP = { + schedule.OptimizerStep: _exec_optimizer_step, + schedule.ReduceGrads: _exec_reduce_grads, + schedule.ReduceTiedGrads: _exec_reduce_tied_grads, + schedule.LoadMicroBatch: _exec_load_micro_batch, + schedule.ForwardPass: _exec_forward_pass, + schedule.BackwardPass: _exec_backward_pass, + schedule.SendActivation: _exec_send_activations, + schedule.RecvActivation: _exec_recv_activations, + schedule.SendGrad: _exec_send_grads, + schedule.RecvGrad: _exec_recv_grads, + } + + def _exec_schedule(self, pipe_schedule): + # Reserve and reset buffers. + self._reserve_pipe_buffers(pipe_schedule.num_pipe_buffers()) + self.fwd_outputs = [] + + # For each step in the schedule + for step_cmds in pipe_schedule: + # For each instruction in the step + for cmd in step_cmds: + if type(cmd) not in self._INSTRUCTION_MAP: + raise RuntimeError(f'{self.__class__.__name__} does not understand instruction {repr(cmd)}') + + # Equivalent to: self._exec_forward_pass(buffer_id=0) + self._exec_instr = MethodType(self._INSTRUCTION_MAP[type(cmd)], self) + self._exec_instr(**cmd.kwargs) + + def get_additional_losses(self): + return self.agg_additional_losses diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/schedule.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/schedule.py new file mode 100644 index 0000000000000000000000000000000000000000..21bf24dad8edcffd54d05d0c7092ab225e1e39dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/schedule.py @@ -0,0 +1,494 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..utils import call_to_str + +from abc import ABC, abstractmethod + + +class PipeSchedule(ABC): + """Directs the execution of a pipeline engine by generating sequences of + :class:`PipeInstruction`. + + Schedules are generators that yield sequences of + :class:`PipeInstruction` to process the micro-batches in one batch. + Each yielded step is atomic in the sense that a barrier + synchronization can be placed between successive steps without + deadlock. 
+ + Below is an example schedule that implements data parallelism with gradient accumulation: + + .. code-block:: python + + class DataParallelSchedule(PipeSchedule): + def steps(self): + for step_id in range(self.micro_batches): + cmds = [ + LoadMicroBatch(buffer_id=0), + ForwardPass(buffer_id=0), + BackwardPass(buffer_id=0), + ] + if step_id == self.micro_batches - 1: + cmds.extend([ + ReduceGrads(), + OptimizerStep(), + ]) + yield cmds + + def num_pipe_buffers(self): + return 1 + + Args: + micro_batches (int): The number of micro-batches that comprise a batch. + stages (int): The number of pipeline stages. + stage_id (int): The pipe stage that will execute the generated schedule. + """ + + def __init__(self, micro_batches, stages, stage_id): + super().__init__() + self.micro_batches = micro_batches + self.stages = stages + self.stage_id = stage_id + self.prev_stage = self.stage_id - 1 + self.next_stage = self.stage_id + 1 + + @abstractmethod + def steps(self): + """Yield a list of :class:`PipeInstruction` for each step in the schedule. + + .. note:: + Schedules must implement ``steps()`` to define the schedule. + + Returns: + Instructions to be executed as one step of the pipeline + """ + pass + + def num_pipe_buffers(self): + """The number of pipeline buffers that will be used by this stage. + + .. note:: + Schedules should specialize ``num_pipe_buffers()`` for memory savings at scale. + + Returns: + The number of buffers for the engine to allocate. + """ + return self.micro_batches + + def _valid_micro_batch(self, micro_batch_id): + return 0 <= micro_batch_id < self.micro_batches + + def _valid_stage(self, stage_id): + return 0 <= stage_id < self.stages + + @property + def stage(self): + """Stage index used to configure this schedule.""" + return self.stage_id + + @property + def num_stages(self): + """The number of total pipeline stages used to configure this schedule.""" + return self.stages + + @property + def num_micro_batches(self): + """The number of total micro_batches used to configure this schedule.""" + return self.micro_batches + + @property + def is_first_stage(self): + """True if the configured ``stage_id`` is the first stage in the pipeline.""" + return self.stage_id == 0 + + @property + def is_last_stage(self): + """True if the configured ``stage_id`` is the last stage in the pipeline.""" + return self.stage_id == self.stages - 1 + + def _buffer_idx(self, micro_batch_id): + """Map a micro-batch index to a pipeline buffer index. + + This method uses a cyclic allocation strategy. + + Args: + micro_batch_id (int): The micro-batch index relative to the beginning of the schedule. + + Returns: + int: The index of the buffer that should store data. + """ + assert self._valid_micro_batch(micro_batch_id) + return micro_batch_id % self.num_pipe_buffers() + + def __iter__(self): + self.it = None + return self + + def __next__(self): + if self.it is None: + self.it = self.steps() + return next(self.it) + + +class InferenceSchedule(PipeSchedule): + """A schedule for inferencing batches using pipeline parallelism. 
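Concrete schedules only need to implement ``steps()`` as a generator; the iteration protocol defined above (``__iter__``/``__next__``) then drives it. A toy subclass, assuming an installed ``deepspeed`` package, makes the contract visible; the string "commands" stand in for real ``PipeInstruction`` objects:

.. code-block:: python

    from deepspeed.runtime.pipe.schedule import PipeSchedule

    class ToySchedule(PipeSchedule):
        def steps(self):
            for mb in range(self.micro_batches):
                yield [f"load mb{mb}", f"forward mb{mb}", f"backward mb{mb}"]

    sched = ToySchedule(micro_batches=2, stages=1, stage_id=0)
    for step_id, cmds in enumerate(sched):
        print(step_id, cmds)   # two steps, each a list of three pseudo-commands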
+ """ + + def steps(self): + """""" + prev_micro_batch_id = -1 + total_steps = self.micro_batches + self.stages - 1 + for step_id in range(total_steps): + cmds = [] + micro_batch_id = step_id - self.stage_id + + # Alternate send/recv buffers + if _is_even(self.stage_id): + recv_buf = step_id % 2 + send_buf = (step_id + 1) % 2 + else: + recv_buf = (step_id + 1) % 2 + send_buf = step_id % 2 + + if self.is_first_stage or self.is_last_stage: + if self._valid_micro_batch(micro_batch_id): + cmds.append(LoadMicroBatch(recv_buf)) + + if _is_even(self.stage_id): + if self._valid_stage(self.next_stage): + if self._valid_micro_batch(micro_batch_id - 1): + cmds.append(SendActivation(send_buf)) + if self._valid_stage(self.prev_stage): + if self._valid_micro_batch(micro_batch_id): + cmds.append(RecvActivation(recv_buf)) + else: + if self._valid_stage(self.prev_stage): + if self._valid_micro_batch(micro_batch_id): + cmds.append(RecvActivation(recv_buf)) + + if self._valid_stage(self.next_stage): + if self._valid_micro_batch(micro_batch_id - 1): + cmds.append(SendActivation(send_buf)) + + if self._valid_micro_batch(micro_batch_id): + cmds.append(ForwardPass(recv_buf)) + + yield cmds + + def num_pipe_buffers(self): + """Only two pipeline buffers are required for inferencing. + + Returns: + ``2`` + """ + return 2 + + +class TrainSchedule(PipeSchedule): + """A schedule for training a batch using hybrid parallelism. + + Pipeline parallelism is extracted through gradient accumulation and thus + convergence follows that of a data parallel approach with the same batch + size. + """ + + def steps(self): + """""" + prev_micro_batch_id = -1 + total_steps = 2 * (self.micro_batches + self.stages - 1) + for step_id in range(total_steps): + # Map the step of the pipeline to the micro-batch id and also whether it is a + # forward or backward pass step. + micro_batch_id, is_forward = self._step_to_micro_batch(step_id) + + if self._valid_micro_batch(prev_micro_batch_id): + prev_buffer = self._buffer_idx(prev_micro_batch_id) + if self._valid_micro_batch(micro_batch_id): + curr_buffer = self._buffer_idx(micro_batch_id) + + cmds = [] + + # Exchange activations + if is_forward: + if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.prev_stage): + cmds.append(SendGrad(prev_buffer)) + if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.prev_stage): + cmds.append(RecvActivation(curr_buffer)) + else: + if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.next_stage): + cmds.append(RecvGrad(curr_buffer)) + if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.next_stage): + cmds.append(SendActivation(prev_buffer)) + + # First/last stage loads + if self.stage_id == 0 or self.stage_id == self.stages - 1: + if is_forward and self._valid_micro_batch(micro_batch_id): + cmds.append(LoadMicroBatch(curr_buffer)) + + # Computation + if self._valid_micro_batch(micro_batch_id): + if is_forward: + cmds.append(ForwardPass(curr_buffer)) + else: + cmds.append(BackwardPass(curr_buffer)) + + # Model step at the end of the batch + if step_id == total_steps - 1: + cmds.append(ReduceTiedGrads()) + cmds.append(ReduceGrads()) + cmds.append(OptimizerStep()) + + # Prepare state for next time + prev_micro_batch_id = micro_batch_id + yield cmds + + def num_pipe_buffers(self): + """Return the number of pipeline buffers required for this stage. 
+ + This is equivalent to the maximum number of in-flight forward passes, + since we need to remember the activations of forward passes in order + to run backpropagation. For synchronous 1F1B, this is equivalent to + the index difference between this stage and the last stage. + """ + buffers = min(self.stages - self.stage_id, self.micro_batches) + return max(2, buffers) + + def _step_to_micro_batch(self, step_id): + if _is_even(step_id) and _is_even(self.stage_id): + micro_batch_id = self._even_step_forward_id(step_id) + is_forward = True + + elif _is_odd(step_id) and _is_odd(self.stage_id): + micro_batch_id = self._odd_step_forward_id(step_id) + is_forward = True + + elif _is_even(step_id) and _is_odd(self.stage_id): + micro_batch_id = self._even_step_backward_id(step_id) + is_forward = False + + elif _is_odd(step_id) and _is_even(self.stage_id): + micro_batch_id = self._odd_step_backward_id(step_id) + is_forward = False + + else: + assert False + + return micro_batch_id, is_forward + + def _even_step_forward_id(self, step_id): + base = step_id // 2 + micro_batch_id = int(base - self.stage_id // 2) + return micro_batch_id + + def _odd_step_forward_id(self, step_id): + base = (step_id - 1) // 2 + micro_batch_id = int(base - self.stage_id // 2) + return micro_batch_id + + def _even_step_backward_id(self, step_id): + base = step_id // 2 + micro_batch_id = int(base - self.stages + (self.stage_id + 1) // 2) + return micro_batch_id + + def _odd_step_backward_id(self, step_id): + base = ((step_id - 1) // 2) - self.stages + 1 + micro_batch_id = int(base + self.stage_id // 2) + return micro_batch_id + + +class DataParallelSchedule(PipeSchedule): + """An example schedule that trains using traditional data parallelism with gradient + accumulation. + """ + + def steps(self): + """""" + for step_id in range(self.micro_batches): + cmds = [ + LoadMicroBatch(buffer_id=0), + ForwardPass(buffer_id=0), + BackwardPass(buffer_id=0), + ] + if step_id == self.micro_batches - 1: + cmds.extend([ + ReduceGrads(), + OptimizerStep(), + ]) + yield cmds + + def num_pipe_buffers(self): + """Only one pipeline buffer needed. + """ + return 1 + + +class PipeInstruction: + """Base class for all instructions to be executed by the pipeline engine. + + All keyword arguments are stored as members similar to a ``namedtuple``. These are + then accessible to the :class:`PipeEngine` during execution. + + Args: + kwargs (optional): keyword arguments to store as members + """ + + def __init__(self, **kwargs): + self.name = self.__class__.__name__ + self.kwargs = kwargs + for key, val in kwargs.items(): + setattr(self, key, val) + + def __repr__(self): + return call_to_str(self.name, **self.kwargs) + + +class OptimizerStep(PipeInstruction): + """Performs one step with the optimizer and zeros gradients. + + .. note:: Should be issued after :class:`ReduceGrads` and :class:`ReduceTiedGrads`. + + .. note:: Can be a synchronization point among data-parallel ranks. + """ + pass + + +class ReduceGrads(PipeInstruction): + """Reduce the computed gradients among data-parallel processes within the stage. + """ + pass + + +class ReduceTiedGrads(PipeInstruction): + """Reduce the computed gradients of tied modules within a pipeline-parallel group. + + .. warning:: + The stages included in this synchronization point are not known until + the model is partitioned among pipeline stages. In the worst case, it + includes all pipeline stages. This instruction should be scheduled + carefully to avoid deadlocks. 
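+
+    As a point of reference, :class:`TrainSchedule` above emits this
+    instruction exactly once per batch, on the final step of every stage,
+    immediately before :class:`ReduceGrads` and :class:`OptimizerStep`:
+
+    .. code-block:: python
+
+        # sketch of the tail of TrainSchedule.steps()
+        if step_id == total_steps - 1:
+            cmds.append(ReduceTiedGrads())
+            cmds.append(ReduceGrads())
+            cmds.append(OptimizerStep())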
+ """ + pass + + +class BufferOpInstruction(PipeInstruction): + """A pipeline instruction that operates on pipeline buffer(s). + + Args: + buffer_id (int): the index of the pipeline buffer() to modify. + """ + + def __init__(self, buffer_id, **kwargs): + super().__init__(buffer_id=buffer_id, **kwargs) + + +# IO +class LoadMicroBatch(BufferOpInstruction): + """Load a micro-batch into a buffer. + + Roughly: + + .. code-block:: python + + buffers['inputs'][buffer_id] = next(data_iter) + """ + pass + + +# Compute +class ForwardPass(BufferOpInstruction): + """Compute a forward pass. + + Roughly: + + .. code-block:: python + + buffers['outputs'][buffer_id] = forward(buffers['inputs'][buffer_id]) + """ + pass + + +class BackwardPass(BufferOpInstruction): + """Compute a backward pass and accumulate gradients. + + Roughly: + + .. code-block:: python + + outputs = buffers['outputs'][buffer_id] + gradients = buffers['gradients'][buffer_id] + torch.autograd.backward(tensors=outputs, + grad_tensors=gradients) + """ + pass + + +# Communication +class SendActivation(BufferOpInstruction): + """Send activations to the next stage in the pipeline. + + Roughly: + + .. code-block:: python + + send(buffers['outputs'][buffer_id]) + + .. note:: + The communication is blocking and must be paired with a :class:`RecvActivation` + on the next pipeline stage to avoid deadlock. + """ + pass + + +class RecvActivation(BufferOpInstruction): + """Receive activations from the previous stage in the pipeline. + + Roughly: + + .. code-block:: python + + buffers['inputs'][buffer_id] = recv() + + .. note:: + The communication is blocking and must be paired with a :class:`SendActivation` + on the previous pipeline stage to avoid deadlock. + """ + pass + + +class SendGrad(BufferOpInstruction): + """Send computed gradients to the previous pipeline stage. + with respect to the received activations + + .. note:: + Only received tensors with ``requires_grad==True`` will produce gradients. + Missing gradients will be replaced with ``None`` on the receiving stage. + + .. note:: + The communication is blocking and must be paired with a :class:`RecvGrad` + on the previous pipeline stage to avoid deadlock. + """ + pass + + +class RecvGrad(BufferOpInstruction): + """Receive computed gradients the next pipeline stage. + + .. note:: + Only activations with ``requires_grad==True`` will produce gradients. + Missing gradients will be replaced with ``None``. + + .. note:: + The communication is blocking and must be paired with a :class:`SendGrad` + on the next pipeline stage to avoid deadlock. + """ + pass + + +def _is_even(x): + return x % 2 == 0 + + +def _is_odd(x): + return x % 2 != 0 diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/topology.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/topology.py new file mode 100644 index 0000000000000000000000000000000000000000..328c199071009669f41a6c2f531d1e741bb690f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/topology.py @@ -0,0 +1,456 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed import comm as dist + +from collections import namedtuple +from itertools import product as cartesian_product + + +class ProcessTopology: + """ Manages the mapping of n-dimensional Cartesian coordinates to linear + indices. This mapping is used to map the rank of processes to the grid + for various forms of parallelism. + + Each axis of the tensor is accessed by its name. 
The provided ordering + of the axes defines the layout of the topology. ProcessTopology uses a "row-major" + layout of the tensor axes, and so axes=['x', 'y'] would map coordinates (x,y) and + (x,y+1) to adjacent linear indices. If instead axes=['y', 'x'] was used, coordinates + (x,y) and (x+1,y) would be adjacent. + + Some methods return ProcessCoord namedtuples. + """ + + def __init__(self, axes, dims): + """Create a mapping of n-dimensional tensor coordinates to linear indices. + + Arguments: + axes (list): the names of the tensor axes + dims (list): the dimension (length) of each axis of the topology tensor + """ + + self.axes = axes # names of each topology axis + self.dims = dims # length of each topology axis + + # This is actually a class that lets us hash {'row':3, 'col':2} mappings + self.ProcessCoord = namedtuple('ProcessCoord', axes) + + self.mapping = {} + ranges = [range(d) for d in dims] + # example: 1, (0,0,1) + for global_rank, coord in enumerate(cartesian_product(*ranges)): + key = {axis: coord[self.axes.index(axis)] for axis in self.axes} + key = self.ProcessCoord(**key) + # for example, {ProcessCoord(row=0, col=1) : 1} + self.mapping[key] = global_rank + + def get_rank(self, **coord_kwargs): + """Return the global rank of a process via its coordinates. + + Coordinates are specified as kwargs. For example: + + >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3]) + >>> X.get_rank(x=0, y=1) + 1 + """ + if len(coord_kwargs) != len(self.axes): + raise ValueError('get_rank() does not support slices. Use filter_match())') + + key = self.ProcessCoord(**coord_kwargs) + assert key in self.mapping, f'key {coord_kwargs} invalid' + return self.mapping[key] + + def get_axis_names(self): + """Return a list of the axis names in the ordering of the topology. """ + return self.axes + + def get_rank_repr(self, rank, omit_axes=['data', 'pipe'], inner_sep='_', outer_sep='-'): + """Return a string representation of a rank. + + This method is primarily used for checkpointing model data. + + For example: + >>> topo = Topo(axes=['a', 'b'], dims=[2, 2]) + >>> topo.get_rank_repr(rank=3) + 'a_01-b_01' + >>> topo.get_rank_repr(rank=3, omit_axes=['a']) + 'b_01' + + Args: + rank (int): A rank in the topology. + omit_axes (list, optional): Axes that should not be in the representation. Defaults to ['data', 'pipe']. + inner_sep (str, optional): [description]. Defaults to '_'. + outer_sep (str, optional): [description]. Defaults to '-'. + + Returns: + str: A string representation of the coordinate owned by ``rank``. + """ + omit_axes = frozenset(omit_axes) + axes = [a for a in self.get_axis_names() if a not in omit_axes] + names = [] + for ax in axes: + ax_rank = getattr(self.get_coord(rank=rank), ax) + names.append(f'{ax}{inner_sep}{ax_rank:02d}') + return outer_sep.join(names) + + def get_dim(self, axis): + """Return the number of processes along the given axis. + + For example: + >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3]) + >>> X.get_dim('y') + 3 + """ + if axis not in self.axes: + return 0 + return self.dims[self.axes.index(axis)] + + def get_coord(self, rank): + """Return the coordinate owned by a process rank. + + The axes of the returned namedtuple can be directly accessed as members. 
For + example: + >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3]) + >>> coord = X.get_coord(rank=1) + >>> coord.x + 0 + >>> coord.y + 1 + """ + for coord, idx in self.mapping.items(): + if idx == rank: + return coord + raise ValueError(f'rank {rank} not found in topology.') + + def get_axis_comm_lists(self, axis): + """ Construct lists suitable for a communicator group along axis ``axis``. + + Example: + >>> topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2]) + >>> topo.get_axis_comm_lists('pipe') + [ + [0, 4], # data=0, model=0 + [1, 5], # data=0, model=1 + [2, 6], # data=1, model=0 + [3, 7], # data=1, model=1 + ] + + Returns: + A list of lists whose coordinates match in all axes *except* ``axis``. + """ + + # We don't want to RuntimeError because it allows us to write more generalized + # code for hybrid parallelisms. + if axis not in self.axes: + return [] + + # Grab all axes but `axis` + other_axes = [a for a in self.axes if a != axis] + + lists = [] + + # Construct all combinations of coords with other_axes + ranges = [range(self.get_dim(a)) for a in other_axes] + for coord in cartesian_product(*ranges): + other_keys = {a: coord[other_axes.index(a)] for a in other_axes} + # now go over all ranks in `axis`. + sub_list = [] + for axis_key in range(self.get_dim(axis)): + key = self.ProcessCoord(**other_keys, **{axis: axis_key}) + sub_list.append(self.mapping[key]) + lists.append(sub_list) + + return lists + + def filter_match(self, **filter_kwargs): + """Return the list of ranks whose coordinates match the provided criteria. + + Example: + >>> X = ProcessTopology(axes=['pipe', 'data', 'model'], dims=[2, 2, 2]) + >>> X.filter_match(pipe=0, data=1) + [2, 3] + >>> [X.get_coord(rank) for rank in X.filter_match(pipe=0, data=1)] + [ProcessCoord(pipe=0, data=1, model=0), ProcessCoord(pipe=0, data=1, model=1)] + + Arguments: + **filter_kwargs (dict): criteria used to select coordinates. + + Returns: + The list of ranks whose coordinates match filter_kwargs. + """ + + def _filter_helper(x): + for key, val in filter_kwargs.items(): + if getattr(x, key) != val: + return False + return True + + coords = filter(_filter_helper, self.mapping.keys()) + return [self.mapping[coord] for coord in coords] + + def get_axis_list(self, axis, idx): + """Returns the list of global ranks whose coordinate in an axis is idx. + + For example: + >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3]) + >>> X.get_axis_list(axis='x', idx=0) + [0, 1, 2] + >>> X.get_axis_list(axis='y', idx=0) + [0, 3] + """ + + # This could be faster by generating the desired keys directly instead of + # filtering. + axis_num = self.axes.index(axis) + ranks = [self.mapping[k] for k in self.mapping.keys() if k[axis_num] == idx] + return ranks + + def world_size(self): + return len(self.mapping) + + def __str__(self): + return str(self.mapping) + + +def _prime_factors(N): + """ Returns the prime factorization of positive integer N. """ + if N <= 0: + raise ValueError("Values must be strictly positive.") + + primes = [] + while N != 1: + for candidate in range(2, N + 1): + if N % candidate == 0: + primes.append(candidate) + N //= candidate + break + return primes + + +class PipeDataParallelTopology(ProcessTopology): + """ A topology specialization for hybrid data and pipeline parallelism. + + Uses data parallelism on the last dimension to encourage gradient + reductions to use high-bandwidth intra-node links and lower-volume + pipeline communications to use low-bandwidth inter-node links. 
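+
+    For example, a 2-stage pipeline with 2-way data parallelism maps ranks so
+    that each data-parallel (gradient all-reduce) group holds adjacent ranks.
+    A small illustrative sketch:
+
+    .. code-block:: python
+
+        topo = PipeDataParallelTopology(num_pp=2, num_dp=2)
+        topo.get_axis_comm_lists('data')  # [[0, 1], [2, 3]] -> all-reduce groups
+        topo.get_axis_comm_lists('pipe')  # [[0, 2], [1, 3]] -> pipeline stages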
+ """ + + def __init__(self, num_pp, num_dp): + super().__init__(axes=['pipe', 'data'], dims=[num_pp, num_dp]) + + +class PipeModelDataParallelTopology(ProcessTopology): + """ A topology for hybrid pipeline, model, and data parallelism. """ + + def __init__(self, num_pp, num_mp, num_dp): + super().__init__(axes=['pipe', 'data', 'model'], dims=[num_pp, num_dp, num_mp]) + + +class PipelineParallelGrid: + """Implements a grid object that stores the data parallel ranks + corresponding to each of the model parallel stages + + The grid object organizes the processes in a distributed pytorch job + into a 2D grid, of stage_id and data_parallel_id. + + self.stage_id and self.data_parallel_id stores the stage id + and the data parallel id of current process. + + self.dp_group groups the processes by stage_id. + self.dp_group[i], is a list containing all process ranks whose + stage_id is i. + + self.p2p_groups stores a list of tuple, where each tuple + stores process ranks of adjacent stages for a given data_parallel_id. + For example if num_stage is 5 then a tuple [7,8] represents stages [3, 4], + with data_parallel id = 1. A stage wrap around will appear as non-adjacent ranks, + for example tuple [4,0] with representing wrap-around stage 4 and 0, for + data_parallel_id = 0, or similarly [9,5] represents wrapped around stages [4,0] + for data_parallel_id = 1. + """ + + def __init__(self, topology=None, process_group=None): + # TODO use process_group if provided + self.global_rank = dist.get_rank() + self.world_size = dist.get_world_size() + if topology is not None: + if self.global_rank == 0: + print('Using topology:', topology) + self._topo = topology + else: + num_pp = 1 + num_dp = 1 + for idx, prime in enumerate(_prime_factors(self.world_size)): + if idx % 2 == 0: + num_pp *= prime + else: + num_dp *= prime + self._topo = PipeDataParallelTopology(num_dp=num_dp, num_pp=num_pp) + self.data_parallel_size = max(self._topo.get_dim('data'), 1) + self.pipe_parallel_size = max(self._topo.get_dim('pipe'), 1) + self.model_parallel_size = max(self._topo.get_dim('model'), 1) + self.slice_parallel_size = self.model_parallel_size + assert self._is_grid_valid(), "Invalid Grid" + + self.stage_id = self.get_stage_id() + self.data_parallel_id = self.get_data_parallel_id() + + # Create new ProcessGroups for all model parallelism. DeepSpeedLight uses these + # to detect overflow, etc. 
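+        # Illustrative sketch (assuming the pipe=2 x data=2 grid from the
+        # topology example): get_axis_list(axis='data', idx=dp) returns all
+        # ranks of one model replica, e.g. [0, 2] for dp=0 and [1, 3] for
+        # dp=1, and each rank joins the one group that contains it.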
+ self.ds_model_proc_group = None + self.ds_model_rank = -1 + for dp in range(self.data_parallel_size): + ranks = sorted(self._topo.get_axis_list(axis='data', idx=dp)) + if self.global_rank == 0: + #print(f'RANK={self.global_rank} building DeepSpeed model group: {ranks}') + pass + proc_group = dist.new_group(ranks=ranks) + if self.global_rank in ranks: + self.ds_model_proc_group = proc_group + self.ds_model_world_size = len(ranks) + self.ds_model_rank = ranks.index(self.global_rank) + assert self.ds_model_rank > -1 + assert self.ds_model_proc_group is not None + + # Create new ProcessGroup for gradient all-reduces - these are the data parallel groups + self.dp_group = [] + self.dp_groups = self._topo.get_axis_comm_lists('data') + for g in self.dp_groups: + proc_group = dist.new_group(ranks=g) + if self.global_rank in g: + self.dp_group = g + self.dp_proc_group = proc_group + + self.is_first_stage = (self.stage_id == 0) + self.is_last_stage = (self.stage_id == (self.pipe_parallel_size - 1)) + + self.p2p_groups = self._build_p2p_groups() + + # Create new ProcessGroup for pipeline collectives - these are pipe parallel groups + self.pp_group = [] + self.pp_proc_group = None + self.pipe_groups = self._topo.get_axis_comm_lists('pipe') + for ranks in self.pipe_groups: + if self.global_rank == 0: + #print(f'RANK={self.global_rank} building pipeline group: {ranks}') + pass + proc_group = dist.new_group(ranks=ranks) + if self.global_rank in ranks: + self.pp_group = ranks + self.pp_proc_group = proc_group + assert self.pp_proc_group is not None + + # Create new ProcessGroup for model (tensor-slicing) collectives + + # Short circuit case without model parallelism. + # TODO: it would be nice if topology had bcast semantics to avoid this branching + # case? + if self.model_parallel_size == 1: + for group_rank in range(self.world_size): + group_rank = [group_rank] + group = dist.new_group(ranks=group_rank) + if group_rank[0] == self.global_rank: + self.slice_group = group_rank + self.slice_proc_group = group + return + else: + self.mp_group = [] + self.model_groups = self._topo.get_axis_comm_lists('model') + for g in self.model_groups: + proc_group = dist.new_group(ranks=g) + if self.global_rank in g: + self.slice_group = g + self.slice_proc_group = proc_group + + def get_stage_id(self): + return self._topo.get_coord(rank=self.global_rank).pipe + + def get_data_parallel_id(self): + return self._topo.get_coord(rank=self.global_rank).data + + def _build_p2p_groups(self): + """Groups for sending and receiving activations and gradients across model + parallel stages. 
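+
+        A small illustrative sketch, assuming the pipe=2 x data=2 grid used
+        above: the 'pipe' communication lists are [[0, 2], [1, 3]], so each
+        rank is paired with the next stage of its own pipeline, wrapping
+        around at the last stage:
+
+        .. code-block:: python
+
+            # p2p_lists == [[0, 2], [1, 3], [2, 0], [3, 1]]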
+ """ + comm_lists = self._topo.get_axis_comm_lists('pipe') + p2p_lists = [] + for rank in range(self.world_size): + for l in comm_lists: + assert len(l) == self.pipe_parallel_size + if rank in l: + idx = l.index(rank) + buddy_rank = l[(idx + 1) % self.pipe_parallel_size] + p2p_lists.append([rank, buddy_rank]) + break # next global rank + assert len(p2p_lists) == self.world_size + return p2p_lists + + def _is_grid_valid(self): + ranks = 1 + for ax in self._topo.get_axis_names(): + ranks *= self._topo.get_dim(ax) + return ranks == dist.get_world_size() + + #returns the global rank of the process with the provided stage id + #which has the same data_parallel_id as caller process + def stage_to_global(self, stage_id, **kwargs): + me = self._topo.get_coord(self.global_rank) + transform = me._replace(pipe=stage_id, **kwargs)._asdict() + return self._topo.get_rank(**transform) + + def topology(self): + return self._topo + + # MPU functions for DeepSpeed integration + def get_global_rank(self): + return self.global_rank + + def get_pipe_parallel_rank(self): + """ The stage of the pipeline this rank resides in. """ + return self.get_stage_id() + + def get_pipe_parallel_world_size(self): + """ The number of stages in the pipeline. """ + return self.pipe_parallel_size + + def get_pipe_parallel_group(self): + """ The group of ranks within the same pipeline. """ + return self.pp_proc_group + + def get_data_parallel_rank(self): + """ Which pipeline this rank resides in. """ + return self.data_parallel_id + + def get_data_parallel_world_size(self): + """ The number of pipelines. """ + return self.data_parallel_size + + def get_data_parallel_group(self): + """ The group of ranks within the same stage of all pipelines. """ + return self.dp_proc_group + + # These are model parallel groups across all types of model parallelism. + # Deepspeed uses them to detect overflow, etc. + def get_model_parallel_rank(self): + return self.ds_model_rank + + def get_model_parallel_world_size(self): + return self.ds_model_world_size + + def get_model_parallel_group(self): + return self.ds_model_proc_group + + # For Megatron-style tensor slicing + def get_slice_parallel_rank(self): + if 'model' in self._topo.get_axis_names(): + return self._topo.get_coord(rank=self.global_rank).model + else: + return 0 + + def get_slice_parallel_world_size(self): + return self.slice_parallel_size + + def get_slice_parallel_group(self): + return self.slice_proc_group diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1ccca09a9e69806c6e03e94b5672b106cae6fc20 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .partition_parameters import ZeroParamType +from .partition_parameters import ZeroParamStatus +from .partition_parameters import Init +from .partition_parameters import GatheredParameters +from .partition_parameters import register_external_parameter + +from .tiling import TiledLinear +from .tiling import TiledLinearReturnBias + +from .mics import MiCS_Init diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c18288f872f3f847dab28cb26c032c2d44a36ca6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04df96d96142fb13542dfe35010523d578f9b193 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/contiguous_memory_allocator.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/contiguous_memory_allocator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d993ac5b2926394cacbf04b840428950a3dedda6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/contiguous_memory_allocator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/linear.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60c7132016644474991bbcdd2b8eb7af03a526dd Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/linear.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/mics.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/mics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14ba4f92bf7515f854ec8d11527244c98f06e0c9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/mics.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/mics_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/mics_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d25e247ab14849b5fcbf45b38a37dc8d1939ba8d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/mics_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/offload_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/offload_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cac46d9f5fb2bf717ae2a12e77e84d55d338b23 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/offload_config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/parameter_offload.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/parameter_offload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffd1d682ec6515af18b0958d7381b50d0c1713af Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/parameter_offload.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/partition_parameters.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/partition_parameters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..241ace9d4553c832abe52087b3ace2b806ea6bc9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/partition_parameters.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/partitioned_param_coordinator.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/partitioned_param_coordinator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdf366e6f663ac1c60c19af2b554a2804d423d73 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/partitioned_param_coordinator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/partitioned_param_profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/partitioned_param_profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2868f1362172d3b6569d1e34e809caf363d0ca7d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/partitioned_param_profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/stage3.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/stage3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..084eb8e7cb0e2d85bf80dadb3a802ae69d0eb387 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/stage3.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/stage_1_and_2.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/stage_1_and_2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78541a808daa116fd692bcc8faf2574fe2b84295 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/stage_1_and_2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/test.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/test.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..834be54f4c730d2157390c4a42ad33c1347af753 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/test.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/tiling.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/tiling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..caf000d9c143302be5d8c6f64866e411aba97126 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/tiling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16b7d77d9dc4f62755a196e0a1bde623ffd7eb69 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/config.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/config.py new file mode 100644 index 0000000000000000000000000000000000000000..76583c129cb91868469894fca0a98af3bf226bc6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/config.py @@ -0,0 +1,317 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import sys +from typing import Optional +from enum import Enum +from deepspeed.pydantic_v1 import Field, validator, root_validator +from deepspeed.runtime.config_utils import get_scalar_param, pp_int, DeepSpeedConfigModel +from deepspeed.utils import logger +from .offload_config import DeepSpeedZeroOffloadParamConfig, DeepSpeedZeroOffloadOptimizerConfig, OffloadDeviceEnum + +# ZeRO optimization. By default, this optimization is not enabled. +# Users have to configure the desired optimization (0 means disabled) in params.json as below example: +ZERO_FORMAT = """ +ZeRO optimization should be enabled as: +"session_params": { + "zero_optimization": { + "stage": [0|1|2], + "stage3_max_live_parameters" : 1000000000, + "stage3_max_reuse_distance" : 1000000000, + "allgather_partitions": [true|false], + "use_multi_rank_bucket_allreduce": [true|false], + "allgather_bucket_size": 500000000, + "reduce_scatter": [true|false], + "contiguous_gradients" : [true|false] + "overlap_comm": [true|false], + "reduce_bucket_size": 500000000, + "load_from_fp32_weights": [true|false], + "cpu_offload": [true|false] (deprecated), + "cpu_offload_params" : [true|false] (deprecated), + "cpu_offload_use_pin_memory": [true|false] (deprecated), + "sub_group_size" : 1000000000000, + "offload_param": {...}, + "offload_optimizer": {...}, + "ignore_unused_parameters": [true|false], + "round_robin_gradients": [true|false], + "zero_hpz_partition_size": 1, + "zero_quantized_weights": [true|false], + "zero_quantized_nontrainable_weights": [true|false], + "zero_quantized_gradients": [true|false], + "memory_efficient_linear": [true|false], + "override_module_apply": [true|false], + } +} +""" + +ZERO_OPTIMIZATION = "zero_optimization" + + +def read_zero_config_deprecated(param_dict): + zero_config_dict = {} + zero_config_dict["stage"] = 1 if param_dict[ZERO_OPTIMIZATION] else 0 + if zero_config_dict["stage"] > 0: + zero_config_dict["allgather_bucket_size"] = get_scalar_param(param_dict, "allgather_size", 5e8) + logger.warning( + "DeepSpeedConfig: this format of ZeRO optimization setup is deprecated. Please use the following format: {}". 
+ format(ZERO_FORMAT)) + return zero_config_dict + + +def get_zero_config(param_dict): + if ZERO_OPTIMIZATION in param_dict: + zero_config_dict = param_dict[ZERO_OPTIMIZATION] + if isinstance(zero_config_dict, bool): + zero_config_dict = read_zero_config_deprecated(param_dict) + else: + zero_config_dict = {} + return DeepSpeedZeroConfig(**zero_config_dict) + + +class ZeroStageEnum(int, Enum): + """ Enum class for possible zero stages """ + disabled = 0 + optimizer_states = 1 + gradients = 2 + weights = 3 + max_stage = 3 + + +class DeepSpeedZeroConfig(DeepSpeedConfigModel): + """ + Sets parameters for ZeRO optimizations. + """ + + stage: ZeroStageEnum = 0 + """ + Chooses different stages of ZeRO Optimizer. Stage 0, 1, 2, and 3 refer + to disabled, optimizer state partitioning, and optimizer+gradient state + partitioning, and optimizer+gradient+parameter partitioning, respectively. + """ + + contiguous_gradients: bool = True + """ + Copies the gradients to a contiguous buffer as they are produced. Avoids + memory fragmentation during backward pass. + """ + + reduce_scatter: bool = True + """ + Uses reduce or reduce scatter instead of allreduce to average gradients + """ + + reduce_bucket_size: int = Field(pp_int(5e8), ge=0) + """ + Number of elements reduced/allreduced at a time. Limits the memory required + for the allgather for large model sizes + """ + + use_multi_rank_bucket_allreduce: bool = True + """ + Combine the reduce buckets of the different ranks and do an All-Reduce instead of multiple Reduce ops. + This feature is useful when the model is small and we want to scale it on too many GPUs which therefore + reduces the message sizes of each packet. + """ + + allgather_partitions: bool = True + """ + Chooses between allgather collective or a series of broadcast collectives + to gather updated parameters from all the GPUs at the end of each step + """ + + allgather_bucket_size: int = Field(pp_int(5e8), ge=0) + """ + Number of elements allgathered at a time. Limits the memory required for + the allgather for large model sizes + """ + + overlap_comm: bool = None # None for dynamic default value (see validator `overlap_comm_valid` below) + """ + Attempts to overlap the reduction of the gradients with backward computation + """ + + load_from_fp32_weights: bool = True + """ + Boolean indicating whether to initialize fp32 master weights from fp32 + copies in checkpoint (no precision loss) or from model's fp16 copies (with + precision loss). This can be used to initialize optimizer state even when + checkpoint is missing optimizer state. + """ + + elastic_checkpoint: bool = False + """ + Enable loading checkpoint that was saved by job with different GPU count. + No longer supported. + """ + + offload_param: Optional[DeepSpeedZeroOffloadParamConfig] = None + """ + Enable offloading of model parameters to CPU or NVMe. This frees up GPU + memory for larger models or batch sizes. Valid only with stage 3. Expects a + dictionary containing values for :any:`DeepSpeedZeroOffloadParamConfig`. + """ + + offload_optimizer: Optional[DeepSpeedZeroOffloadOptimizerConfig] = None + """ + Enable offloading of optimizer state to CPU or NVMe, and optimizer + computation to CPU. This frees up GPU memory for larger models or batch + sizes. Valid for ZeRO stage 1, 2, 3. Expects a dictionary containing values + for :any:`DeepSpeedZeroOffloadOptimizerConfig`. + """ + + sub_group_size: int = Field(pp_int(1e9), ge=0) + """ + Tile size for parameter processing to fit massive models (with trillions of + parameters). 
Used by ZeRO3-Offload and ZeRO-Infinity + """ + + cpu_offload_param: bool = Field( + None, + deprecated=True, + new_param="offload_param", + new_param_fn=(lambda val: DeepSpeedZeroOffloadParamConfig(device=OffloadDeviceEnum.cpu) if val else None), + ) + """ Deprecated, please use ``offload_param`` """ + + cpu_offload_use_pin_memory: bool = Field( + None, + deprecated=True, + new_param="offload_param or offload_optimizer", + set_new_param=False, + ) + """ Deprecated, please use ``offload_param`` or ``offload_optimizer`` """ + + cpu_offload: bool = Field( + None, + deprecated=True, + new_param="offload_optimizer", + new_param_fn=(lambda val: DeepSpeedZeroOffloadOptimizerConfig(device=OffloadDeviceEnum.cpu) if val else None), + ) + """ Deprecated, please use ``offload_optimizer`` """ + + prefetch_bucket_size: int = Field(pp_int(5e7), ge=0, alias="stage3_prefetch_bucket_size") + """ + Maximum number of parameter elements to fetch ahead of use. Used by ZeRO3, + ZeRO3-Offload, ZeRO-Infinity, and ZeRO-Inference. + """ + + param_persistence_threshold: int = Field(pp_int(1e5), ge=0, alias="stage3_param_persistence_threshold") + """ + Do not partition parameters smaller than this threshold. Smaller values use + less memory, but can greatly increase communication (especially + latency-bound messages). + """ + + model_persistence_threshold: int = Field(pp_int(sys.maxsize, "sys.maxsize"), + ge=0, + alias="stage3_model_persistence_threshold") + """ + Maximum number of parameter elements that can be persisted in GPU and not + partitioned. This imposes an upper bound on the number of unpartitioned + parameters resulting from param_persistence_threshold setting. Used by + ZeRO3-Offload, ZeRO-Infinity and ZeRO-Inference. + """ + + max_live_parameters: int = Field(pp_int(1e9), ge=0, alias="stage3_max_live_parameters") + """ + The maximum number of parameters resident per GPU before releasing. Smaller + values use less memory, but perform more communication. + """ + + max_reuse_distance: int = Field(pp_int(1e9), ge=0, alias="stage3_max_reuse_distance") + """ + Do not release a parameter if it will be reused within this threshold of + parameters. Smaller values use less memory, but perform more communication. + """ + + gather_16bit_weights_on_model_save: bool = Field(False, alias="stage3_gather_16bit_weights_on_model_save") + """ + Consolidate the weights before saving the model by ``save_16bit_model()``. + Since the weights are partitioned across GPUs, they aren’t part of + ``state_dict``, so this function automatically gathers the weights when + this option is enabled and then saves the fp16 model weights. + """ + + stage3_gather_fp16_weights_on_model_save: bool = Field(False, + deprecated=True, + new_param="gather_16bit_weights_on_model_save") + """ Deprecated, please use ``gather_16bit_weights_on_model_save`` """ + + ignore_unused_parameters: bool = True + """ + Unused parameters in modules may be unexpected in static networks, but + could be normal in dynamic networks. This controls whether or not training + should terminate with an error message when unused parameters are detected. + This is set to ``True`` by default, which means unused parameters are + ignored and training continues. Now is just used in stage 2. + """ + + legacy_stage1: bool = False + """ + For backward-compatibility enable old ZeRO stage 1 implementation. Use at + your own risk, will be deprecated soon. 
+ """ + + round_robin_gradients: bool = False + """ + Stage 1 and 2 optimization for CPU offloading that parallelizes gradient + copying to CPU memory among ranks by fine-grained gradient partitioning. + Performance benefit grows with gradient accumulation steps (more copying + between optimizer steps) or GPU count (increased parallelism). + """ + zero_hpz_partition_size: int = Field(1, ge=0) + """ + Number of ranks in zero parameters partitioning secondary group + """ + zero_quantized_weights: bool = False + """ + Boolean indicating whether to quantize zero parameters (weights) + for efficient all_gather comm + """ + zero_quantized_nontrainable_weights: bool = False + """ + Boolean indicating whether to quantize non-trainable zero parameters (weights) + for efficient memory usage and communication. Different from zero_quantized_weights + that stores the weights in original precision and only perform quantization during communication, + this flag will store the weights in quantized precision. This is useful for LoRA training. + """ + zero_quantized_gradients: bool = False + """ + Boolean indicating whether to use quantized zero gradients + for efficient all_2_all_reduce comm + """ + + mics_shard_size: int = Field(-1, new_param="mics_shard_size") + + mics_hierarchical_params_gather: bool = False + + memory_efficient_linear: bool = True + """ + Use memory efficient linear implementation, for Stage 3. + """ + """ + Whether force load checkpoint in pipeline mode, current only for Stage 3. + """ + pipeline_loading_checkpoint: bool = False + + override_module_apply: bool = True + """ + Override nn.Module apply function, for Stage 3. + """ + + # Validators + @validator("overlap_comm") + def overlap_comm_valid(cls, field_value, values): + if field_value is None: + assert ("stage" in values), "DeepSpeedZeroConfig: 'stage' must be defined before 'overlap_comm'" + field_value = values["stage"] == ZeroStageEnum.weights + return field_value + + @root_validator + def offload_ratio_check(cls, values): + offload_config = getattr(values, "offload_optimizer", {}) + if offload_config and offload_config.ratio < 1.0: + assert values.get("stage") == ZeroStageEnum.weights, "Partial offloading only supported for ZeRO Stage 3." + return values diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/contiguous_memory_allocator.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/contiguous_memory_allocator.py new file mode 100644 index 0000000000000000000000000000000000000000..35b3d5c7dd5d333e621cc554a73bb3c90e2a450e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/contiguous_memory_allocator.py @@ -0,0 +1,287 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from deepspeed import comm as dist + + +def print_rank_0(message): + if dist.get_rank() == 0: + print(message) + + +class ContiguousMemoryAllocator(object): + + def __init__(self, size, dtype, device): + self.buffer = torch.zeros(size, dtype=dtype, device=device) + + #address to contiguous size available + self.contiguous_sizes = {} + + self.contiguous_sizes[0] = size + + #tensor id to its address + self.tensor_addresses = {} + + #tensor address to its size + self.tensor_sizes = {} + + #tensor address to ids + self.tensor_ids = {} + + #id to tensors + self.tensor_map = {} + + #id to params. 
Maps each tensor buffer to list of parameters that uses it + self.id_to_params = {} + + self.total_size = size + self.total_free = size + self.largest_contiguous = size + self.max_allocated = 0 + + self.count = 0 + + #create a tensor of size from the pre-allocated buffer + #if not enough free space will fail + #if not enough contiguous space, will defragment and allocate + def allocate_tensor(self, size): + free_before = self.total_free + + assert size <= self.total_free, "Not enough memory in buffer. Allocation failed" + if self.largest_contiguous < size: + print_rank_0("Needs defragmentation to allocate. Before Defragmentation:") + self.print_allocation(resolution=100) + self._defragment_memory() + #set the param data to the new tensor buffer locations + self._reset_param_data() + print_rank_0("After defragmentation:") + self.print_allocation(resolution=100) + + self.total_free = self.total_free - size + + allocated = self.total_size - self.total_free + if allocated > self.max_allocated: + self.max_allocated = allocated + + tensor_address = self._get_new_tensor_address(size) + + ret_tensor = self._get_new_tensor(tensor_address, size) + print_rank_0( + f"Free before allocation {free_before}. Allocating {size}. Free after allocation {self.total_free}. Max allocated {self.max_allocated}" + ) + assert self.total_free + size == free_before, "Allocation bookkeeping error" + + return ret_tensor + + #assigns the tensor data to the param data and keeps track of the assignment + #any change the underlying buffer from defragmentation will cause a + #reassignment of the param data + def assign_to_param(self, tensor, param, numel, shape): + tensor_id = id(tensor) + + assert tensor_id in self.tensor_map.keys(), "No such tensor allocated by the allocator." + assert tensor.numel() >= numel, "Assert tensor buffer does is not large enough" + assert not tensor_id in self.id_to_params.keys(), "This tensor has already been assigned to a param" + + self.id_to_params[tensor_id] = [param] + + replicated_tensor = tensor.narrow(0, 0, numel).view(shape) + param.data = replicated_tensor.data + param.contiguous_tensor_id = tensor_id + + #deletes the tensor and frees up the underlying buffer + def release_tensor(self, tensor): + free_before = self.total_free + tensor_id = id(tensor) + tensor_size = tensor.numel() + self._release_tensor(tensor_id) + self._unassign_params(tensor_id) + self.total_free += tensor_size + print_rank_0( + f"Free before release {free_before}. Released {tensor.numel()}. Total free after {self.total_free}.") + assert self.total_free - tensor_size == free_before, "Release bookkeeping error" + + def release_tensor_with_id(self, tensor_id): + free_before = self.total_free + assert tensor_id in self.tensor_map.keys(), "Invalid tensor id" + tensor = self.tensor_map[tensor_id] + tensor_size = tensor.numel() + self._release_tensor(tensor_id) + self._unassign_params(tensor_id) + self.total_free += tensor_size + print_rank_0( + f"Free before release {free_before}. Released {tensor.numel()}. Total free after {self.total_free}.") + assert self.total_free - tensor_size == free_before, "Release bookkeeping error" + + #shows the current memory allocation at specified resolution + def print_allocation(self, resolution=200): + total_size = self.buffer.numel() * 1.0 + empty = [] + for addr, size in self.contiguous_sizes.items(): + start = int(addr * resolution / total_size) + end = int((addr + size) * resolution / total_size) + empty.extend(range(start, end)) + s = '' + for i in range(resolution): + s += '.' 
if i in empty else '|' + print_rank_0(s) + + def max_allocated(self): + return self.max_allocated + + #to be called after defragmentation that moves the tensor buffers + #this call reassigns the data of all the parameters using the tensor buffers + def _reset_param_data(self): + for id, tensor in self.tensor_map.items(): + for param in self.id_to_params[id]: + param.data = tensor.narrow(0, 0, param.numel()).view(param.data.shape).data + + def _unassign_params(self, tensor_id): + if tensor_id in self.id_to_params.keys(): + del self.id_to_params[tensor_id] + + def _release_tensor(self, tensor_id): + assert tensor_id in self.tensor_addresses, f"Tensor id {tensor_id} not found" + + address = self.tensor_addresses[tensor_id] + contiguous_size = self.tensor_map[tensor_id].numel() + + del self.tensor_addresses[tensor_id] + del self.tensor_ids[address] + del self.tensor_map[tensor_id] + del self.tensor_sizes[address] + + self._consolidate_address(address, contiguous_size) + self.largest_contiguous = self._largest_contiguous() + + def _consolidate_address(self, address, contiguous_size): + + #consolidate next buffer + end_address = address + contiguous_size + if end_address in self.contiguous_sizes: + contiguous_size += self.contiguous_sizes[end_address] + del self.contiguous_sizes[end_address] + + #consolidate previous buffer + for addr, size in self.contiguous_sizes.items(): + if addr + size == address: + del self.contiguous_sizes[addr] + contiguous_size += size + address = addr + break + + self.contiguous_sizes[address] = contiguous_size + + def _defragment_memory(self): + empty_addresses = sorted(self.contiguous_sizes.keys()) + tensor_addresses = sorted(self.tensor_addresses.values()) + + tensor_index = 0 + + while tensor_index < len(tensor_addresses): + + empty_addr = empty_addresses[0] + empty_size = self.contiguous_sizes[empty_addr] + + tensor_addr = tensor_addresses[tensor_index] + tensor_size = self.tensor_sizes[tensor_addr] + tensor_id = self.tensor_ids[tensor_addr] + tensor = self.tensor_map[self.tensor_ids[tensor_addr]] + + assert tensor_size == tensor.numel(), \ + f"Size mismatch. 
{tensor_size} is allocated at addr {tensor_addr} but tensor size is {tensor.numel()} " + + assert empty_addr != tensor_addr, \ + f"Cannot have same empty address {empty_addr} and tensor address {tensor_addr}" + + if empty_addr < tensor_addr: + + if empty_size >= tensor_size: + dest_buffer = self.buffer.narrow(0, empty_addr, tensor_size) + src_buffer = self.buffer.narrow(0, tensor_addr, tensor_size) + dest_buffer.data.copy_(src_buffer.data) + else: + + #print_rank_0(f'empty addr : {empty_addr}, empty size {empty_size} tensor addr {tensor_addr} tensor size {tensor_size}') + src_addr = tensor_addr + dest_addr = empty_addr + while src_addr < (tensor_addr + tensor_size): + copy_size = min(empty_size, tensor_addr + tensor_size - src_addr) + + dest_buffer = self.buffer.narrow(0, dest_addr, copy_size) + src_buffer = self.buffer.narrow(0, src_addr, copy_size) + + dest_buffer.data.copy_(src_buffer.data) + + src_addr += copy_size + dest_addr += copy_size + + self._replace_old_address_with_new(tensor_id, empty_addr) + + tensor_index += 1 + + else: + tensor_index += 1 + + empty_addresses = sorted(self.contiguous_sizes.keys()) + + def _replace_old_address_with_new(self, tensor_id, new_address): + + tensor = self.tensor_map[tensor_id] + tensor_size = tensor.numel() + tensor.data = self.buffer.narrow(0, new_address, tensor_size).data + + self._release_tensor(tensor_id) + self._mark_as_occupied(new_address, tensor_size) + + self.tensor_ids[new_address] = tensor_id + self.tensor_map[tensor_id] = tensor + self.tensor_addresses[tensor_id] = new_address + self.tensor_sizes[new_address] = tensor_size + + def _get_new_tensor_address(self, size): + tensor_address = None + for address, contiguous_size in self.contiguous_sizes.items(): + if contiguous_size >= size and \ + (tensor_address is None or \ + contiguous_size < self.contiguous_sizes[tensor_address]): + tensor_address = address + assert tensor_address is not None, "address cannot be None" + return tensor_address + + def _get_new_tensor(self, address, size): + available_contiguous_size = self.contiguous_sizes[address] + + assert size <= available_contiguous_size, \ + f"Tensor numel {size} is large than available contiguous size {available_contiguous_size}" + self.count += 1 + new_tensor = self.buffer.narrow(0, address, size) + tensor_id = id(new_tensor) + self.tensor_addresses[tensor_id] = address + self.tensor_sizes[address] = size + + self.tensor_ids[address] = tensor_id + self.tensor_map[tensor_id] = new_tensor + + self._mark_as_occupied(address, size) + + return new_tensor + + def _largest_contiguous(self): + if len(self.contiguous_sizes) > 0: + return max([size for _, size in self.contiguous_sizes.items()]) + else: + return 0 + + def _mark_as_occupied(self, address, size): + available_contiguous_size = self.contiguous_sizes[address] + del self.contiguous_sizes[address] + + if available_contiguous_size != size: + self.contiguous_sizes[address + size] = available_contiguous_size - size + + self.largest_contiguous = self._largest_contiguous() diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/linear.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..e9dd78864cdee838e1bd1ebdfb1963f483c09f52 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/linear.py @@ -0,0 +1,178 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +#Linear Module to use with ZeRO Stage 3 to allow for parameter memory release +#after the module execution during forward +#Instead of saving variables using save_for_backward, we save variable ids +#Allowing us to retrieve the variable without creating pointer to it +#Which allows for underlying tensor to be garbage collected +#When partitioned as needed by the Zero Stage 3 optimizer +#TODO instead of patching Linear module, we could patch the ctx.save_for_backward +#ctx.saved_tensors so that this approach works for all nn modules that are built upon +#torch.nn.function. However the issue is that many modules uses C++ implementations +#which does not have pytorch implementation. Eg torch.addmm which acts as a functional +#when implemented outside of torch.autograd.Function + +import math + +import torch +from torch import Tensor +from torch.nn.parameter import Parameter +from torch.nn import init +from torch.nn.modules.module import Module +from deepspeed.runtime.utils import noop_decorator +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator + + +def print_rank_0(message, debug=False, force=False): + if dist.get_rank() == 0 and (debug or force): + print(message) + + +try: + autocast_custom_fwd = get_accelerator().amp().custom_fwd + autocast_custom_bwd = get_accelerator().amp().custom_bwd +except (ImportError, AttributeError) as exp: + autocast_custom_fwd = noop_decorator + autocast_custom_bwd = noop_decorator + + +class LinearFunctionForZeroStage3(torch.autograd.Function): + + # Note that both forward and backward are @staticmethods + @staticmethod + @autocast_custom_fwd + # bias is an optional argument + def forward(ctx, input, weight, bias=None): + + ctx.save_for_backward(input, weight, bias) + + if input.dim() == 2 and bias is not None: + # fused op is marginally faster + ret = torch.addmm(bias, input, weight.t()) + else: + output = input.matmul(weight.t()) + if bias is not None: + output += bias + ret = output + + return ret + + # This function has only a single output, so it gets only one gradient + @staticmethod + @autocast_custom_bwd + def backward(ctx, grad_output): + # This is a pattern that is very convenient - at the top of backward + # unpack saved_tensors and initialize all gradients w.r.t. inputs to + # None. Thanks to the fact that additional trailing Nones are + # ignored, the return statement is simple even when the function has + # optional inputs. + input, weight, bias = ctx.saved_tensors + + grad_input = grad_weight = grad_bias = None + + #print(f"backward shaped grad_output {grad_output.shape}, input {input.shape}, weight {weight.shape} and bias {bias.shape if bias is not None else None}") + # These needs_input_grad checks are optional and there only to + # improve efficiency. If you want to make your code simpler, you can + # skip them. Returning gradients for inputs that don't require it is + # not an error. 
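+        # Sketch of the gradients computed below, following the usual
+        # linear-layer calculus for y = x @ W^T + b:
+        #   grad_input  = grad_output @ W
+        #   grad_weight = grad_output^T @ x  (leading dims flattened for >2-D input)
+        #   grad_bias   = grad_output summed over every dim except the last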
+ if ctx.needs_input_grad[0]: + #print(f"Computing grad input weight {weight.shape} grad_output {grad_output.shape}") + grad_input = grad_output.matmul(weight) + #print(f"Computed grad input {grad_input.shape}") + if ctx.needs_input_grad[1]: + #print("Computing grad weight") + dim = grad_output.dim() + if dim > 2: + grad_weight = grad_output.reshape(-1, + grad_output.shape[-1]).t().matmul(input.reshape(-1, input.shape[-1])) + else: + grad_weight = grad_output.t().matmul(input) + #print(f"Computed grad weight grad_weight {grad_weight.shape}") + if bias is not None and ctx.needs_input_grad[2]: + #print("Computing grad bias") + if dim > 2: + grad_bias = grad_output.sum([i for i in range(dim - 1)]) + else: + grad_bias = grad_output.sum(0) + #print("Done computing grad bias") + #print("needs bias") + #print(f"backward shaped grad_input {grad_input.shape}, grad_weight {grad_weight.shape}, grad_bias {grad_bias.shape if grad_bias is not None else None}") + return grad_input, grad_weight, grad_bias + + +def zero3_linear_wrap(input, weight, bias=None): + if bias is None: + return LinearFunctionForZeroStage3.apply(input, weight) + else: + return LinearFunctionForZeroStage3.apply(input, weight, bias) + + +class LinearModuleForZeroStage3(Module): + r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`. + The weights are pre-transposed and stored as A^T instead of transposing during each + forward. Memory savings proportional to the parameter size. + + Args: + in_features: size of each input sample + out_features: size of each output sample + bias: If set to ``False``, the layer will not learn an additive bias. + Default: ``True`` + + Shape: + - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of + additional dimensions and :math:`H_{in} = \text{in\_features}` + - Output: :math:`(N, *, H_{out})` where all but the last dimension + are the same shape as the input and :math:`H_{out} = \text{out\_features}`. + + Attributes: + weight: the learnable weights of the module of shape + :math:`(\text{out\_features}, \text{in\_features})`. The values are + initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where + :math:`k = \frac{1}{\text{in\_features}}` + bias: the learnable bias of the module of shape :math:`(\text{out\_features})`. 
+ If :attr:`bias` is ``True``, the values are initialized from + :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where + :math:`k = \frac{1}{\text{in\_features}}` + + Examples:: + + >>> m = nn.Linear(20, 30) + >>> input = torch.randn(128, 20) + >>> output = m(input) + >>> print(output.size()) + torch.Size([128, 30]) + """ + __constants__ = ['in_features', 'out_features'] + in_features: int + out_features: int + weight: Tensor + + def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None: + super(LinearModuleForZeroStage3, self).__init__() + print("Building ZeRO module") + self.in_features = in_features + self.out_features = out_features + self.weight = Parameter(torch.Tensor(out_features, in_features)) + if bias: + self.bias = Parameter(torch.Tensor(out_features)) + else: + self.register_parameter('bias', None) + self.reset_parameters() + + def reset_parameters(self) -> None: + init.kaiming_uniform_(self.weight, a=math.sqrt(5)) + if self.bias is not None: + fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) + bound = 1 / math.sqrt(fan_in) + init.uniform_(self.bias, -bound, bound) + + def forward(self, input: Tensor) -> Tensor: + return LinearFunctionForZeroStage3.apply(input, self.weight, self.bias) + + def extra_repr(self) -> str: + return 'in_features={}, out_features={}, bias={}'.format(self.in_features, self.out_features, self.bias + is not None) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/mics.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/mics.py new file mode 100644 index 0000000000000000000000000000000000000000..1e5c9396be1dbe49399c195c1de40f691a6c4e20 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/mics.py @@ -0,0 +1,467 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +import sys +from typing import List + +import deepspeed +import torch +from deepspeed import comm as dist +from deepspeed.runtime.zero.utils import is_zero_param +from deepspeed.runtime.zero.mics_utils import (MiCS_CommGroups, create_mics_comm_groups, scale_tensors) +from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload +from deepspeed.runtime.zero.partition_parameters import Init, AllGatherCoalescedHandle, ZeroParamStatus +from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 +from deepspeed.utils import instrument_w_nvtx, log_dist +from deepspeed.accelerator import get_accelerator +from torch import Tensor +from torch.nn import Parameter + + +def has_hierarchical_all_gather_groups(comm_groups: MiCS_CommGroups): + result = False + if comm_groups.param_intra_node_group is not None and comm_groups.param_inter_node_shard_group is not None: + result = True + return result + + +class MiCS_AllGatherCoalescedHandle(AllGatherCoalescedHandle): + """ This handle assumes that no need to + copy data out from a contiguous tensor + """ + + def __init__(self, allgather_handle, params: List[Parameter], partitions: List[Tensor], world_size: int) -> None: + super().__init__(allgather_handle, params, partitions, world_size) + + def wait(self) -> None: + """ + """ + # let the current stream to op + try: + print("HANDLE", self.allgather_handle) + instrument_w_nvtx(self.allgather_handle.wait)() + except (ValueError, RuntimeError) as e: + log_dist( + f"WARNING: Runtime Error while waiting the collective all-gather, possibly due to the _IllegalWork", + ranks=[0]) + log_dist(f"Error message: {e}", ranks=[0]) + + if self.complete: + return + + for _, param in enumerate(self.params): + assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight" + param.ds_status = ZeroParamStatus.AVAILABLE + + self.complete = True + + +class MiCS_Init(Init): + + def __init__(self, + module=None, + data_parallel_group=None, + sequence_data_parallel_group=None, + mem_efficient_linear=True, + remote_device=None, + pin_memory=False, + config_dict_or_path=None, + config=None, + enabled=True, + dtype=None, + mpu=None): + """A context manager to partition the model parameters during the model + construction with MiCS partition strategy. Model states are partitioned + to the number of devices specified via ``mics_shard_size`` field in the + deepspeed config json file. The context manager also introduces + hierarchical communication method to reduce the cost of inter-node + communications, which can be enabled with + ``mics_hierarchical_params_gather`` field in deepspeed config. + + Args: + module (``torch.nn.Module``, optional): If provided, partition the model as + if it was constructed in the context. + data_parallel_group (``deepspeed.comm`` process group, optional): + The group of processes to partition among. Defaults to all processes. + mem_efficient_linear (bool, optional): Replace + torch.nn.functional.linear with an implementation that allows + DeepSpeed to partition parameters. Defaults to ``True``. + remote_device (string, optional): The initial device to store model + weights e.g., ``cpu``, ``nvme``. Passing ``"cpu"`` will create the model in CPU + memory. The model may still be moved to GPU based on the + offload settings for training. Defaults to param offload device if a config is + defined, otherwise GPU. 
+ pin_memory (bool, optional): Potentially increase performance by + using pinned memory for model weights. ``remote_device`` must be + ``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``. + config_dict_or_path (dict or ``json file``, optional): If provided, provides configuration + for swapping fp16 params to NVMe. + config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead. + enabled (bool, optional): If ``False``, this context has no + effect. Defaults to ``True``. + dtype (``dtype``, optional): Can be used to change the data type of the parameters. + Supported options are ``torch.half`` and ``torch.float``. Defaults to ``None`` + mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}. + + This context follows the same logic as ``deepspeed.zero.Init()``, but + with the modification for partition size of each parameter. + + Examples + -------- + + #. Allocate a model and partition it among all processes: + + .. code-block:: python + # the config_dict_or_path is required to let the context manager know + # how partition the parameters. + # The configuration has to include the field ``mics_shard_size`` + with deepspeed.zero.MiCS_Init(config_dict_or_path=ds_config): + model = MyLargeModel() + + + #. Allocate a model in pinned CPU memory and partition it among a subgroup of processes: + + .. code-block:: python + + with deepspeed.zero.MiCS_Init(data_parallel_group=mpu.get_data_parallel_group(), + remote_device="cpu", + pin_memory=True + config_dict_or_path=ds_config): + model = MyLargeModel() + + + #. Partition an already-allocated model in CPU memory: + + .. code-block:: python + + model = deepspeed.zero.MiCS_Init(module=model, + config_dict_or_path=ds_config) + """ + + assert config_dict_or_path is not None, "Must provide configuration for MiCS Initialization" + _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, mpu) + if not dist.is_initialized(): + dist.init_distributed() + assert dist.is_initialized(), "Parameters cannot be scattered without initializing deepspeed.comm" + + if data_parallel_group is None and sequence_data_parallel_group is None: + ds_process_group = dist.get_world_group() + elif sequence_data_parallel_group is not None: + ds_process_group = sequence_data_parallel_group + elif data_parallel_group is not None: + ds_process_group = data_parallel_group + else: # both given + raise ValueError( + "Both 'data_parallel_group' and 'sequence_data_parallel_group' were specified. Please provide only one of these arguments." 
+ ) + + self.mics_comm_groups = create_mics_comm_groups( + _ds_config.mics_shard_size, + ds_process_group, + hierarchical_allgather=_ds_config.mics_hierarchial_params_gather, + mpu=mpu) + + super().__init__(module, data_parallel_group, mem_efficient_linear, remote_device, pin_memory, + config_dict_or_path, config, enabled, dtype, mpu) + + def _convert_to_deepspeed_param(self, param): + super()._convert_to_deepspeed_param(param) + # attach communication groups to every param + param.comm = self.mics_comm_groups + + # record existing all_gather_coalesced implementation + # so that we can fallback later + old_all_gather_coalesced = param.all_gather_coalesced + + def _param_all_gather_coalesced(params, param_buffers=None, **kwargs): + """""" + mics_comm_groups: MiCS_CommGroups = params[0].comm + hierarchical_all_gather = has_hierarchical_all_gather_groups(mics_comm_groups) + if dist.has_coalescing_manager() and hierarchical_all_gather: + return self._hierarchical_all_gather_params(params, param_buffers) + elif dist.has_coalescing_manager(): + return self._flat_all_gather_with_coalescing_manager(params, param_buffers) + else: + return old_all_gather_coalesced(params, **kwargs) + + # change the all_gather_coalesced method + param.all_gather_coalesced = _param_all_gather_coalesced + + def _pre_all_gather(self, params, params_buffers=None): + # fetches from nvme if the partition is not available and in nvme + self._ensure_availability_of_partitioned_params(params) + + for param in params: + if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: + raise RuntimeError(param.ds_summary()) + param.ds_status = ZeroParamStatus.INFLIGHT + + # ensure that each rank has params in same order. the allgather + # is done by flattening the parameter list into a single tensor that + # can be allgathered in a single call - this means that if each rank + # gives a list of the same parameters in a different order we will + # silently get incorrect parameter values, and have very difficult + # to debug correctness issues. 
+ params = sorted(params, key=lambda p: p.ds_id) + return params, params_buffers + + def _flat_all_gather_with_coalescing_manager(self, params, params_buffers=None): + """""" + # must have to change the status of the param + # and ensure they are on the device + params, params_buffers = self._pre_all_gather(params, params_buffers) + + mics_comm_groups: MiCS_CommGroups = params[0].comm + param_shard_size = mics_comm_groups.param_shard_size + + output_tensors = [] + input_tensors = [] + for i, p in enumerate(params): + t_size = p.ds_tensor.ds_numel * param_shard_size + if params_buffers is not None and params_buffers[i] is not None: + assert params_buffers[i].numel( + ) == t_size, f'params_to_gather_buffers[{i}] size {params_buffers[i].numel()} does not match with t_size {t_size}' + flat_out = params_buffers[i] + else: + flat_out = torch.empty(t_size, dtype=p.dtype, device=self.local_device, requires_grad=False).view(-1) + output_tensors.append(flat_out) + _flat_input = p.ds_tensor.data.view(-1) + input_tensors.append(_flat_input) + + all_gather_handle = dist.all_gather_coalesced(output_tensors, + input_tensors, + group=mics_comm_groups.param_shard_group, + async_op=True) + + for idx, param in enumerate(params): + param.data = output_tensors[idx].narrow(0, 0, param.ds_numel).view(param.ds_shape).data + + return MiCS_AllGatherCoalescedHandle(allgather_handle=all_gather_handle, + params=params, + partitions=[], + world_size=param_shard_size) + + def _hierarchical_all_gather_params(self, params, params_buffers=None): + """""" + params, params_buffers = self._pre_all_gather(params, params_buffers) + + mics_comm_groups: MiCS_CommGroups = params[0].comm + local_rank = dist.get_rank(group=mics_comm_groups.param_intra_node_group) + inter_node_comm_group = mics_comm_groups.param_inter_node_shard_group + intra_node_comm_group = mics_comm_groups.param_intra_node_group + param_shard_size = mics_comm_groups.param_shard_size + + inter_node_size = dist.get_world_size(group=inter_node_comm_group) + intra_node_size = dist.get_world_size(group=intra_node_comm_group) + param_tensors = [] + for i, p in enumerate(params): + param_size = p.ds_tensor.ds_numel * param_shard_size + if params_buffers is not None and params_buffers[i] is not None: + assert params_buffers[i].numel( + ) == param_size, f'param_buffers[{i}] size {params_buffers[i].numel()} does not match with param_size {param_size}' + param_tensor = params_buffers[i] + else: + param_tensor = torch.empty(param_size, dtype=p.dtype, device=self.local_device, + requires_grad=False).view(-1) + param_tensors.append(param_tensor) + + # inter node all-gather + inter_outputs = [] + inter_inputs = [] + for i, p in enumerate(params): + inter_size = p.ds_tensor.ds_numel * inter_node_size + _out = param_tensors[i].narrow(0, local_rank * inter_size, inter_size) + inter_outputs.append(_out) + inter_inputs.append(p.ds_tensor.data.view(-1).to(self.local_device)) + # sync enqueue + dist.all_gather_coalesced(inter_outputs, inter_inputs, group=inter_node_comm_group, async_op=False) + + # intra node all-gather + intra_outputs = [] + intra_inputs = [] + for i, p in enumerate(params): + # partition param into multiple chunks for allgather + # because inter-node all-gather outputs are in a continues memory + # while in param memory, those inter-node data are placed in different + # location. 
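+            # Illustrative layout (assuming inter_node_size=2, intra_node_size=4):
+            # param_tensors[i] is viewed as [2, 4, shard_numel]; the inter-node
+            # all-gather above filled column ``local_rank`` of both blocks, and the
+            # intra-node all-gather below fills the remaining columns of each block.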
+ # each chunk is an intra-node output + param_chunk = param_tensors[i].view( + (inter_node_size, intra_node_size, p.ds_tensor.ds_numel)).narrow(1, local_rank, 1) + param_chunk.copy_(inter_outputs[i].detach().clone().view(param_chunk.size())) + output_chunks = torch.chunk(param_tensors[i], inter_node_size) + for j, _out in enumerate(output_chunks): + intra_chunk_size = intra_node_size * p.ds_tensor.ds_numel + local_offset = local_rank * p.ds_tensor.ds_numel + _in = param_tensors[i].narrow(0, j * intra_chunk_size + local_offset, p.ds_tensor.ds_numel) + intra_outputs.append(_out) + intra_inputs.append(_in) + + all_gather_handle = dist.all_gather_coalesced(intra_outputs, + intra_inputs, + group=intra_node_comm_group, + async_op=True) + for i, param in enumerate(params): + param.data = param_tensors[i].narrow(0, 0, param.ds_numel).view(param.ds_shape).data + + return MiCS_AllGatherCoalescedHandle( + allgather_handle=all_gather_handle, + params=params, + partitions=[], + world_size=param_shard_size, + ) + + def get_partition_dp_group(self, param): + return param.comm.param_shard_group + + def get_partition_rank(self): + return self.mics_comm_groups.param_shard_rank + + @property + def num_partitions(self): + return self.mics_comm_groups.param_shard_size + + +class MiCS_Offload(DeepSpeedZeRoOffload): + """ Wrapper to change the behavior for parameter sharding + """ + + def _convert_to_zero_parameters(self, ds_config, module, mpu): + """ overload the parent class function for convert the parameters + + """ + log_dist(f'Convert to zero parameters from MiCS Offload manager', ranks=[0]) + non_zero_params = [p for p in module.parameters() if not is_zero_param(p)] + if non_zero_params: + zero_params = [p for p in module.parameters() if is_zero_param(p)] + if zero_params: + zero_params[0].convert_to_zero_parameters(param_list=non_zero_params) + else: + group = None + if mpu: + group = mpu.get_data_parallel_group() + + MiCS_Init(module=module, + data_parallel_group=group, + dtype=self.dtype, + config_dict_or_path=ds_config, + remote_device=self.offload_device, + pin_memory=self.offload_param_pin_memory, + mpu=mpu) + + +class MiCS_Optimizer(DeepSpeedZeroOptimizer_Stage3): + """ + MiCS Optimizer + """ + + def __init__(self, + module, + init_optimizer, + timers, + ds_config, + static_loss_scale=1, + dynamic_loss_scale=False, + dynamic_loss_args=None, + verbose=True, + contiguous_gradients=True, + reduce_bucket_size=500000000, + prefetch_bucket_size=50000000, + max_reuse_distance=1000000000, + max_live_parameters=1000000000, + param_persistence_threshold=100000, + model_persistence_threshold=sys.maxsize, + dp_process_group=None, + reduce_scatter=True, + overlap_comm=False, + offload_optimizer_config=None, + offload_param_config=None, + sub_group_size=1000000000000, + offload_ratio=0.0, + mpu=None, + clip_grad=0, + gradient_accumulation_dtype=torch.float16, + communication_data_type=torch.float16, + postscale_gradients=True, + gradient_predivide_factor=1, + gradient_accumulation_steps=1, + elastic_checkpoint=False, + aio_config=None): + + log_dist("Init MiCS optimizer", ranks=[0]) + super().__init__(module, init_optimizer, timers, ds_config, static_loss_scale, dynamic_loss_scale, + dynamic_loss_args, verbose, contiguous_gradients, reduce_bucket_size, prefetch_bucket_size, + max_reuse_distance, max_live_parameters, param_persistence_threshold, + model_persistence_threshold, dp_process_group, reduce_scatter, overlap_comm, + offload_optimizer_config, offload_param_config, sub_group_size, offload_ratio, mpu, 
clip_grad, + gradient_accumulation_dtype, communication_data_type, postscale_gradients, + gradient_predivide_factor, gradient_accumulation_steps, elastic_checkpoint, aio_config) + first_param = next(module.parameters()) + # overload the dp_process_group and partition_count + assert hasattr(first_param, "comm"), " ".join([ + "Sharded parameters don't have the MiCS_CommGroups attached.", + "Might due to the use of deepspeed.zero.Init context for initializing the weights.", + "To use MiCS sharding, please use deepspeed.zero.MiCS_Init instead for initializing parameter." + ]) + self.dp_process_group = first_param.comm.param_shard_group + self.partition_count = first_param.comm.param_shard_size + + def initialize_ds_offload( + self, + *args, + **kwargs, + ): + return MiCS_Offload(*args, **kwargs) + + def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None: + grad_buffers = super().partition_grads(params_to_release, grad_partitions) + # perform all-reduce among replication groups + # the function will perform accumulation boundary check + self.allreduce_mics_shard_grads(params_to_release, grad_buffers) + + @instrument_w_nvtx + def allreduce_mics_shard_grads(self, params, partitioned_grads_buffers: List[Tensor]): + """ + """ + # TODO: improve the condition check + if not self.is_gradient_accumulation_boundary or \ + len(partitioned_grads_buffers) == 0: + return + + mics_comm_groups: MiCS_CommGroups = params[0].comm + param_repli_group = mics_comm_groups.param_repli_group + param_repli_size = mics_comm_groups.param_repli_size + + if param_repli_size is None or param_repli_size <= 1: + return + if not get_accelerator().on_accelerator(partitioned_grads_buffers[0]): + raise RuntimeError("Local sharding has no support for CPU offloading") + + if dist.has_all_reduce_coalesced(): + scale_tensors(partitioned_grads_buffers, param_repli_size) + dist.all_reduce_coalesced(tensors=partitioned_grads_buffers, group=param_repli_group) + else: + # manually coalescing all-reduce + aggregated_buffer: Tensor = torch.cat(partitioned_grads_buffers) + aggregated_buffer.div_(param_repli_size) + dist.all_reduce(aggregated_buffer, group=param_repli_group) + offset = 0 + for grad_buff in partitioned_grads_buffers: + grad_buff.view(-1).copy_(aggregated_buffer.narrow(0, offset, grad_buff.numel())) + offset += grad_buff.numel() + + def load_state_dict(self, + state_dict_list, + load_optimizer_states=True, + load_from_fp32_weights=False, + checkpoint_folder=None, + load_serial=None): + r""" Loading the ZeRO-3/MiCS partitioned checkpoints + Because the self.dp_process_group is replaced with the communicator for + partition group we can call the load_state_dict logic from ZeRO-3. + """ + super().load_state_dict(state_dict_list, load_optimizer_states, load_from_fp32_weights, checkpoint_folder) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/mics_utils.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/mics_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..06b83160bd6c4cd93f5250a12c67f5abda01d920 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/mics_utils.py @@ -0,0 +1,203 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
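+#
+# Illustrative example of the rank layout produced below: for world_size=16 and
+# shard_size=8, _generate_mics_config returns
+#     shard_groups     = [[0..7], [8..15]]
+#     replicate_groups = [[0, 8], [1, 9], ..., [7, 15]]
+# i.e. parameters are sharded inside each group of 8 ranks and replicated across the
+# two groups; create_mics_comm_groups turns these rank lists into process groups.
+#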
+# SPDX-License-Identifier: Apache-2.0 + +import os +from dataclasses import dataclass +from typing import List + +import numpy as np +import torch +from torch import Tensor + +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator +from deepspeed.utils import logger + + +def _log_rank0(msg): + if dist.get_rank() == 0: + logger.info(msg) + + +@torch.jit.script +def scale_tensors(tensors: List[Tensor], scale: int): + for t in tensors: + t.div_(scale) + + +@dataclass +class MiCS_CommGroups: + """""" + param_shard_group = None + param_shard_size = -1 + param_shard_rank = -1 + + param_repli_group = None + param_repli_size = -1 + param_repli_rank = -1 + + param_intra_node_group = None + param_inter_node_shard_group = None + + +def create_mics_comm_groups( + shard_size, + dp_group, + hierarchical_allgather=False, + mpu=None, +): + """ + create shard-group, replicate-group from config_file + TODO: consider broadcast the config from rank0 + + Returns: + MiCS_CommGroups + """ + # env var for debugging purpose + ndevices_per_node = int(os.environ.get("NDEV_PER_NODE", get_accelerator().device_count())) + _log_rank0(f'creating MiCS communication groups with per node device size {ndevices_per_node}') + groups = MiCS_CommGroups() + + if mpu is not None: + assert dp_group == mpu.get_data_parallel_group() + + # full size of the world + world_size = dist.get_world_size() + # global rank + global_rank = dist.get_rank() + + config = _generate_mics_config(world_size, ndevices_per_node, shard_size, 1) + ranks_of_shard_group = config['shard_groups'] + ranks_of_repli_group = config['replicate_groups'] + if len(ranks_of_repli_group) == 0: + assert len(ranks_of_shard_group) == 1, "replicate groups are empty only for single shard group" + for r in ranks_of_shard_group[0]: + ranks_of_repli_group.append([r]) + + # for simplicity + assert _sizes_all_same(ranks_of_repli_group), "replicate groups must have the same size" + assert _sizes_all_same(ranks_of_shard_group), "shard groups must have the same size" + + assert sum([len(g) for g in ranks_of_shard_group]) == dist.get_world_size(), "all sharded ranks " + if len(ranks_of_shard_group) > 1: # if only shard on one group then no need for replicate groups + assert len(ranks_of_shard_group) == len( + ranks_of_repli_group[0]), "number of shard groups must equal to the size of each replicate group" + + global_rank = dist.get_rank() + # create shard groups + for shard_ranks in ranks_of_shard_group: + _group = dist.new_group(shard_ranks) + if global_rank in shard_ranks: + groups.param_shard_group = _group + groups.param_shard_size = len(shard_ranks) + groups.param_shard_rank = dist.get_rank(_group) + logger.info(f'rank {global_rank}, shard group' + f' {groups.param_shard_rank}/{dist.get_world_size(group=_group)}') + + # create replicate groups + for repli_ranks in ranks_of_repli_group: + if len(repli_ranks) > 1: + _group = dist.new_group(repli_ranks) + if global_rank in repli_ranks: + groups.param_repli_group = _group + groups.param_repli_size = len(repli_ranks) + groups.param_repli_rank = dist.get_rank(group=_group) + logger.info(f'rank {global_rank} ' + f'replicate group {groups.param_repli_rank}/{dist.get_world_size(group=_group)}') + else: + groups.param_repli_group = None + groups.param_repli_size = 1 + groups.param_repli_rank = 0 + logger.info(f'rank {global_rank} replicate group 0/1') + + # assign shard group size as world size + assert groups.param_shard_size == len(ranks_of_shard_group[0]) + + if hierarchical_allgather: + # create 
hierarchy inter-node, intra-node groups + # n_span_nodes = config['shard_span'] + n_span_nodes = config['span_nodes'] + assert n_span_nodes > 1, "sharding spans on single node, no need for hierarchy allgather" + assert len(ranks_of_shard_group[0]) % n_span_nodes == 0 + + n_gpu_per_node = len(ranks_of_shard_group[0]) // n_span_nodes + intra_node_ranks_group = [] + inter_node_ranks_group = [] + for shard_group in ranks_of_shard_group: + _intra_node_ranks = [] + for i in range(0, len(shard_group), n_gpu_per_node): + _intra_node_ranks.append(shard_group[i:i + n_gpu_per_node]) + _inter_node_ranks = [] + for i in range(n_gpu_per_node): + _ranks = [_g[i] for _g in _intra_node_ranks] + _inter_node_ranks.append(_ranks) + + intra_node_ranks_group.append(_intra_node_ranks) + inter_node_ranks_group.append(_inter_node_ranks) + + _log_rank0(f"create for hierarchy all-gather groups: intra nodes {intra_node_ranks_group}") + _log_rank0(f"create for hierarchy all-gather groups: inter nodes {inter_node_ranks_group}") + + # create communicators + for shard_group in intra_node_ranks_group: + for intra_node_ranks in shard_group: + _group = dist.new_group(intra_node_ranks) + if global_rank in intra_node_ranks: + groups.param_intra_node_group = _group + _log_rank0(f'create group for intra node ranks {intra_node_ranks}') + + for shard_group in inter_node_ranks_group: + for inter_node_ranks in shard_group: + _group = dist.new_group(inter_node_ranks) + if global_rank in inter_node_ranks: + groups.param_inter_node_shard_group = _group + _log_rank0(f'create group for inter node ranks {inter_node_ranks}') + return groups + + +def _generate_mics_config(world_size, ndev_per_node, shard_size, pp_size=1): + """Generating the configuration for sharding This shard config generation assume + that the pipeline stages are partitioned in order, i.e., first ranks + hold the stage0, etc. + + Args: + + shard_size (int): zero3 data-parallel shard size, FIXME: + change the name later + + pp_size (int): pipeline parallel size, currently, only work with + pipeline parallelism + zero + + """ + assert world_size % pp_size == 0 + assert (world_size // pp_size) % shard_size == 0, \ + f"dp group size is not dividable by dp_shard_size, "\ + f" (world_size {world_size}, pp_size {pp_size}, dp_shard_size {shard_size})" + + config = {} + shard_groups = np.arange(world_size).reshape(-1, shard_size) + replicate_groups = [] + for i in range(shard_size): + same_shard_ranks = shard_groups[:, i].tolist() + n_ranks = len(same_shard_ranks) + replicate_size = n_ranks // pp_size + replicate_groups.extend([same_shard_ranks[j:j + replicate_size] for j in range(0, n_ranks, replicate_size)]) + + config['replicate_groups'] = replicate_groups + config['shard_groups'] = shard_groups.tolist() + config["span_nodes"] = len(shard_groups[0]) // ndev_per_node + return config + + +def _sizes_all_same(groups): + """all groups have same length""" + all_same = True + for g in groups: + if len(g) != len(groups[0]): + return False + return all_same diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/offload_config.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/offload_config.py new file mode 100644 index 0000000000000000000000000000000000000000..b7adc13a0ea2b0d6a57c4772b5b96cd5520bf41a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/offload_config.py @@ -0,0 +1,97 @@ +# Copyright (c) Microsoft Corporation. 
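+#
+# Illustrative config snippet (assuming the standard DeepSpeed JSON layout, where the
+# models below map to the "offload_param" and "offload_optimizer" sections under
+# "zero_optimization"):
+#
+#     "zero_optimization": {
+#         "stage": 3,
+#         "offload_param":     {"device": "cpu", "pin_memory": true},
+#         "offload_optimizer": {"device": "nvme", "nvme_path": "/local_nvme", "buffer_count": 4}
+#     }
+#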
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from enum import Enum +from pathlib import Path +from deepspeed.pydantic_v1 import Field, validator +from deepspeed.runtime.config_utils import DeepSpeedConfigModel, pp_int + + +class OffloadDeviceEnum(str, Enum): + """ Enum for valid offload devices """ + none = "none" + cpu = "cpu" + nvme = "nvme" + + +class DeepSpeedZeroOffloadParamConfig(DeepSpeedConfigModel): + """ Set options for parameter offload. Valid only with stage 3. """ + + device: OffloadDeviceEnum = "none" + """ + Device memory to offload model parameters. Supported options are `cpu` and + `nvme`. + """ + + nvme_path: Path = None + """ Filesystem path for NVMe device for parameter offloading. """ + + buffer_count: int = Field(5, ge=0) + """ Number of buffers in buffer pool for parameter offloading to NVMe. """ + + buffer_size: int = Field(pp_int(1e8), ge=0) + """ Size of buffers in buffer pool for parameter offloading to NVMe. """ + + max_in_cpu: int = Field(pp_int(1e9), ge=0) + """ + Number of parameter elements to maintain in CPU memory when offloading to + NVMe is enabled. + """ + + pin_memory: bool = False + """ + Offload to page-locked CPU memory. This could boost throughput at the cost + of extra memory overhead. + """ + + +class DeepSpeedZeroOffloadOptimizerConfig(DeepSpeedConfigModel): + """ Set options for optimizer offload. Valid with stage 1, 2, and 3. """ + + device: OffloadDeviceEnum = "none" + """ + Device memory to offload optimizer state. Supported options are `cpu` and + `nvme`. Optimizer computation is offload to CPU regardless of device option. + """ + + nvme_path: Path = None + """ Filesystem path for NVMe device for optimizer state offloading. """ + + buffer_count: int = Field(4, ge=0) + """ + Number of buffers in buffer pool for optimizer state offloading to NVMe. + This should be at least the number of states maintained per parameter by + the optimizer. For example, Adam optimizer has 4 states (parameter, + gradient, momentum, and variance). + """ + + pin_memory: bool = False + """ + Offload to page-locked CPU memory. This could boost throughput at the cost + of extra memory overhead. + """ + + pipeline_read: bool = False + """ + For tile-based optimizer step processing, overlap read of next tile with + computation of current tile. Used in ZeRO-Infinity. + """ + + pipeline_write: bool = False + """ + For tile-based optimizer step processing, overlap write of previous tile + with computation of current tile. + """ + + fast_init: bool = False + """ Enable fast optimizer initialization when offloading to NVMe. """ + + @validator("pipeline_read", "pipeline_write", always=True) + def set_pipeline(cls, field_value, values): + values["pipeline"] = field_value or values.get("pipeline", False) + return field_value + + ratio: float = Field(1.0, ge=0.0, le=1.0) + """ Percentage of offloaded optimizer states to CPU Adam. Only valid with ZeRO Stage 3.""" diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/parameter_offload.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/parameter_offload.py new file mode 100644 index 0000000000000000000000000000000000000000..e9e79c2647fbe62aafaf67c9a7f2e82be0609958 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/parameter_offload.py @@ -0,0 +1,487 @@ +# Copyright (c) Microsoft Corporation. 
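+#
+# Overview (illustrative summary of the hooks installed below): DeepSpeedZeRoOffload
+# wraps every submodule with forward pre/post hooks so that partitioned parameters are
+# all-gathered just before a submodule runs and released right after it, with matching
+# autograd.Function shims (pre_bwd_fn / post_bwd_fn) re-fetching and releasing them
+# around the backward pass.
+#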
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import sys +import torch +from collections import OrderedDict +from deepspeed.utils import z3_leaf_module +from deepspeed.runtime.utils import see_memory_usage +from deepspeed.runtime.zero.utils import apply_to_tensors_only, is_zero_param +from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum +from deepspeed.runtime.zero.partition_parameters import _init_external_params +from deepspeed.runtime.zero.partition_parameters import * +from deepspeed.runtime.zero.partitioned_param_coordinator import PartitionedParameterCoordinator, InflightParamRegistry, iter_params +from deepspeed.accelerator import get_accelerator + +FWD_MODULE_STACK = list() + +# ensure we only warn once, otherwise every iteration will trigger a warning +warned = False + + +#for each tensor in outputs run the forward_function and register backward_function as hook +def _apply_forward_and_backward_to_tensors_only(module, forward_function, backward_function, outputs): + if type(outputs) is tuple: + touched_outputs = [] + for output in outputs: + touched_output = _apply_forward_and_backward_to_tensors_only(module, forward_function, backward_function, + output) + touched_outputs.append(touched_output) + return tuple(touched_outputs) + elif type(outputs) is torch.Tensor: + forward_function(outputs) + if outputs.requires_grad: + outputs.register_hook(backward_function) + return outputs + else: + return outputs + + +class ZeROOrderedDict(OrderedDict): + + def __init__(self, parent_module, *args, **kwargs): + """A replacement for ``collections.OrderedDict`` to detect external ZeRO params. + + Args: + parent_module (``collections.OrderedDict``): the collection to replace + """ + + super().__init__(*args, **kwargs) + self._parent_module = parent_module + self._in_forward = False + + def __getitem__(self, key): + param = super().__getitem__(key) + + # Params can be registered as None (e.g., bias) + if param is None: + return param + + if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: + if self._parent_module._parameters._in_forward: + register_external_parameter(FWD_MODULE_STACK[-1], param) + param.all_gather() + print_rank_0(f'Registering external parameter from getter {key} ds_id = {param.ds_id}', force=False) + + return param + + +def _inject_parameters(module, cls): + for module in module.modules(): + if cls == ZeROOrderedDict: + new_param = cls(parent_module=module) + else: + new_param = cls() + + for key, param in module._parameters.items(): + new_param[key] = param + module._parameters = new_param + + +class DeepSpeedZeRoOffload(object): + + def __init__( + self, + module, + timers, + ds_config, + overlap_comm=True, + prefetch_bucket_size=50000000, + max_reuse_distance=1000000000, + max_live_parameters=1000000000, + param_persistence_threshold=100000, + model_persistence_threshold=sys.maxsize, + dp_process_group=None, + offload_param_config=None, + mpu=None, + zero_param_parallel_group=None, + zero_quantized_weights=False, + zero_quantized_nontrainable_weights=False, + ): + + see_memory_usage("DeepSpeedZeRoOffload initialize [begin]", force=True) + + print_rank_0(f"initialized {__class__.__name__} with args: {locals()}", force=False) + + self.module = module + self.timers = timers + self.dtype = list(module.parameters())[0].dtype + self.dp_process_group = dp_process_group + self.offload_device = None + self.offload_param_pin_memory = False + self.zero_param_parallel_group = zero_param_parallel_group + self.zero_quantized_weights = zero_quantized_weights 
+ self.zero_quantized_nontrainable_weights = zero_quantized_nontrainable_weights + + if offload_param_config is not None and offload_param_config.device != OffloadDeviceEnum.none: + self.offload_device = offload_param_config.device + self.offload_param_pin_memory = offload_param_config.pin_memory + + self._convert_to_zero_parameters(ds_config, module, mpu) + + for m in module.modules(): + _init_external_params(m) + + _inject_parameters(module, ZeROOrderedDict) + + self.param_numel_persistence_threshold = int(param_persistence_threshold) + self.model_persistence_threshold = int(model_persistence_threshold) + self.persistent_parameters = self.mark_persistent_parameters(self.param_numel_persistence_threshold, + self.model_persistence_threshold) + + self.param_coordinators = {} + self._prefetch_bucket_sz = int(prefetch_bucket_size) + self._max_reuse_distance_in_numel = int(max_reuse_distance) + self._max_available_parameters_in_numel = int(max_live_parameters) + self.__allgather_stream = None if get_accelerator().is_synchronized_device() else get_accelerator().Stream( + ) if overlap_comm else get_accelerator().default_stream() + + if not hasattr(module, "ds_inflight_param_registry"): + module.ds_inflight_param_registry = dict() + # we need two registries, one for training and one for eval. They will be used when creating PartitionedParameterCoordinator + module.ds_inflight_param_registry[True] = InflightParamRegistry() + module.ds_inflight_param_registry[False] = InflightParamRegistry() + self.__inflight_param_registry = module.ds_inflight_param_registry + + self.forward_hooks = [] + self.backward_hooks = [] + self.setup_zero_stage3_hooks() + print_rank_0( + f'Created module hooks: forward = {len(self.forward_hooks)}, backward = {len(self.backward_hooks)}', + force=False) + + see_memory_usage("DeepSpeedZeRoOffload initialize [end]", force=True) + + @instrument_w_nvtx + def partition_all_parameters(self): + """Partitioning Parameters that were not partitioned usually if parameters + of modules whose input parameters do not require grad computation do not + trigger post call and will therefore will remain unpartitioned""" + self.get_param_coordinator(training=self.module.training).release_and_reset_all(self.module) + for param in iter_params(self.module, recurse=True): + if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: + raise RuntimeError(f"{param.ds_summary()} expected to be released") + + def get_param_coordinator(self, training): + if not training in self.param_coordinators: + self.param_coordinators[training] = PartitionedParameterCoordinator( + prefetch_bucket_sz=self._prefetch_bucket_sz, + max_reuse_distance_in_numel=self._max_reuse_distance_in_numel, + max_available_parameters_in_numel=self._max_available_parameters_in_numel, + allgather_stream=self.__allgather_stream, + inflight_param_registry=self.__inflight_param_registry[training], + prefetch_nvme=self.offload_device == OffloadDeviceEnum.nvme, + timers=self.timers, + zero_quantized_weights=self.zero_quantized_weights, + zero_quantized_nontrainable_weights=self.zero_quantized_nontrainable_weights, + ) + + return self.param_coordinators[training] + + def empty_partition_cache(self): + self.partition_all_parameters() + + def _convert_to_zero_parameters(self, ds_config, module, mpu): + non_zero_params = [p for p in module.parameters() if not is_zero_param(p)] + if non_zero_params: + zero_params = [p for p in module.parameters() if is_zero_param(p)] + if zero_params: + 
zero_params[0].convert_to_zero_parameters(param_list=non_zero_params) + else: + group = None + if mpu: + group = mpu.get_data_parallel_group() + + Init(module=module, + data_parallel_group=group, + dtype=self.dtype, + config_dict_or_path=ds_config, + remote_device=self.offload_device, + pin_memory=self.offload_param_pin_memory, + mpu=mpu, + zero_param_parallel_group=self.zero_param_parallel_group, + zero_quantized_weights=self.zero_quantized_weights, + zero_quantized_nontrainable_weights=self.zero_quantized_nontrainable_weights) + + def destroy(self): + self._remove_module_hooks() + + def _remove_module_hooks(self): + num_forward_hooks = len(self.forward_hooks) + num_backward_hooks = len(self.backward_hooks) + + for hook in self.forward_hooks: + hook.remove() + + for hook in self.backward_hooks: + hook.remove() + + print_rank_0(f'Deleted module hooks: forward = {num_forward_hooks}, backward = {num_backward_hooks}', + force=False) + + def setup_zero_stage3_hooks(self): + self.hierarchy = 0 + + #reset step if in inference mode + @instrument_w_nvtx + def _end_of_forward_hook(module, *args): + + if not torch._C.is_grad_enabled(): + self.get_param_coordinator(training=False).reset_step() + + #likely one of them should be enough but just to be safe + self._register_hooks_recursively(self.module) + self.module.register_forward_hook(_end_of_forward_hook) + + # Add top module to stack trace + global FWD_MODULE_STACK + FWD_MODULE_STACK.append(self.module) + + def mark_persistent_parameters(self, param_threshold, model_threshold): + persistent_params = [] + total_persistent_parameters = 0 + params_count = 0 + for name, param in self.module.named_parameters(recurse=True): + if param.ds_numel + total_persistent_parameters > model_threshold: + continue + + if param.ds_numel <= param_threshold: + params_count += 1 + param.ds_persist = True + persistent_params.append(param) + total_persistent_parameters += param.ds_numel + + print_rank_0( + f"Parameter Offload: Total persistent parameters: {total_persistent_parameters} in {params_count} params", + force=True) + + return persistent_params + + def _register_hooks_recursively(self, module, count=[0]): + my_count = count[0] + module.id = my_count + + #print(f"{module.__class__} : {module.id}") + + if z3_leaf_module(module): + for param in module.parameters(): + param.ds_z3_leaf_module = module + else: + for child in module.children(): + count[0] = count[0] + 1 + self._register_hooks_recursively(child, count=count) + + @instrument_w_nvtx + def _pre_forward_module_hook(module, *args): + self.pre_sub_module_forward_function(module) + + @instrument_w_nvtx + def _post_forward_module_hook(module, input, output): + + global FWD_MODULE_STACK + FWD_MODULE_STACK.pop() + if output is None: + output = [] + elif not isinstance(output, (list, tuple)): + if torch.is_tensor(output): + output = [output] + else: + #print(f'got UNKNOWN type {type(output)}') + outputs = [] + output = output if isinstance(output, dict) else vars(output) + for name, val in output.items(): + if not name.startswith('__') and torch.is_tensor(val): + outputs.append(val) + output = outputs + + for item in filter(lambda item: is_zero_param(item) or hasattr(item, 'ds_param_alias'), output): + key = id(item) if hasattr(item, 'ds_id') else id(item.ds_param_alias) + actual_external_param = item if hasattr(item, 'ds_id') else item.ds_param_alias + + if not any(key in m._external_params for m in FWD_MODULE_STACK): + actual_external_param.is_external_param = True + module_to_register = FWD_MODULE_STACK[-1] + 
register_external_parameter(module_to_register, actual_external_param) + print_rank_0( + f'Registering dangling parameter for module {module_to_register.__class__.__name__}, ds_id = {actual_external_param.ds_id}.', + force=False) + + # It's possible that the parameter was already external to the completed module. If so, remove it the + # registration as it will be covered by the outer module instead. + if key in module._external_params: + print_rank_0( + f' Unregistering nested dangling parameter from module {module.__class__.__name__}, ds_id = {actual_external_param.ds_id}', + force=False) + unregister_external_parameter(module, actual_external_param) + + actual_external_param.all_gather() + + self.post_sub_module_forward_function(module) + + def _bwd_hook_unexpected_inputs_msg(value): + return f"A module has unknown inputs or outputs type ({type(value)}) and the tensors embedded in it cannot be detected. " \ + "The ZeRO-3 hooks designed to trigger before or after backward pass of the module relies on knowing the input and " \ + "output tensors and therefore may not get triggered properly." + + def _pre_backward_module_hook(module, inputs, output): + + if not hasattr(module, "pre_bwd_fn"): + + @instrument_w_nvtx + def _run_before_backward_function(sub_module): + # some models (e.g. Albert) may run multiple forwards on the same layer in a loop + # before doing backwards, so each backward will need a pre-fetch - using reference + # counting to support this scenario + #print(f"COUNTER before: {sub_module.applied_pre_backward_ref_cnt}") + if sub_module.applied_pre_backward_ref_cnt > 0: + self.pre_sub_module_backward_function(sub_module) + sub_module.applied_pre_backward_ref_cnt -= 1 + #print(f"COUNTER after: {sub_module.applied_pre_backward_ref_cnt}") + + class PreBackwardFunctionForModule(torch.autograd.Function): + + @staticmethod + def forward(ctx, outputs): + # Capture `module` and _run_before_backward_function + ctx.module = module + ctx.pre_backward_function = _run_before_backward_function + if not hasattr(ctx.module, "applied_pre_backward_ref_cnt"): + ctx.module.applied_pre_backward_ref_cnt = 0 + ctx.module.applied_pre_backward_ref_cnt += 1 + outputs = outputs.detach() + return outputs + + @staticmethod + def backward(ctx, *args): + ctx.pre_backward_function(ctx.module) + return args + + module.pre_bwd_fn = PreBackwardFunctionForModule + + return apply_to_tensors_only(module.pre_bwd_fn.apply, + output, + warning_msg_fn=_bwd_hook_unexpected_inputs_msg) + + #This is an alternate to doing _post_backward_module_hook + #it uses tensor.register_hook instead of using torch.autograd.Function + def _alternate_post_backward_module_hook(module, inputs): + module.ds_grads_remaining = 0 + + #print(f"Before Forward {module.__class__.__name__}") + + def _run_after_backward_hook(*unused): + module.ds_grads_remaining = module.ds_grads_remaining - 1 + if module.ds_grads_remaining == 0: + #print(f"After backward {module.__class__.__name__}") + self.post_sub_module_backward_function(module) + + def _run_before_forward_function(input): + if input.requires_grad: + module.ds_grads_remaining += 1 + + return _apply_forward_and_backward_to_tensors_only(module, _run_before_forward_function, + _run_after_backward_hook, inputs) + + def _post_backward_module_hook(module, inputs): + module.ds_grads_remaining = 0 + + if not hasattr(module, "post_bwd_fn"): + + @instrument_w_nvtx + def _run_after_backward_function(sub_module): + if sub_module.ds_grads_remaining == 0: + self.post_sub_module_backward_function(sub_module) 
+ + class PostBackwardFunctionModule(torch.autograd.Function): + + @staticmethod + def forward(ctx, output): + ctx.module = module + if output.requires_grad: + #TODO SOME TIMES post backward does not seem to be triggered debug in detail + #Should only cause increase in memory not correctness issue + #if output.grad_fn.__class__.__name__ == 'ViewBackward': + # ctx.view=True + # print(f"Warning view tensor for input to module : {module.__class__.__name__}. Backward hooks may not trigger properly") + #assert len(module.parameters(recurse=False)), "The input tensor to the module is a view, and autograd Function or register_hook is not triggered with view tensors." + #if module.ds_grads_remaining == 0: + # print(f"Before Forward: {ctx.module.__class__.__name__}") + module.ds_grads_remaining += 1 + ctx.post_backward_function = _run_after_backward_function + output = output.detach() + return output + + @staticmethod + def backward(ctx, *args): + ctx.module.ds_grads_remaining = ctx.module.ds_grads_remaining - 1 + if ctx.module.ds_grads_remaining == 0: + ctx.post_backward_function(ctx.module) + return args + + module.post_bwd_fn = PostBackwardFunctionModule + + return apply_to_tensors_only(module.post_bwd_fn.apply, + inputs, + warning_msg_fn=_bwd_hook_unexpected_inputs_msg) + + # Pre forward hook + self.forward_hooks.append(module.register_forward_pre_hook(_pre_forward_module_hook)) + + # Post forward hook + self.forward_hooks.append(module.register_forward_hook(_post_forward_module_hook)) + + # Pre backward hook + self.backward_hooks.append(module.register_forward_hook(_pre_backward_module_hook)) + + # post backward hook + self.backward_hooks.append(module.register_forward_pre_hook(_post_backward_module_hook)) + + @torch.no_grad() + def pre_sub_module_forward_function(self, sub_module): + see_memory_usage(f"Before sub module function {sub_module.__class__.__name__}", force=False) + + global FWD_MODULE_STACK + FWD_MODULE_STACK.append(sub_module) + + param_coordinator = self.get_param_coordinator(training=sub_module.training) + param_coordinator.trace_prologue(sub_module) + if param_coordinator.is_record_trace(): + param_coordinator.record_module(sub_module) + param_coordinator.fetch_sub_module(sub_module, forward=True) + + see_memory_usage(f"Before sub module function {sub_module.__class__.__name__} after fetch", force=False) + + @torch.no_grad() + def post_sub_module_forward_function(self, sub_module): + see_memory_usage(f"After sub module function {sub_module.__class__.__name__} {sub_module.id} before release", + force=False) + + param_coordinator = self.get_param_coordinator(training=sub_module.training) + param_coordinator.release_sub_module(sub_module) + + see_memory_usage(f"After sub module function {sub_module.__class__.__name__} {sub_module.id} after release", + force=False) + + @torch.no_grad() + def pre_sub_module_backward_function(self, sub_module): + assert sub_module.training, "backward pass is invalid for module in evaluation mode" + param_coordinator = self.get_param_coordinator(training=True) + param_coordinator.trace_prologue(sub_module) + if param_coordinator.is_record_trace(): + param_coordinator.record_module(sub_module) + param_coordinator.fetch_sub_module(sub_module, forward=False) + + @torch.no_grad() + def post_sub_module_backward_function(self, sub_module): + assert sub_module.training, "backward pass is invalid for module in evaluation mode" + see_memory_usage( + f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} before release", + 
force=False) + + self.get_param_coordinator(training=True).release_sub_module(sub_module) + + see_memory_usage( + f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} after release", + force=False) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/partition_parameters.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/partition_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..c8099791f882bab055e397734fa7d3f36641acff --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/partition_parameters.py @@ -0,0 +1,2184 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import math +import os +import types +from typing import Callable, Iterable +from enum import Enum +import functools +import itertools +from typing import List +from collections import defaultdict +import logging +import torch +from torch import Tensor +from deepspeed import comm as dist +from torch.nn import Module +from torch.nn import Parameter + +from .linear import zero3_linear_wrap + +from deepspeed.utils import groups +import deepspeed +from ..utils import see_memory_usage +from deepspeed.runtime.zero.config import DeepSpeedZeroConfig +from deepspeed.runtime.zero.utils import assert_ints_same_as_other_ranks, is_zero_param +from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum +from deepspeed.runtime.config_utils import get_config_default +from deepspeed.utils import instrument_w_nvtx, logger +from deepspeed.comm.comm import init_distributed +from deepspeed.utils.debug import (debug_param2name_id_shape, debug_param2name_id_shape_device, debug_module2name, + debug_param2name_id, debug_param2name_id_shape_status) +from deepspeed.accelerator import get_accelerator +from ..swap_tensor.partitioned_param_swapper import AsyncPartitionedParameterSwapper, PartitionedParamStatus +from deepspeed.inference.quantization.utils import _quantize_param, WEIGHT_QUANTIZATION_LAYERS, wrap_quantized_functional, wrap_load_from_state_dict + +partitioned_param_data_shape = [0] +zero_init_context = 0 +top_level_context = None + + +class NoGatherHandle: + + def __init__(self, param: Parameter) -> None: + if param.ds_status != ZeroParamStatus.INFLIGHT: + raise RuntimeError(f"expected param {param.ds_summary()} to be available") + + if hasattr(param.ds_tensor, "ds_quant_scale"): + param.data = Init.quantizer_module.dequantize(param.ds_tensor.data, param.ds_tensor.ds_quant_scale).to( + device=get_accelerator().current_device_name(), non_blocking=True).view(param.ds_shape) + else: + param.data = param.ds_tensor.data.to(device=get_accelerator().current_device_name(), + non_blocking=True).view(param.ds_shape) + self.__param = param + + def wait(self) -> None: + if not get_accelerator().is_synchronized_device(): + get_accelerator().current_stream().synchronize() + self.__param.ds_status = ZeroParamStatus.AVAILABLE + + +class NoGatherCoalescedHandle: + + def __init__(self, params: List[Parameter]) -> None: + self.__params = params + self.__complete = False + + for param in self.__params: + if param.ds_status != ZeroParamStatus.INFLIGHT: + raise RuntimeError(f"expected param {param.ds_summary()} to not be available") + if hasattr(param.ds_tensor, "ds_quant_scale"): + param.data = Init.quantizer_module.dequantize(param.ds_tensor.data, param.ds_tensor.ds_quant_scale).to( + device=get_accelerator().current_device_name(), non_blocking=True).view(param.ds_shape) + else: + param.data = 
param.ds_tensor.data.to(device=get_accelerator().current_device_name(), + non_blocking=True).view(param.ds_shape) + + @instrument_w_nvtx + def wait(self) -> None: + if self.__complete: + return + + if not get_accelerator().is_synchronized_device(): + get_accelerator().current_stream().synchronize() + for param in self.__params: + assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight" + param.ds_status = ZeroParamStatus.AVAILABLE + + self.__complete = True + + +def _dist_allgather_fn(input_tensor: Tensor, output_tensor: Tensor, group=None): + return instrument_w_nvtx(dist.allgather_fn)(output_tensor, input_tensor, group=group, async_op=True) + + +def print_rank_0(message, debug=False, force=False): + rank = dist.get_rank() + if rank == 0 and (debug or force): + print(message) + # other variations + # - print for all ranks w/o interleaving + # printflock(f"[{rank}] {message}") + # - print to log file per rank + # log_rank_file(rank, message) + + +def debug_rank0(msg: str) -> None: + if dist.get_rank() == 0: + logger.debug(msg) + + +def _init_external_params(module): + if not hasattr(module, '_external_params'): + module._external_params = {} + + def external_parameters(self): + return self._external_params.items() + + def all_parameters(self): + return itertools.chain(self.named_parameters(self, recurse=False), external_parameters(self)) + + module.ds_external_parameters = types.MethodType(external_parameters, module) + module.all_parameters = types.MethodType(all_parameters, module) + + +def register_external_parameter(module, parameter): + """Instruct DeepSpeed to coordinate ``parameter``'s collection and partitioning in + the forward and backward passes of ``module``. + + This is used when a parameter is accessed outside of its owning module's + ``forward()``. DeepSpeed must know to collect it from its partitioned + state and when to release the memory. + + .. note:: + This is only applicable to training with ZeRO stage 3. + + Args: + module (``torch.nn.Module``): The module that requires ``parameter`` in its forward pass. + parameter (``torch.nn.Parameter``): The parameter to register. + + Raises: + RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``. + + + Examples + ======== + + #. Register a weight that is used in another module's forward pass (line 6). + Parameter ``layer1.weight`` is used by ``layer2`` (line 11). + + .. code-block:: python + :linenos: + :emphasize-lines: 6,11 + + class ModuleZ3(torch.nn.Module): + def __init__(self, *args): + super().__init__(self, *args) + self.layer1 = SomeLayer() + self.layer2 = OtherLayer() + deepspeed.zero.register_external_parameter(self, self.layer1.weight) + + def forward(self, input): + x = self.layer1(input) + # self.layer1.weight is required by self.layer2.forward + y = self.layer2(x, self.layer1.weight) + return y + """ + if not isinstance(parameter, torch.nn.Parameter): + raise RuntimeError('Parameter is not a torch.nn.Parameter') + + if not hasattr(module, '_external_params'): + _init_external_params(module) + + key = id(parameter) + module._external_params[key] = parameter + + +def unregister_external_parameter(module, parameter): + """Reverses the effects of :meth:`register_external_parameter`. + + Args: + module (``torch.nn.Module``): The module to affect. + parameter (``torch.nn.Parameter``): The parameter to unregister. + + Raises: + RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``. 
+ RuntimeError: If ``parameter`` is not a registered external parameter of ``module``. + """ + if not isinstance(parameter, torch.nn.Parameter): + raise RuntimeError('Parameter is not a torch.nn.Parameter') + + if not hasattr(module, '_external_params') or id(parameter) not in module._external_params: + raise RuntimeError('Parameter is not a registered external parameter of module.') + + key = id(parameter) + del module._external_params[key] + + +class ZeroParamType(Enum): + + # same as regular pytorch parameters + NORMAL = 1 + + # parameters are partitioned across data parallel process + PARTITIONED = 2 + + # the parameter is held with a unique process rank + # and is not available on all other process + REMOTE = 3 + + +class ZeroParamStatus(Enum): + # parameters are fully present and ready for use on all processes + AVAILABLE = 1 + + # parameters are either partitioned or remote in some or all process + NOT_AVAILABLE = 2 + + # parameters are being gathered. + INFLIGHT = 3 + + +_orig_torch_tensor = torch.tensor +_orig_torch_empty = torch.empty +_orig_torch_zeros = torch.zeros +_orig_torch_ones = torch.ones +_orig_torch_full = torch.full +_orig_torch_arange = torch.arange +_orig_torch_eye = torch.eye +_orig_torch_randn = torch.randn + + +def zero_wrapper_for_fp_tensor_constructor(fn: Callable, target_fp_dtype: torch.dtype) -> Callable: + + def wrapped_fn(*args, **kwargs) -> Tensor: + if kwargs.get("device", None) is None: + kwargs['device'] = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])) + tensor: Tensor = fn(*args, **kwargs) + if tensor.is_floating_point(): + tensor.data = tensor.data.to(target_fp_dtype) + + return tensor + + return wrapped_fn + + +def get_new_tensor_fn_for_dtype(dtype: torch.dtype) -> Callable: + + def new_tensor(cls, *args, **kwargs) -> Tensor: + device = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])) + if not args: + args = (0, ) + tensor = _orig_torch_empty(0, device=device).new_empty(*args, **kwargs) + if tensor.is_floating_point(): + tensor = tensor.to(dtype) + + return tensor + + return new_tensor + + +# https://stackoverflow.com/a/63851681/9201239 +def get_all_subclasses(cls): + subclass_list = [] + + def recurse(cl): + for subclass in cl.__subclasses__(): + subclass_list.append(subclass) + recurse(subclass) + + recurse(cls) + + return set(subclass_list) + + +@instrument_w_nvtx +def free_param(param: Parameter) -> None: + """Free underlying storage of a parameter.""" + assert not param.ds_active_sub_modules, param.ds_summary() + if get_accelerator().on_accelerator(param.data): + # need to make sure that we don't free the parameter while it is still + # being used for computation + if not get_accelerator().is_synchronized_device(): + param.data.record_stream(get_accelerator().current_stream()) + # param.data doesn't store anything meaningful in partitioned state + param.data = torch.empty(0, dtype=param.dtype, device=param.device) + param.ds_status = ZeroParamStatus.NOT_AVAILABLE + + +reuse_buffers = False +temp_contiguous_tensor = None +empty_buffers = {} + + +# Inserts _post_init_method at the end of init method +# for all sub classes of torch.nn.Module +class InsertPostInitMethodToModuleSubClasses(object): + num_module_parameters = 0 + num_module_elements = 0 + + def __init__(self, enabled=True, mem_efficient_linear=True, ds_config=None, dtype=None): + self.mem_efficient_linear = mem_efficient_linear + self.enabled = enabled + self._set_dtype(ds_config, dtype) + assert self.dtype in [ + torch.half, torch.bfloat16, 
torch.float + ], f"Invalid data type {self.dtype}, allowed values are [torch.half, torch.bfloat16, torch.float]" + self.wrapped_cls = set() + self.skip_init_depth = 0 + + self.quantized_initialization = None + if ds_config is not None and ds_config.weight_quantization_config and ds_config.weight_quantization_config.quantized_initialization: + self.quantized_initialization = ds_config.weight_quantization_config.quantized_initialization + + def __enter__(self): + if not self.enabled: + return + + global zero_init_context + if zero_init_context == 0: + self.patch_init_and_builtins() + global top_level_context + top_level_context = self + + zero_init_context += 1 + + def __exit__(self, exc_type, exc_value, traceback): + if not self.enabled: + return + + global zero_init_context + zero_init_context -= 1 + + # Exiting the top level context + if zero_init_context == 0: + self.unpatch_init_and_builtins() + global top_level_context + top_level_context = None + + if dist.get_rank() == 0: + billion_elems = InsertPostInitMethodToModuleSubClasses.num_module_elements / 1e9 + num_params = InsertPostInitMethodToModuleSubClasses.num_module_parameters + logger.info( + f"finished initializing model - num_params = {num_params}, num_elems = {billion_elems:.2f}B") + + # Now that we cleaned up the metaclass injection, raise the exception. + if exc_type is not None: + return False + + # To be implemented by inheriting classes + def _post_init_method(self, module): + pass + + def _set_dtype(self, ds_config, dtype): + if ds_config is not None and dtype is None: + if ds_config.bfloat16_enabled and ds_config.fp16_enabled: + raise RuntimeError("bfloat16 and fp16 cannot be enabled at once") + + if ds_config.bfloat16_enabled: + self.dtype = torch.bfloat16 + elif ds_config.fp16_enabled: + self.dtype = torch.half + else: + self.dtype = torch.float + else: + self.dtype = dtype or torch.float16 if get_accelerator().is_fp16_supported( + ) else torch.bfloat16 if get_accelerator().is_bf16_supported else torch.float32 + + def patch_init_and_builtins(self): + + def apply_with_gather(orig_module_apply_fn: Callable) -> Callable: + """many models make use of child modules like Linear or Embedding which + perform their own weight initialization in their __init__ methods, + but will then have more weight initialization in a parent module's __init__ + method that modifies weights of child modules, which is typically done + using the Module.apply method. + + since the Init context manager partitions child modules immediately after + they are initialized, without modifying apply we would entirely skip + any initialization done by parent modules. + + to get around this issue, we wrap the function passed to Module.apply + so that the applied function is applied to child modules correctly. + """ + + def get_wrapped_fn_to_apply(fn_to_apply: Callable) -> Callable: + if hasattr(fn_to_apply, "wrapped"): + return fn_to_apply + + @functools.wraps(fn_to_apply) + def wrapped_fn_to_apply(module_to_apply_fn_to: Module) -> None: + """gathers parameters before calling apply function. afterwards + parameters are broadcasted to ensure consistency across all ranks + then re-partitioned. + + takes the following steps: + 1. allgathers parameters for the current module being worked on + 2. calls the original function + 3. broadcasts root rank's parameters to the other ranks + 4. 
re-partitions the parameters + """ + + # TODO Delay error checking for dangling partitioned parameters to post module init + # raise RuntimeError(f"not all parameters for {module_to_apply_fn_to.__class__.__name__}, " + # f"were zero params, is it possible that the parameters were " + # f"overwritten after they were initialized? " + # f"params: {[p for p in module_to_apply_fn_to.parameters(recurse=False)]} ") + + params_to_apply_fn_to: Iterable[Parameter] = list( + sorted([p for p in module_to_apply_fn_to.parameters(recurse=False) if is_zero_param(p)], + key=lambda p: p.ds_id)) + + for param in params_to_apply_fn_to: + param.all_gather() + + fn_to_apply(module_to_apply_fn_to) + + for param in params_to_apply_fn_to: + dist.broadcast(param.data, 0, group=param.ds_process_group) + + for param in params_to_apply_fn_to: + param.partition(has_been_updated=True) + + wrapped_fn_to_apply.wrapped = True + + return wrapped_fn_to_apply + + @functools.wraps(orig_module_apply_fn) + def wrapped_apply(module: Module, fn_to_apply: Callable) -> None: + orig_module_apply_fn(module, get_wrapped_fn_to_apply(fn_to_apply)) + + return wrapped_apply + + def hook_for_skip_init(module): + # this function is intended for handling the logic of torch.nn.utils.skip_init + # skip_init:module_cls(*args, **kwargs).to_empty(device=final_device), where kwargs['device']='meta' + # the function call occurs between module_cls(*args, **kwargs) and to_empty(device=final_device). + def partition_after_empty_init(f): + + @functools.wraps(f) + def wrapper(module, *args, **kwargs): + _module = f(module, *args, **kwargs) + # here is the post-hook for module.apply(empty_like...) + # after module.apply(empty_like...), the module has completed its empty init on real device + # since skip_init won't involve any computations or weight adjustments, we can directly utilize post_init + self._post_init_method(_module) + return _module + + return wrapper + + def post_wrapper_to_empty(f): + # append some wrapper restoration after to_empty() call + @functools.wraps(f) + def wrapper(*args, **kwargs): + res = f(*args, **kwargs) + # restore _apply hook + for subclass in get_all_subclasses(torch.nn.modules.module.Module): + _disable_class_apply(subclass) + # self restore + module.to_empty = f + return res + + return wrapper + + def _enable_class_apply(cls): + cls._old_apply_of_skip_init_hook = cls._apply + cls._apply = partition_after_empty_init(cls._apply) + + def _disable_class_apply(cls): + cls._apply = cls._old_apply_of_skip_init_hook + + # add hooks for to_empty: apply_(empty_like) + for subclass in get_all_subclasses(torch.nn.modules.module.Module): + _enable_class_apply(subclass) + + # add a restore hook when exiting skip_init + module.to_empty = post_wrapper_to_empty(module.to_empty) + + def partition_after(f): + + @functools.wraps(f) + def wrapper(module, *args, **kwargs): + + # important logic: We want to run post_init only after child's __init__ is + # completed, and do nothing after __init__ of any of its parents and grandparents in + # the inheritance ancestry. This way the partitioning will need to happen only once + # when the whole object is ready to be partitioned and not before. This is because + # often the child module will need to tweak the weights - for example running a + # custom weights init function. 
So if a parent created the weights param, the child + # won't need to gather it in order to tweak it + + print_rank_0(f'Before initializing {module.__class__.__name__}', force=False) + + is_child_module = False + if not hasattr(module, "_ds_child_entered"): + # child's __init__ was called, since parents all see the same object they can now skip post_init + is_child_module = True + setattr(module, "_ds_child_entered", True) + + init_on_meta = 'device' in kwargs and kwargs['device'] == 'meta' + if init_on_meta: + self.skip_init_depth += 1 + + f(module, *args, **kwargs) + if init_on_meta and self.skip_init_depth == 1: + # check and handle the logic of empty_init + hook_for_skip_init(module) + if is_child_module: + # child's __init__ is done, now we can run a single post_init on the child object + delattr(module, "_ds_child_entered") + + print_rank_0(f'Running post_init for {module.__class__.__name__}', force=False) + if self.skip_init_depth == 0: + self._post_init_method(module) + + print_rank_0(f'After initializing followed by post init for {module.__class__.__name__}', force=False) + if init_on_meta: + self.skip_init_depth -= 1 + + return wrapper + + def _enable_class(cls): + cls._old_init = cls.__init__ + cls.__init__ = partition_after(cls.__init__) + + def _init_subclass(cls, **kwargs): + cls._old_init = cls.__init__ + cls.__init__ = partition_after(cls.__init__) + + # Replace .__init__() for all existing subclasses of torch.nn.Module recursively + for subclass in get_all_subclasses(torch.nn.modules.module.Module): + _enable_class(subclass) + + # holding onto some methods so we can put them back the way they were in __exit__ + torch.nn.modules.module.Module._old_init_subclass = torch.nn.modules.module.Module.__init_subclass__ + torch.nn.modules.module.Module._old_apply = torch.nn.modules.module.Module.apply + torch.Tensor.__old_new__ = torch.Tensor.__new__ + + # Replace .__init__() for future subclasses of torch.nn.Module + torch.nn.modules.module.Module.__init_subclass__ = classmethod(_init_subclass) + if Init.override_module_apply: + torch.nn.modules.module.Module.apply = apply_with_gather(torch.nn.modules.module.Module._old_apply) + + self._add_tensor_creation_wrappers() + + if self.mem_efficient_linear: + print_rank_0( + "nn.functional.linear has been overridden with a more memory efficient version. 
This will persist unless manually reset.", + force=False) + self.linear_bk = torch.nn.functional.linear + torch.nn.functional.linear = zero3_linear_wrap + + if self.quantized_initialization: + print_rank_0("nn.functional.linear has been overridden with quantized linear version.", force=False) + torch.nn.functional.linear = wrap_quantized_functional(torch.nn.functional.linear) + torch.nn.functional.embedding = wrap_quantized_functional(torch.nn.functional.embedding) + for cls in WEIGHT_QUANTIZATION_LAYERS: + cls._load_from_state_dict = wrap_load_from_state_dict(cls._load_from_state_dict) + + logger.info("Enable Zero3 engine with INT4 quantization.") + + self.patched = True + + def unpatch_init_and_builtins(self): + if self.patched: + + def _disable_class(cls): + cls.__init__ = cls._old_init + + for subclass in get_all_subclasses(torch.nn.modules.module.Module): + _disable_class(subclass) + + # putting methods back the way we found them + torch.nn.modules.module.Module.__init_subclass__ = torch.nn.modules.module.Module._old_init_subclass + if Init.override_module_apply: + torch.nn.modules.module.Module.apply = torch.nn.modules.module.Module._old_apply + + self._remove_tensor_creation_wrappers() + + self.patched = False + + def _add_tensor_creation_wrappers(self): + torch.Tensor.__new__ = get_new_tensor_fn_for_dtype(self.dtype) + torch.tensor = zero_wrapper_for_fp_tensor_constructor(_orig_torch_tensor, self.dtype) + torch.empty = zero_wrapper_for_fp_tensor_constructor(_orig_torch_empty, self.dtype) + torch.zeros = zero_wrapper_for_fp_tensor_constructor(_orig_torch_zeros, self.dtype) + torch.ones = zero_wrapper_for_fp_tensor_constructor(_orig_torch_ones, self.dtype) + torch.full = zero_wrapper_for_fp_tensor_constructor(_orig_torch_full, self.dtype) + torch.arange = zero_wrapper_for_fp_tensor_constructor(_orig_torch_arange, self.dtype) + torch.eye = zero_wrapper_for_fp_tensor_constructor(_orig_torch_eye, self.dtype) + torch.randn = zero_wrapper_for_fp_tensor_constructor(_orig_torch_randn, self.dtype) + + def _remove_tensor_creation_wrappers(self): + torch.Tensor.__new__ = torch.Tensor.__old_new__ + torch.tensor = _orig_torch_tensor + torch.empty = _orig_torch_empty + torch.zeros = _orig_torch_zeros + torch.ones = _orig_torch_ones + torch.full = _orig_torch_full + torch.arange = _orig_torch_arange + torch.eye = _orig_torch_eye + torch.randn = _orig_torch_randn + + +def shutdown_init_context(): + """ + This function is used to initialize deepspeed engine inside the context of Init. + We need to remove the wrappers but keep the context. + """ + if top_level_context: + top_level_context.unpatch_init_and_builtins() + + +def restore_init_context(): + """ + This function is used to restore the wrappers after deepspeed engine is initialized. 
+ """ + if top_level_context: + top_level_context.patch_init_and_builtins() + + +class AllGatherHandle: + + def __init__(self, handle, param: Parameter, quantization=None) -> None: + if param.ds_status != ZeroParamStatus.INFLIGHT: + raise RuntimeError(f"expected param {param.ds_summary()} to be available") + + self.__handle = handle + self.__param = param + self.__quantization = quantization + + def wait(self) -> None: + instrument_w_nvtx(self.__handle.wait)() + if self.__quantization: + instrument_w_nvtx(self.__quantization.quant_handle.wait)() + self.__param.data = self.__quantization.backend.dequantize( + self.__quantization.quantized_param, self.__quantization.scale_buffer).to(self.__param.device) + self.__param.ds_status = ZeroParamStatus.AVAILABLE + + +class AllGatherCoalescedHandle: + + def __init__( + self, + allgather_handle, + params: List[Parameter], + partitions: List[Tensor], + world_size: int, + use_secondary_tensor=False, + quantization=None, + ) -> None: + self.allgather_handle = allgather_handle + self.params = params + self.partitions = partitions + self.world_size = world_size + self.use_secondary_tensor = use_secondary_tensor + self.complete = False + self.quantization = quantization + + for param in self.params: + if param.ds_status != ZeroParamStatus.INFLIGHT: + raise RuntimeError(f"expected param {param.ds_summary()} to not be available") + + @instrument_w_nvtx + def wait(self) -> None: + if self.complete: + return + + instrument_w_nvtx(self.allgather_handle.wait)() + + if self.quantization: + instrument_w_nvtx(self.quantization.quant_handle.wait)() + flat_tensor = self.quantization.backend.dequantize( + self.quantization.quantized_param, self.quantization.scale_buffer).to(self.params[0].device) + + self.partitions: List[Parameter] = [] + for i in range(self.world_size): + self.partitions.append( + flat_tensor.narrow(0, self.quantization.partition_sz * i, self.quantization.partition_sz)) + + # split the single tensor out into individual tensors + param_offset = 0 + for param in self.params: + assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight" + partitions: List[Tensor] = [] + ds_tensor_numel = param.ds_tensor.ds_numel + if self.use_secondary_tensor: + ds_tensor_numel *= param.ds_secondary_tensor_num_of_groups + for rank in range(self.world_size): + param_start = rank * ds_tensor_numel + if param_start < param.ds_numel: + part_to_copy = self.partitions[rank].narrow(0, param_offset, + min(param.ds_numel - param_start, ds_tensor_numel)) + partitions.append(part_to_copy) + param.data = instrument_w_nvtx(torch.cat)(partitions).view(param.ds_shape) + param.ds_status = ZeroParamStatus.AVAILABLE + + for part_to_copy in partitions: + if not get_accelerator().is_synchronized_device(): + part_to_copy.record_stream(get_accelerator().current_stream()) + + param_offset += ds_tensor_numel + + self.complete = True + + +class MultipleAllGatherHandles: + + def __init__(self, handles: List[AllGatherCoalescedHandle]): + self.handles = handles + + def wait(self) -> None: + for handle in self.handles: + handle.wait() + + +class QuantizationInfo: + # a placeholder object to store all quant related vars used in handles + def __init__(self) -> None: + self.quantized_param = None + self.backend = None + self.quant_handle = None + self.scale_buffer = None + + +class CUDAQuantizer: + async_flag = True + target_group_size = 8000 # the optimal size is 4k, so we set the target to be below 8k + group_size_cache = dict() + quantizer_cuda_module 
= None + + def __init__(self) -> None: + if CUDAQuantizer.quantizer_cuda_module is None: + CUDAQuantizer.quantizer_cuda_module = deepspeed.ops.op_builder.QuantizerBuilder().load() + + def quantize(self, param, groups=None): + if groups is None: + try: + groups = self.group_size_cache[param.numel()] + except KeyError: + groups = math.ceil(param.numel() / self.target_group_size) + while groups < param.numel(): + if param.numel() % (8 * groups) == 0: + break + groups += 1 + while True: + if param.numel() % (8 * groups * 2) == 0 and param.numel( + ) / groups > self.target_group_size: #hard limit of 16k group_size + groups *= 2 + else: + break + assert ( + param.numel() % (8 * groups) == 0 + ), f"Qantized weight requires the number of weights be a multiple of 8. Yet {param.numel()} cannot be divided by 8*{groups}" + assert (param.numel() / groups < 16000), f"{param.numel()} / {groups} is larger than 16k" + assert param.numel( + ) > groups, f"Adaptive grouping algorithm cannot find a group size for input tensor of size {param.numel()}" + self.group_size_cache[param.numel()] = groups + return self.quantizer_cuda_module.quantize(param.to(get_accelerator().device_name()), groups, 8, + self.quantizer_cuda_module.Symmetric) + + def dequantize(self, quantized_param, scale): + return self.quantizer_cuda_module.dequantize(quantized_param, scale, scale.numel(), 8, + self.quantizer_cuda_module.Symmetric) + + +def _no_gather_coalesced(params: Iterable[Parameter]) -> AllGatherCoalescedHandle: + for param in params: + if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: + raise RuntimeError(f"expect param.ds_status == ZeroParamStatus.NOT_AVAILABLE, got{param.ds_summary()}") + param.ds_status = ZeroParamStatus.INFLIGHT + + params = sorted(params, key=lambda p: p.ds_id) + if len(params) == 1: + param, = params + return NoGatherHandle(param) + return NoGatherCoalescedHandle(params) + + +# Replaces all parameters in module with Scattered Parameters +class Init(InsertPostInitMethodToModuleSubClasses): + param_id = 0 + param_persistence_threshold = get_config_default(DeepSpeedZeroConfig, "param_persistence_threshold") + model_persistence_threshold = get_config_default(DeepSpeedZeroConfig, "model_persistence_threshold") + num_persisted_parameters = 0 + num_persisted_elements = 0 + apply_param_persistence = False + override_module_apply = get_config_default(DeepSpeedZeroConfig, "override_module_apply") + + def __init__( + self, + module=None, + data_parallel_group=None, + mem_efficient_linear=True, + remote_device=None, + pin_memory=False, + config_dict_or_path=None, + config=None, + enabled=True, + dtype=None, + mpu=None, + zero_param_parallel_group=None, + zero_quantized_weights=False, + zero_quantized_nontrainable_weights=False, + sequence_data_parallel_group=None, + param_swapper=None, + ): + """A context to enable massive model construction for training with + ZeRO-3. Models are automatically partitioned (or, sharded) across the + system and converted to half precision. + + Args: + module (``torch.nn.Module``, optional): If provided, partition the model as + if it was constructed in the context. + data_parallel_group (``deepspeed.comm`` process group, optional): + The group of processes to partition among. Defaults to all processes. + mem_efficient_linear (bool, optional): Replace + torch.nn.functional.linear with an implementation that allows + DeepSpeed to partition parameters. Defaults to ``True``. + remote_device (string, optional): The initial device to store model + weights e.g., ``cpu``, ``nvme``. 
Passing ``"cpu"`` will create the model in CPU + memory. The model may still be moved to GPU based on the + offload settings for training. Defaults to param offload device if a config is + defined, otherwise GPU. + pin_memory (bool, optional): Potentially increase performance by + using pinned memory for model weights. ``remote_device`` must be + ``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``. + config_dict_or_path (dict or ``json file``, optional): If provided, provides configuration + for swapping fp16 params to NVMe. + config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead. + enabled (bool, optional): If ``False``, this context has no + effect. Defaults to ``True``. + dtype (``dtype``, optional): Can be used to change the data type of the parameters. + Supported options are ``torch.half`` and ``torch.float``. Defaults to ``None`` + mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}. + zero_param_parallel_group(``object``, optional): Parallel (comm) group for dual partitioning of ZeRO params. + zero_quantized_weights (bool, optional): If ``True``, turn on quantized weights in all gather weights. Default is ``False`` + zero_quantized_nontrainable_weights (bool, optional): If ``True``, nontrainable weights will be stored in quantized format. Default is ``False`` + param_swapper (``deepspeed.runtime.swap_tensor.partitioned_param_swapper.AsyncPartitionedParameterSwapper``, optional): [Experimental] Use existing parameter swapper. Defaults to ``None``. + This argument will be removed in the near future. + + This context accelerates model initialization and enables models that + are too large to allocate in their entirety in CPU memory. It has the + following effects: + + #. allocates tensors to either GPU or CPU memory or NVMe + #. converts floating point tensors to half precision + #. immediately partitions tensors among the group of data-parallel devices + #. (*optional*) replaces ``torch.nn.functional.linear`` with a more + memory-efficient implementation + + These modifications allow for models that exceed the size of local CPU/GPU + memory/NVMe, but fit within the total NVMe capacity (*i.e.*, aggregate CPU + or GPU memory or NVMe) across all nodes. Consider initializing a model with one + trillion parameters, whose weights occupy two terabytes (TB) in half + precision. The initial CPU allocation in full precision requires 4TB of + memory *per process*, and so a system with 8 GPUs per node would need 32TB of + CPU memory due to data-parallel redundancies. Instead, by immediately + partitioning tensors we remove the redundancies. The result is that + regardless of the number of GPUs, we still only require the original 4TB. This + allows for a linear increase in model size with the aggregate system memory. + For example, if a node has 1TB of memory and 8 GPUs, we could fit a trillion + parameter model with 4 nodes and 32 GPUs. + + Important: If the fp16 weights of the model can't fit onto a single GPU memory + this feature must be used. + + .. note:: + Initializes ``deepspeed.comm`` if it has not already been done so. + See :meth:`deepspeed.init_distributed` for more information. + + .. note:: + Only applicable to training with ZeRO-3. + + Examples + -------- + + #. Allocate a model and partition it among all processes: + + .. code-block:: python + + with deepspeed.zero.Init(): + model = MyLargeModel() + + + #. 
Allocate a model in pinned CPU memory and partition it among a subgroup of processes: + + .. code-block:: python + + with deepspeed.zero.Init(data_parallel_group=mpu.get_data_parallel_group(), + remote_device="cpu", + pin_memory=True): + model = MyLargeModel() + + + #. Partition an already-allocated model in CPU memory: + + .. code-block:: python + + model = deepspeed.zero.Init(module=model) + """ + if config is not None: + config_dict_or_path = config + logger.warning( + f'zero.Init: the `config` argument is deprecated. Please use `config_dict_or_path` instead.') + _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, + mpu) if config_dict_or_path is not None else None + if _ds_config is not None: + if _ds_config.zero_config.memory_efficient_linear and _ds_config.compile_config.enabled: + # memory_efficient_linear displays numerous errors when torch.compile is enabled. + # Refer to https://github.com/pytorch/pytorch/issues/119059 for details. + # Further investigation into performance is necessary, even after resolving this issue because + # the `memory_efficient_linear` module may lead to more graph breaks compared to the original implementation. + logger.warning(f'memory_efficient_linear is disabled when torch.compile is enabled.') + mem_efficient_linear = False + else: + mem_efficient_linear = _ds_config.zero_config.memory_efficient_linear + + super().__init__(enabled=enabled, mem_efficient_linear=mem_efficient_linear, ds_config=_ds_config, dtype=dtype) + if not dist.is_initialized(): + init_distributed() + assert dist.is_initialized(), "Parameters cannot be scattered without initializing deepspeed.comm" + + if data_parallel_group is None and sequence_data_parallel_group is None: + self.ds_process_group = dist.get_world_group() + elif sequence_data_parallel_group is not None: + self.ds_process_group = sequence_data_parallel_group + elif data_parallel_group is not None: + self.ds_process_group = data_parallel_group + else: # both given + raise ValueError( + "Both 'data_parallel_group' and 'sequence_data_parallel_group' were specified. Please provide only one of these arguments." + ) + + self.rank = dist.get_rank(group=self.ds_process_group) + self.dp_world_size = dist.get_world_size(group=self.ds_process_group) + + self.zero_param_process_group = zero_param_parallel_group + if _ds_config is not None and _ds_config.zero_config.zero_hpz_partition_size > 1 and self.zero_param_process_group is None: + groups._create_zero_param_parallel_group(_ds_config.zero_config.zero_hpz_partition_size) + self.zero_param_process_group = groups._get_zero_param_intra_parallel_group() + + self.num_ranks_in_param_group = self.dp_world_size + self.rank_in_group = self.rank + self.num_param_groups = 1 + + if self.zero_param_process_group is not None: + self.num_ranks_in_param_group = groups._get_zero_param_intra_parallel_group_world_size() + self.num_param_groups = int(self.dp_world_size / self.num_ranks_in_param_group) + self.rank_in_group = groups._get_zero_param_intra_parallel_rank_in_mygroup() + print_rank_0(f"hpZeRO group size: {self.num_ranks_in_param_group}", force=True) + + logger.debug( + "hpZeRO partition parameter my rank in world {} my rank in group {} ranks in my param partition group: {} " + .format(self.rank, self.rank_in_group, groups._get_zero_param_intra_parallel_group_ranks())) + + # Local device is the device where the parameters are consumed, must be default device. 
+ # It is the device where parameters are fully instantiated using allgather + self.local_device = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])) + get_accelerator().set_device(self.local_device) + + self.quantized_weights = zero_quantized_weights + if _ds_config is not None and _ds_config.zero_config.zero_quantized_weights and not self.quantized_weights: + self.quantized_weights = _ds_config.zero_config.zero_quantized_weights + self.quantized_nontrainable_weights = zero_quantized_nontrainable_weights + if _ds_config is not None and _ds_config.zero_config.zero_quantized_nontrainable_weights and not self.quantized_nontrainable_weights: + self.quantized_nontrainable_weights = _ds_config.zero_config.zero_quantized_nontrainable_weights + + self.module = module + if (self.quantized_weights or self.quantized_nontrainable_weights): + self.quantizer_module = CUDAQuantizer() + print_rank_0(f'Using quantizer for weights: {self.quantizer_module.__class__.__name__}', force=True) + + if _ds_config is not None: + Init.override_module_apply = _ds_config.zero_config.override_module_apply + + if _ds_config.zero_config.offload_param is not None: + remote_device = _ds_config.zero_config.offload_param.device + pin_memory = _ds_config.zero_config.offload_param.pin_memory + + self._validate_remote_device(remote_device, _ds_config) + + # Remote device is the device where parameter partitions are stored + # It can be same as local_device or it could be CPU or NVMe. + self.remote_device = self.local_device if remote_device in [None, OffloadDeviceEnum.none] else remote_device + self.pin_memory = pin_memory if (self.remote_device in [OffloadDeviceEnum.cpu, OffloadDeviceEnum.nvme + ]) else False + + # Enable fp16 param swapping to NVMe + if self.remote_device == OffloadDeviceEnum.nvme: + self.param_swapper = param_swapper or AsyncPartitionedParameterSwapper(_ds_config, self.dtype) + else: + self.param_swapper = None + + # If we are provided an already-allocated module to prepare. 
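        # Editor's note (illustrative sketch, not part of the upstream DeepSpeed
        # source): the branch below is the post-hoc path used by
        # `deepspeed.zero.Init(module=model)`. A minimal usage sketch, where
        # `MyLargeModel` and `ds_cfg` are hypothetical user-side names:
        #
        #     import deepspeed
        #
        #     model = MyLargeModel()                            # params already allocated
        #     deepspeed.zero.Init(module=model,                 # convert, broadcast from
        #                         config_dict_or_path=ds_cfg)   # rank 0, then partition
        #
        # Each parameter goes through _zero_init_param(): it is converted to a
        # ZeRO-3 parameter, broadcast so every rank holds identical values, and
        # then partitioned across the data-parallel group.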
+ if module is not None: + assert isinstance(module, torch.nn.Module) + self._convert_to_zero_parameters(module.parameters(recurse=True)) + + self.use_all_gather_into_tensor = dist.has_all_gather_into_tensor() + if not self.use_all_gather_into_tensor: + logger.info(f"all_gather_into_tensor API is not available in torch {torch.__version__}") + + def _update_persist_config(self, ds_config): + Init.apply_param_persistence = True + Init.param_persistence_threshold = ds_config.zero_config.param_persistence_threshold + Init.model_persistence_threshold = ds_config.zero_config.model_persistence_threshold // self.num_partitions + + def _zero_init_param(self, param): + self._convert_to_deepspeed_param(param) + if dist.get_world_group() == self.get_dp_process_group(): + dist.broadcast(param.data, 0, self.get_dp_process_group()) + else: + dist.broadcast(param.data, dist.get_global_rank(self.get_dp_process_group(), 0), + self.get_dp_process_group()) + param.partition() + + def _convert_to_zero_parameters(self, param_list): + for param in param_list: + if is_zero_param(param): + continue + + param.data = param.data.to(self.local_device) + self._zero_init_param(param) + + def _validate_remote_device(self, remote_device, ds_config): + if ds_config is not None: + if remote_device in [None, OffloadDeviceEnum.cpu]: + if ds_config.zero_config.offload_param is not None: + offload_param_device = ds_config.zero_config.offload_param.device + assert offload_param_device != OffloadDeviceEnum.nvme, \ + f"'device' in DeepSpeed Config cannot be {offload_param_device} if remote device is {remote_device}." + + if remote_device == OffloadDeviceEnum.nvme: + assert ds_config.zero_config.offload_param is not None, \ + f'"offload_param" must be defined in DeepSpeed Config if remote device is {OffloadDeviceEnum.nvme}.' + + assert ds_config.zero_config.offload_param.nvme_path is not None, \ + f'"nvme_path" in DeepSpeed Config cannot be None if remote device is {OffloadDeviceEnum.nvme}' + + def _post_init_method(self, module): + #see_memory_usage(f"Before converting params in {module.__class__.__name__}", force=False) + print_rank_0(f'Converting Params in {module.__class__.__name__}', force=False) + see_memory_usage(f"Before converting and partitioning params in {module.__class__.__name__}", force=False) + + for name, param in module.named_parameters(recurse=False): + print_rank_0(f'Analyzing param {name} in {module.__class__.__name__}', force=False) + InsertPostInitMethodToModuleSubClasses.num_module_parameters += 1 + InsertPostInitMethodToModuleSubClasses.num_module_elements += param.numel() + if not is_zero_param(param): + if not get_accelerator().on_accelerator(param): + param.data = param.data.to(self.local_device) + + if name == 'weight' and self.quantized_initialization and type(module) in WEIGHT_QUANTIZATION_LAYERS: + _quantize_param(param, self.quantized_initialization) + + self._zero_init_param(param) + print_rank_0( + f"Partitioning param {debug_param2name_id_shape(param)} module={debug_module2name(module)}") + + see_memory_usage( + f"Param count {InsertPostInitMethodToModuleSubClasses.num_module_elements}. 
After converting and partitioning params in {module.__class__.__name__}", + force=False) + + def _convert_to_deepspeed_param(self, param): + + # Partitioned, Normal, Remote + param.ds_param_type = ZeroParamType.PARTITIONED + + # Replicated vs Partitioned vs Inflight + param.ds_status = ZeroParamStatus.AVAILABLE + + # Stores the shape of the original tensor + param.ds_shape = param.shape + + # Stores the number of elements in the original parameter without padding + param.ds_numel = param.numel() + + # Stores the partitioned copy of the tensor + param.ds_tensor = None + + # Keeps track of how many active sub-modules need this param at any given point in time + param.ds_active_sub_modules = set() + + # If this flag is true, then the parameters are replicated throughput training + # And only partitioned before the step + if Init.apply_param_persistence and param.ds_numel <= Init.param_persistence_threshold and Init.num_persisted_elements + param.ds_numel <= Init.model_persistence_threshold: + param.ds_persist = True + Init.num_persisted_parameters += 1 + Init.num_persisted_elements += param.ds_numel + else: + param.ds_persist = False + + param.is_external_param = False + + # The group that the parameter is scattered across. + param.ds_process_group = self.ds_process_group + + # Stores the secondary partitioned copy of the tensor + param.ds_secondary_tensor = None + + #Process group for secondary partition all (group) gather + param.ds_zero_param_process_group = self.zero_param_process_group + param.ds_secondary_tensor_group_size = self.num_ranks_in_param_group + param.ds_secondary_tensor_num_of_groups = self.num_param_groups + + # This is set to the Async Param swapper if remote device is nvme + # else this is set to None + param.nvme_swapper = self.param_swapper + + # DeepSpeed Param ID + param.ds_id = Init.param_id + Init.param_id += 1 + + def all_gather(param_list=None, async_op=False, hierarchy=0): + cls = param + if param_list is None: + param_list = [cls] + return self._all_gather(param_list, async_op=async_op, hierarchy=hierarchy) + + def _all_gather_dtype(dtype, params, world_size, rank_in_group, ds_process_group): + partition_sz = sum(p.ds_tensor.ds_numel for p in params) + + use_secondary_tensor = params[0].ds_secondary_tensor is not None + + if use_secondary_tensor: + partition_sz = sum(p.ds_tensor.ds_numel * p.ds_secondary_tensor_num_of_groups for p in params) + + flat_tensor = torch.empty(partition_sz * world_size, + dtype=dtype, + device=get_accelerator().current_device_name(), + requires_grad=False) + + partitions: List[Parameter] = [] + for i in range(world_size): + partitions.append(flat_tensor.narrow(0, partition_sz * i, partition_sz)) + + if use_secondary_tensor: + instrument_w_nvtx( + torch.cat)([p.ds_secondary_tensor.to(get_accelerator().current_device_name()) for p in params], + out=partitions[rank_in_group]) + else: + instrument_w_nvtx(torch.cat)([p.ds_tensor.to(get_accelerator().current_device_name()) for p in params], + out=partitions[rank_in_group]) + handle = _dist_allgather_fn(partitions[rank_in_group], flat_tensor, ds_process_group) + #Fix get_partition_dp_group(params[0])) + + return AllGatherCoalescedHandle( + allgather_handle=handle, + params=params, + partitions=partitions, + world_size=world_size, + use_secondary_tensor=use_secondary_tensor, + ) + + @instrument_w_nvtx + def all_gather_coalesced(params: Iterable[Parameter], + safe_mode: bool = False, + quantize: bool = False) -> AllGatherCoalescedHandle: + + # fetches from nvme if the partition is not 
available and in nvme + self._ensure_availability_of_partitioned_params(params) + + if self.num_partitions == 1: + return _no_gather_coalesced(params) + + for param in params: + if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: + raise RuntimeError(param.ds_summary()) + param.ds_status = ZeroParamStatus.INFLIGHT + + #use appropriate all gather process group + ds_process_group = self.ds_process_group + rank_in_group = self.rank + world_size = self.dp_world_size + use_secondary_tensor = params[0].ds_secondary_tensor is not None + if self.zero_param_process_group and use_secondary_tensor: + ds_process_group = self.zero_param_process_group #intragroup + rank_in_group = self.rank_in_group + world_size = self.num_ranks_in_param_group + + #pprint(dir(ds_process_group)) + # ensure that each rank has params in same order. the allgather + # is done by flattening the parameter list into a single tensor that + # can be allgathered in a single call - this means that if each rank + # gives a list of the same parameters in a different order we will + # silently get incorrect parameter values, and have very difficult + # to debug correctness issues. + params = sorted(params, key=lambda p: p.ds_id) + + if logger.isEnabledFor(logging.DEBUG): + debug_rank0(f"-allgather_coalesced: {[p.ds_id for p in params]}") + + if safe_mode: + # ensure that same list (with same ordering) of parameters are + # being allgathered across all ranks, otherwise could mix + # data between tensors. + assert_ints_same_as_other_ranks([p.ds_id for p in params]) + # ensure that tensors from each rank agree on the same ds_numel + # otherwise could mix data between tensors. + assert_ints_same_as_other_ranks([p.ds_tensor.ds_numel for p in params]) + + if len(params) == 1: + # have an opportunity to avoid some intermediate memory allocations + param = params[0] + buffer_size = math.ceil(param.ds_numel / world_size) * world_size + if use_secondary_tensor: + buffer_size = param.ds_secondary_tensor.shape[0] * world_size #make sure out is appropriately sized + + param_ds_tensor = param.ds_secondary_tensor if use_secondary_tensor else param.ds_tensor + param_buffer = torch.empty( + buffer_size, + dtype=param_ds_tensor.dtype if not quantize else torch.int8, + device=get_accelerator().current_device_name(), + requires_grad=False, + ) + if not quantize: + handles = _dist_allgather_fn( + param_ds_tensor.to(get_accelerator().current_device_name()), + param_buffer, + ds_process_group, + ) + param.data = param_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape).to(param.device) + return AllGatherHandle(handles, param) + else: + if hasattr(param_ds_tensor, "ds_quant_scale"): + scales = param_ds_tensor.ds_quant_scale + quantized_param = param_ds_tensor.data + else: + quantized_param, scales = self.quantizer_module.quantize(param_ds_tensor) + handle = _dist_allgather_fn(quantized_param.to(get_accelerator().current_device_name()), + param_buffer, ds_process_group) + + quant_scale_buffer = torch.empty( + scales.numel() * world_size, + dtype=scales.dtype, + device=get_accelerator().current_device_name(), + requires_grad=False, + ) + quant_handle = _dist_allgather_fn(scales.to(get_accelerator().current_device_name()), + quant_scale_buffer, ds_process_group) + quant_info = QuantizationInfo() + quant_info.quantized_param = param_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape).to( + param.device) + quant_info.backend = self.quantizer_module + quant_info.quant_handle = quant_handle + quant_info.scale_buffer = quant_scale_buffer + return 
AllGatherHandle(handle, param, quantization=quant_info) + + else: + if not quantize: + dtype_params = defaultdict(list) + for p in params: + dtype_params[p.ds_tensor.dtype].append(p) + handles = [] + for dtype, params in dtype_params.items(): + handles.append(_all_gather_dtype(dtype, params, world_size, rank_in_group, ds_process_group)) + + return MultipleAllGatherHandles(handles) + + else: + partition_sz = sum(p.ds_tensor.ds_numel for p in params) + + if use_secondary_tensor: + partition_sz = sum(p.ds_tensor.ds_numel * p.ds_secondary_tensor_num_of_groups for p in params) + + flat_tensor = torch.empty(partition_sz * world_size, + dtype=torch.int8, + device=get_accelerator().current_device_name(), + requires_grad=False) + + if use_secondary_tensor: + if hasattr(params[0].ds_secondary_tensor, "ds_quant_scale"): + quantized_param = instrument_w_nvtx(torch.cat)([ + p.ds_secondary_tensor.data.to(get_accelerator().current_device_name()) for p in params + ]) + scales = instrument_w_nvtx(torch.cat)([ + p.ds_secondary_tensor.ds_quant_scale.to(get_accelerator().current_device_name()) + for p in params + ]) + else: + quantized_param, scales = self.quantizer_module.quantize( + instrument_w_nvtx(torch.cat)([ + p.ds_secondary_tensor.to(get_accelerator().current_device_name()) for p in params + ])) + else: + if hasattr(params[0].ds_tensor, "ds_quant_scale"): + quantized_param = instrument_w_nvtx(torch.cat)( + [p.ds_tensor.data.to(get_accelerator().current_device_name()) for p in params]) + scales = instrument_w_nvtx(torch.cat)([ + p.ds_tensor.ds_quant_scale.to(get_accelerator().current_device_name()) for p in params + ]) + else: + quantized_param, scales = self.quantizer_module.quantize( + instrument_w_nvtx(torch.cat)( + [p.ds_tensor.to(get_accelerator().current_device_name()) for p in params])) + quant_scale_buffer = torch.empty( + scales.numel() * world_size, + dtype=torch.float32, + device=get_accelerator().current_device_name(), + requires_grad=False, + ) + handle = _dist_allgather_fn(quantized_param, flat_tensor, ds_process_group) + quant_handle = _dist_allgather_fn(scales, quant_scale_buffer, ds_process_group) + quant_info = QuantizationInfo() + quant_info.quantized_param = flat_tensor + quant_info.backend = self.quantizer_module + quant_info.quant_handle = quant_handle + quant_info.scale_buffer = quant_scale_buffer + quant_info.partition_sz = partition_sz + quant_info.world_size = world_size + return AllGatherCoalescedHandle( + allgather_handle=handle, + params=params, + partitions=None, + world_size=world_size, + use_secondary_tensor=use_secondary_tensor, + quantization=quant_info, + ) + + def partition(param_list=None, hierarchy=0, has_been_updated=False): + cls = param + print_rank_0(f"{'--'*hierarchy}----Partitioning param {debug_param2name_id_shape_device(cls)}", + force=False) + if param_list is None: + param_list = [cls] + self._partition(param_list, has_been_updated=has_been_updated) + + def reduce_gradients_at_owner(param_list=None, hierarchy=0): + cls = param + if param_list is None: + param_list = [cls] + print_rank_0( + f"{'--'*hierarchy}----Reducing Gradients for param with ids {[param.ds_id for param in param_list]} to owner" + ) + self._reduce_scatter_gradients(param_list) + + def partition_gradients(param_list=None, partition_buffers=None, hierarchy=0, accumulate=False): + cls = param + print_rank_0( + f"{'--'*hierarchy}----Partitioning param gradient with id {debug_param2name_id_shape_device(cls)}") + if param_list is None: + param_list = [cls] + if isinstance(partition_buffers, 
torch.Tensor): + partition_buffers = [partition_buffers] + + self._partition_gradients(param_list, partition_buffers=partition_buffers, accumulate=accumulate) + + def aligned_size(): + return self._aligned_size(param) + + def padding_size(): + return self._padding_size(param) + + def partition_numel(): + return self._partition_numel(param) + + def item_override(): + param.all_gather() + return param._orig_item() + + def ds_summary(slf: torch.Tensor, use_debug_name: bool = False) -> dict: + return { + "id": debug_param2name_id(slf) if use_debug_name else slf.ds_id, + "status": slf.ds_status.name, + "numel": slf.numel(), + "ds_numel": slf.ds_numel, + "shape": tuple(slf.shape), + "ds_shape": tuple(slf.ds_shape), + "requires_grad": slf.requires_grad, + "grad_shape": tuple(slf.grad.shape) if slf.grad is not None else None, + "persist": slf.ds_persist, + "active_sub_modules": slf.ds_active_sub_modules, + "ds_tensor.shape": slf.ds_tensor.shape if slf.ds_tensor is not None else None + } + + def convert_to_zero_parameters(param_list): + self._convert_to_zero_parameters(param_list) + + def allgather_before(func: Callable) -> Callable: + + def wrapped(*args, **kwargs): + param.all_gather() + return func(*args, **kwargs) + + return wrapped + + # Collectives for gathering and partitioning parameters + param.all_gather = all_gather + param.all_gather_coalesced = all_gather_coalesced + param.partition = partition + + # Collective for averaging gradients + param.reduce_gradients_at_owner = reduce_gradients_at_owner + param.partition_gradients = partition_gradients + + # Partitioning size utilities + param.aligned_size = aligned_size + param.padding_size = padding_size + param.partition_numel = partition_numel + param.ds_summary = types.MethodType(ds_summary, param) + + param.item = allgather_before(param.item) + + param.convert_to_zero_parameters = convert_to_zero_parameters + + def _aligned_size(self, param): + return param.ds_numel + self._padding_size(param) + + def _padding_size(self, param): + remainder = param.ds_numel % self.num_partitions + return (self.num_partitions - remainder) if remainder else 0 + + def _partition_numel(self, param): + return param.ds_tensor.ds_numel + + def _ensure_availability_of_partitioned_params(self, params): + swap_in_list = [] + swap_in_flight = [] + for param in params: + if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE: + assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE + swap_in_list.append(param) + if param.ds_tensor.status == PartitionedParamStatus.INFLIGHT: + assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE + swap_in_flight.append(param) + if len(swap_in_list) > 0: + swap_in_list[0].nvme_swapper.swap_in(swap_in_list, async_op=False) + elif len(swap_in_flight) > 0: + swap_in_flight[0].nvme_swapper.synchronize_reads() + + @instrument_w_nvtx + def _all_gather(self, param_list, async_op=False, hierarchy=None): + + # fetches from nvme if the partition is not available and in nvme + self._ensure_availability_of_partitioned_params(param_list) + + handles = [] + all_gather_list = [] + for param in param_list: + if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: + if async_op: + handle = self._allgather_param(param, async_op=async_op, hierarchy=hierarchy) + param.ds_status = ZeroParamStatus.INFLIGHT # if async_op else ZeroParamStatus.AVAILABLE + handles.append(handle) + else: + all_gather_list.append(param) + # note: 
param_list may contain params that are already in flight / aviailable. So we need to use all_gather_list + if not async_op: + if len(all_gather_list) == 1: + ret_value = self._allgather_params(all_gather_list, hierarchy=hierarchy) + else: + all_gather_quantize_list = [] + all_gather_nonquantize_list = [] + for param in all_gather_list: + if hasattr(param.ds_tensor, + "ds_quant_scale") or (hasattr(param, "ds_secondary_tensor") + and hasattr(param.ds_secondary_tensor, "ds_quant_scale")): + all_gather_quantize_list.append(param) + else: + all_gather_nonquantize_list.append(param) + # _allgather_params_coalesced always return None + self._allgather_params_coalesced(all_gather_nonquantize_list, hierarchy, quantize=False) + self._allgather_params_coalesced(all_gather_quantize_list, hierarchy, quantize=True) + for param in all_gather_list: + param.ds_status = ZeroParamStatus.AVAILABLE + return None + + return handles + + def _partition(self, param_list, force=False, has_been_updated=False): + for param in param_list: + print_rank_0(f"Before Partitioning Param {param.ds_id}", force=False) + if self.zero_param_process_group is not None: + self._partition_param_sec(param) + self._partition_param(param, has_been_updated=has_been_updated) + + param.ds_status = ZeroParamStatus.NOT_AVAILABLE + # if param.ds_tensor is not None: + # assert id(param.data) == id(param.ds_tensor.data), \ + # "After the parameters are initially partitioned, make sure we are not recreating the partition." + #print_rank_0(f"After Partitioning Param {param.ds_id} {param.ds_tensor.size()} {param.ds_tensor}",force=False) + @instrument_w_nvtx + def _partition_param(self, param, buffer=None, has_been_updated=False): + assert param.ds_status is not ZeroParamStatus.INFLIGHT, f" {param} Cannot partition a param in flight" + global reuse_buffers + print_rank_0(f"Param id {param.ds_id} status is {param.ds_status}", force=False) + if param.ds_status is ZeroParamStatus.AVAILABLE: + print_rank_0(f"Partitioning param id {param.ds_id} reuse buffers {reuse_buffers}", force=False) + # if reuse_buffers and False: + # numel = buffer.numel() + # buffer = param.data.view(-1) + # print_rank_0( + # "Returning buffer for param {param.ds_id} with numel {param.ds_numel} to empty buffers", + # force=False) + # if numel in empty_buffers: + # empty_buffers[numel].append(buffer) + + # if deepspeed.comm.get_rank(): + # print(f"Releasing {param.data.numel()}") + + if param.ds_tensor is not None and not has_been_updated: ##param already partitioned + + #print_rank_0(f"Param {param.ds_id} pri {param.ds_tensor.size()} loc? 
{param.ds_tensor.final_location}", force=True) + #param.data = param.ds_tensor.data + + see_memory_usage(f'Before partitioning param {param.ds_id} {param.shape}', force=False) + # param.data does not store anything meaningful in partitioned state + free_param(param) + see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False) + + if param.ds_tensor.final_location == OffloadDeviceEnum.nvme: + print_rank_0(f"Param {param.ds_id} partition released since it exists in nvme", force=False) + param.nvme_swapper.remove_partition_and_release_buffers([param]) + print_rank_0( + f"after swap Param {param.ds_id} {param.ds_tensor.shape} partition released since it exists in nvme", + force=False) + + return + + tensor_size = self._aligned_size(param) + partition_size = tensor_size // self.num_partitions + if param.ds_tensor is None: + final_location = None + if self.remote_device == OffloadDeviceEnum.nvme and self.param_swapper.swappable_tensor( + numel=partition_size): + final_location = OffloadDeviceEnum.nvme + buffer = self.param_swapper.get_buffer(param, partition_size) + partitioned_tensor = torch.empty(0, dtype=param.dtype, device=buffer.device) + partitioned_tensor.data = buffer.data + print_rank_0(f"ID {param.ds_id} Initializing partition for the first time for nvme offload.") + + else: + if param.ds_persist: + device = self.local_device + elif self.remote_device == OffloadDeviceEnum.nvme: + device = OffloadDeviceEnum.cpu + else: + device = self.remote_device + + partitioned_tensor = torch.empty(partition_size, dtype=param.dtype, device=device) + # quantize the tensor if it's not trainable + if not param.requires_grad and self.quantized_nontrainable_weights: + partitioned_tensor, partitioned_tensor.ds_quant_scale = self.quantizer_module.quantize( + partitioned_tensor) + + if device == OffloadDeviceEnum.cpu and self.pin_memory: + partitioned_tensor = get_accelerator().pin_memory(partitioned_tensor) + + partitioned_tensor.requires_grad = False + param.ds_tensor = partitioned_tensor + param.ds_tensor.ds_numel = partition_size + param.ds_tensor.status = PartitionedParamStatus.AVAILABLE + param.ds_tensor.final_location = final_location + + start = partition_size * self.get_partition_rank() + end = start + partition_size + + one_dim_param = param.contiguous().view(-1) + + if start < param.ds_numel and end <= param.ds_numel: + src_tensor = one_dim_param.narrow(0, start, partition_size) + + with torch.no_grad(): + # make sure param.ds_tensor requires_grad always be false, + # otherwise, torch tracer will complain. + param.ds_tensor.copy_(src_tensor) + + #partitioned_tensor = src_tensor.clone().detach().to(self.remote_device) + + else: + # partitioned_tensor = torch.zeros(partition_size, + # dtype=param.dtype, + # device=self.remote_device ) + + if start < param.ds_numel: + elems_to_copy = param.ds_numel - start + with torch.no_grad(): + # make sure param.ds_tensor requires_grad always be false, + # otherwise, torch tracer will complain. 
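                    # Editor's note (illustrative, not part of the upstream file):
                    # only the valid tail of the flattened parameter is copied below;
                    # the remainder of ds_tensor is alignment padding. A worked
                    # example with hypothetical sizes, ds_numel=10 and num_partitions=4:
                    #
                    #     padding        = 4 - (10 % 4) = 2
                    #     aligned size   = 10 + 2       = 12
                    #     partition_size = 12 // 4      = 3
                    #
                    # Ranks 0-2 copy full 3-element slices via the branch above, while
                    # rank 3 (start = 9) lands here and copies only 10 - 9 = 1 element,
                    # leaving its last 2 slots as padding.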
+ param.ds_tensor.narrow(0, 0, + elems_to_copy).copy_(one_dim_param.narrow(0, start, elems_to_copy)) + + #print(f"Remote device {self.remote_device}") + + #param.ds_tensor = partitioned_tensor + + #param.data = param.ds_tensor.data + + # param.data does not store anything meaningful in partitioned state + + see_memory_usage(f'Before partitioning param {param.ds_id} {param.shape}', force=False) + free_param(param) + see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False) + + if param.ds_tensor.final_location == OffloadDeviceEnum.nvme: + self.param_swapper.swap_out_and_release([param]) + print_rank_0(f"ID {param.ds_id} Offloaded to nvme offload and buffers released.") + see_memory_usage(f"ID {param.ds_id} Offloaded to nvme offload and buffers released.", force=False) + + print_rank_0(f"ID {param.ds_id} partitioned type {param.dtype} dev {param.device} shape {param.shape}") + + @instrument_w_nvtx + def _partition_param_sec(self, param, buffer=None, has_been_updated=False): + assert param.ds_status is not ZeroParamStatus.INFLIGHT, f" {param} Cannot partition a param in flight" + global reuse_buffers + ##support for NVME secondary param offload + #print_rank_0(f"SEC Param id {param.ds_id} status is {param.ds_status}", force=True) + if param.ds_status is ZeroParamStatus.AVAILABLE: + #check padding + tensor_size = self._aligned_size(param) + partition_size = tensor_size // self.dp_world_size + + secondary_partition_size = int(tensor_size // self.num_ranks_in_param_group) + if param.ds_secondary_tensor is None: + final_location = None + secondary_partitioned_tensor = torch.empty(secondary_partition_size, + dtype=param.dtype, + device=self.remote_device) + + if self.pin_memory: + secondary_partitioned_tensor = secondary_partitioned_tensor.pin_memory() + # quantize the tensor if it's not trainable + if not param.requires_grad and self.quantized_nontrainable_weights: + secondary_partitioned_tensor, secondary_partitioned_tensor.ds_quant_scale = self.quantizer_module.quantize( + secondary_partitioned_tensor) + secondary_partitioned_tensor.requires_grad = False + param.ds_secondary_tensor = secondary_partitioned_tensor + param.ds_secondary_tensor.ds_numel = secondary_partition_size + param.ds_secondary_tensor.status = PartitionedParamStatus.AVAILABLE + param.ds_secondary_tensor.final_location = final_location + + #use rank in group for secondary tensor + secondary_start = secondary_partition_size * self.rank_in_group + + secondary_end = secondary_start + secondary_partition_size + + one_dim_param = param.contiguous().view(-1) + + # ds_numel is unpadded, so the last chunk of the secondary tensor might not be secondary_partition_size + sec_numel = param.ds_numel - secondary_start if secondary_end > param.ds_numel else secondary_partition_size + + # copy from full tensor to secondary tensor + param.ds_secondary_tensor.narrow(0, 0, + sec_numel).copy_(one_dim_param.narrow(0, secondary_start, sec_numel)) + + # TODO: This is a temporary fix to avoid the issue that 2nd tensor all-gather happens before 2nd tensor partition is done + get_accelerator().current_stream().synchronize() + + print_rank_0(f"{param.ds_id} partitioned type {param.dtype} dev {param.device} shape {param.shape}", + force=False) + + def _param_status(self, param): + if param.ds_tensor is not None: + print_rank_0( + f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned numel {param.ds_tensor.numel()}, data numel {param.data.numel()}" + ) + else: + print_rank_0( + 
f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned ds_tensor {param.ds_tensor}, data numel {param.data.numel()}" + ) + + def _allgather_param(self, param, async_op=False, hierarchy=0): + + partition_size = param.ds_tensor.ds_numel + + tensor_size = partition_size * self.num_partitions + aligned_param_size = self._aligned_size(param) + assert tensor_size == aligned_param_size, f'param id {param.ds_id} aligned size {aligned_param_size} does not match tensor size {tensor_size}' + + print_rank_0( + f"{'--'* hierarchy}---- Before allocating allgather param {debug_param2name_id_shape_status(param)} partition size={partition_size}" + ) + + see_memory_usage( + f'Before allocate allgather param {debug_param2name_id_shape_status(param)} partition_size={partition_size} ', + force=False) + flat_tensor = torch.zeros(aligned_param_size, dtype=param.dtype, device=param.device).view(-1) + see_memory_usage( + f'After allocate allgather param {debug_param2name_id_shape_status(param)} {aligned_param_size} {partition_size} ', + force=False) + + get_accelerator().synchronize() + + print_rank_0( + f"{'--'* hierarchy}----allgather param with {debug_param2name_id_shape_status(param)} partition size={partition_size}" + ) + # if not flat_tensor.numel() > 100000: + # replicated_tensor = flat_tensor.narrow(0, + # 0, + # param.ds_numel).view(param.ds_shape) + # param.data = replicated_tensor.data + # return None + if self.use_all_gather_into_tensor: + handle = dist.all_gather_into_tensor(flat_tensor, + param.ds_tensor.to(get_accelerator().device_name()), + group=self.get_partition_dp_group(param), + async_op=async_op) + else: + partitions = [] + for i in range(self.num_partitions): + partitions.append(flat_tensor.narrow(0, partition_size * i, partition_size)) + + if i == dist.get_rank(group=self.get_partition_dp_group(param)): + partitions[i].data.copy_(param.ds_tensor.data, non_blocking=True) + + handle = dist.all_gather(partitions, + partitions[self.get_partition_rank()], + group=self.get_partition_dp_group(param), + async_op=async_op) + + replicated_tensor = flat_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape) + param.data = replicated_tensor.data + return handle + + def _allgather_params_coalesced(self, param_list, hierarchy=0, quantize=False): + """ blocking call + avoid explicit memory copy in _allgather_params + """ + if len(param_list) == 0: + return + + if self.num_partitions == 1: + handle = _no_gather_coalesced(param_list) + handle.wait() + return None + + # collect local tensors and partition sizes + partition_sizes = [] + local_tensors = [] + if quantize: + quantize_scale_sizes = [] + quantize_scale_tensors = [] + for param in param_list: + partition_sizes.append(param.ds_tensor.ds_numel) + local_tensors.append(param.ds_tensor.to(get_accelerator().device_name())) + if quantize: + quantize_scale_sizes.append(param.ds_tensor.ds_quant_scale.numel()) + quantize_scale_tensors.append(param.ds_tensor.ds_quant_scale.to(get_accelerator().device_name())) + # allocate memory for allgather params + allgather_params = [] + if quantize: + allgather_quantize_scale = [] + for psize in partition_sizes: + tensor_size = psize * self.num_partitions + flat_tensor = torch.empty(tensor_size, dtype=param_list[0].ds_tensor.dtype, + device=self.local_device).view(-1) + flat_tensor.requires_grad = False + allgather_params.append(flat_tensor) + if quantize: + for psize in quantize_scale_sizes: + tensor_size = psize * self.num_partitions + flat_tensor = 
torch.empty(tensor_size, + dtype=param_list[0].ds_tensor.ds_quant_scale.dtype, + device=self.local_device).view(-1) + flat_tensor.requires_grad = False + allgather_quantize_scale.append(flat_tensor) + + # launch + launch_handles = [] + launch_quantize_handles = [] + for param_idx, param in enumerate(param_list): + input_tensor = local_tensors[param_idx].view(-1) + + if self.use_all_gather_into_tensor: + # try the _all_gather_base from Pytorch master + h = dist.all_gather_into_tensor(allgather_params[param_idx], + input_tensor, + group=self.get_partition_dp_group(param), + async_op=True) + if quantize: + quantize_handle = dist.all_gather_into_tensor(allgather_quantize_scale[param_idx], + quantize_scale_tensors[param_idx], + group=self.get_partition_dp_group(param), + async_op=True) + launch_quantize_handles.append(quantize_handle) + else: + output_list = [] + for i in range(self.num_partitions): + psize = partition_sizes[param_idx] + partition = allgather_params[param_idx].narrow(0, i * psize, psize) + output_list.append(partition) + if not get_accelerator().on_accelerator(partition): + logger.warning( + f'param {param_idx}, partition {i} is not on CUDA, partition shape {partition.size()}') + + # back to old all_gather function + h = dist.all_gather(output_list, input_tensor, group=self.get_partition_dp_group(param), async_op=True) + if quantize: + output_scale_list = [] + for i in range(self.num_partitions): + psize = quantize_scale_sizes[param_idx] + partition = allgather_quantize_scale[param_idx].narrow(0, i * psize, psize) + output_scale_list.append(partition) + quant_handle = dist.all_gather(output_scale_list, + quantize_scale_tensors[param_idx], + group=self.get_partition_dp_group(param), + async_op=True) + launch_quantize_handles.append(quant_handle) + launch_handles.append(h) + + # Wait ensures the operation is enqueued, but not necessarily complete. 
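+        # NOTE: only the final all-gather handle is waited on here; completion of
+        # every launched collective is enforced by the get_accelerator().synchronize()
+        # call at the end of this method.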
+ launch_handles[-1].wait() + if quantize: + for quant_handle in launch_quantize_handles: + quant_handle.wait() + + # assign to param.data (not copy) + for i, param in enumerate(param_list): + gathered_tensor = allgather_params[i] + if quantize: + gathered_tensor = self.quantizer_module.dequantize(gathered_tensor, allgather_quantize_scale[i]) + param.data = gathered_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape).data + + # guarantee the communication to be completed + get_accelerator().synchronize() + + return None + + def _allgather_params(self, param_list, hierarchy=0): + if len(param_list) == 0: + return + + partition_size = sum([param.ds_tensor.ds_numel for param in param_list]) + + tensor_size = partition_size * self.num_partitions + flat_tensor = torch.empty(tensor_size, dtype=param_list[0].ds_tensor.dtype, device=self.local_device) + flat_tensor.requires_grad = False + partitions = [] + for i in range(self.num_partitions): + start = partition_size * i + + partitions.append(flat_tensor.narrow(0, start, partition_size)) + + if i == self.get_partition_rank(): + offset = 0 + for param in param_list: + param_numel = param.ds_tensor.ds_numel + + partitions[i].narrow(0, offset, param_numel).copy_(param.ds_tensor.data) + + offset += param_numel + + if hasattr(param_list[0], 'ds_quant_scale'): + scale_size = sum([param.ds_tensor.ds_quant_scale.numel() for param in param_list]) + scale_tensor_size = scale_size * self.world_size + flat_scale_tensor = torch.empty(scale_tensor_size, + dtype=param_list[0].ds_tensor.ds_quant_scale.dtype, + device=self.local_device) + flat_scale_tensor.requires_grad = False + scale_partitions = [] + for i in range(self.world_size): + start = scale_tensor_size * i + scale_partitions.append(flat_scale_tensor.narrow(0, start, scale_tensor_size)) + if i == self.rank: + offset = 0 + for param in param_list: + param_scale_numel = param.ds_tensor.ds_quant_scale.ds_numel + + scale_partitions[i].narrow(0, offset, + param_scale_numel).copy_(param.ds_tensor.ds_quant_scale.data) + + offset += param_scale_numel + + dist.all_gather_into_tensor(flat_tensor, + partitions[self.get_partition_rank()], + group=self.get_partition_dp_group(param), + async_op=False) + if hasattr(param_list[0], 'ds_quant_scale'): + dist.all_gather(flat_scale_tensor, + param_list[0].ds_quant_scale, + group=self.get_partition_dp_group(param), + async_op=False) + param_offset = 0 + + for param in param_list: + param_partition_size = param.ds_tensor.ds_numel + param_size = param.ds_numel + replicated_tensor = torch.empty(param.ds_shape, dtype=param.ds_tensor.dtype, device=self.local_device) + + for i in range(self.num_partitions): + + start = i * partition_size + + param_start = i * param_partition_size + + if param_start < param_size: + numel_to_copy = min(param_size - param_start, param_partition_size) + + part_to_copy = partitions[i].narrow(0, param_offset, numel_to_copy) + + replicated_tensor.view(-1).narrow(0, param_start, numel_to_copy).copy_(part_to_copy) + #param_offset += param.data.numel() + param_offset += param.ds_tensor.ds_numel + if hasattr(param_list[0], 'ds_quant_scale'): + replicated_tensor = self.quantizer_module.dequantize(replicated_tensor, flat_scale_tensor) + param.data = replicated_tensor.data + + return None + + def _reduce_scatter_gradients(self, param_list): + #print_rank_0([param.grad for param in param_list]) + #assert any([param.grad is None for param in param_list]), "None gradients cannot be reduce scattered" + + handles_and_reduced_partitions = [] + for param in 
param_list: + assert param.grad.numel( + ) == param.ds_numel, f"{param.grad.numel()} != {param.ds_numel} Cannot reduce scatter gradients whose size is not same as the params" + + handles_and_reduced_partitions.append(self._reduce_scatter_gradient(param)) + + for param, (handle, reduced_partition) in zip(param_list, handles_and_reduced_partitions): + if handle is not None: + handle.wait() + + # some ranks may have partitions that are padded to go beyond the grad size. + # For these ranks the output of reduce scatter is a separate buffer and needs + # to be copied in + partition_size = param.ds_tensor.ds_numel + start = self.get_partition_rank() * partition_size + end = start + partition_size + #print_rank_0("REduce scatter was executed for param {param.ds_id}") + if start < param.ds_numel < end: + elements = param.ds_numel - start + param.grad.view(-1).narrow(0, start, elements).copy_(reduced_partition.narrow(0, 0, elements)) + + def _reduce_scatter_gradient(self, param): + + partition_size = param.ds_tensor.ds_numel + #output = torch.empty(partition_size, dtype=param.dtype, device=param.device) + + total_size = partition_size * self.num_partitions + input_list = [] + + for i in range(self.num_partitions): + + start = i * partition_size + end = start + partition_size + + #print("before reduce scatter gradients") + if start < param.ds_numel and end <= param.ds_numel: + input = param.grad.view(-1).narrow(0, start, partition_size) + else: + input = torch.zeros(partition_size, dtype=param.dtype, device=param.device) + + if start < param.ds_numel: + elements = param.ds_numel - start + input.narrow(0, 0, elements).copy_(param.grad.view(-1).narrow(0, start, elements)) + #print("after reduce scatter gradients") + input_list.append(input) + + rank = dist.get_rank(group=self.get_partition_dp_group(param)) + handle = dist.reduce_scatter(input_list[rank], + input_list, + group=self.get_partition_dp_group(param), + async_op=True) + + return handle, input_list[rank] + + def _partition_gradients(self, param_list, partition_buffers=None, accumulate=False): + if partition_buffers is None: + partition_buffers = [None] * len(param_list) + + for param, partition_buffer in zip(param_list, partition_buffers): + self._partition_gradient(param, partition_buffer=partition_buffer, accumulate=accumulate) + + def _partition_gradient(self, param, partition_buffer=None, accumulate=False): + + #import pdb;pdb.set_trace() + # param.grad=None + # param.grad.test() + print_rank_0( + f"Partitioning param {param.ds_id} gradient of size {param.grad.numel()} type {param.grad.dtype} part_size {param.ds_tensor.ds_numel}" + ) + see_memory_usage("Before partitioning gradients", force=False) + partition_size = param.ds_tensor.ds_numel + + if partition_buffer is None: + assert not accumulate, "No buffer to accumulate to" + partition_buffer = torch.zeros(partition_size, dtype=param.dtype, device=param.device) + else: + assert partition_buffer.numel( + ) >= partition_size, f"The partition buffer size {partition_buffer.numel()} should match the size of param.ds_tensor {partition_size}" + + rank = dist.get_rank(group=self.get_partition_dp_group(param)) + start = partition_size * rank + end = start + partition_size + + dest_tensor_full_buffer = partition_buffer.view(-1).narrow(0, 0, partition_size) + + #print("before partition gradients") + if start < param.ds_numel: + elements = min(param.ds_numel - start, partition_size) + + dest_tensor = dest_tensor_full_buffer.narrow(0, 0, elements) + src_tensor = param.grad.view(-1).narrow(0, start, 
elements) + + # just copy the grad partition to the buffer + if not accumulate: + dest_tensor.copy_(src_tensor) + + # if source and destination are on same device, + # add to the provided buffer + elif src_tensor.device == dest_tensor.device: + dest_tensor.add_(src_tensor) + + # if source and destination are on different device, copy first to src + # then add and move back to the destination. This seems to run faster + # when src is gpu and dest is cpu + # adding directly to cpu is very slow + else: + acc_tensor = torch.empty(src_tensor.numel(), dtype=param.dtype, device=param.device) + + acc_tensor.copy_(dest_tensor) + acc_tensor.add_(src_tensor) + dest_tensor.copy_(acc_tensor) + + # partition_buffer.view(-1).narrow( + # 0, + # 0, + # elements).copy_(param.grad.view(-1).narrow(0, + # start, + # elements)) + + #print("after partition gradients") + param.grad.data = dest_tensor_full_buffer.data + see_memory_usage("After partitioning gradients", force=False) + + def get_partition_dp_group(self, param): + return param.ds_process_group + + def get_partition_rank(self): + """subclass can overload to specify different relative rank in + parameter partition group""" + return self.rank + + @property + def num_partitions(self): + return self.dp_world_size + + def get_dp_process_group(self): + """ Return the communication group with all data-parallel ranks """ + return self.ds_process_group + + +class GatheredParameters: + + def __init__(self, params, modifier_rank=None, fwd_module=None, enabled=True): + """A context that collects parameters that were partitioned via a + :class:`deepspeed.zero.Init` context. The parameters are partitioned + again upon exit. + + Args: + params (``torch.nn.Parameter``): A single parameter, or an iterable of parameters (list, tuple, generator) of parameters to collect. + It's assumed that all parameters are zero params. + modifier_rank (int, optional): If specified, this rank's parameter will be + broadcasted on exit from the context. This argument is required if ``params`` are + modified, so that all processes have a consistent view of the data. Defaults + to ``None``. + fwd_module (``torch.nn.Module``, optional): If specified, ``params`` will be + registered as external parameters of ``fwd_module``. See :meth:`deepspeed.zero.register_external_parameter`. + enabled (bool, optional): If ``False``, this context is a no-op. Defaults to ``True``. + + Important: Make sure to use ``modifier_rank`` that is not ``None`` (e.g., ``modifier_rank=0``) + if you need the GPU memory allocated by gather to be released upon exit from the context manager. + + Important: if ``params`` isn't an iterable of parameters or a single parameter it'll be silently ignored! + + Examples + ======== + + #. Allocate a partitioned module, initialize its weight on rank 0, and update all + processes. + + .. code-block:: python + + with deepspeed.zero.Init(): + linear = torch.nn.Linear(1000,1000) + + with deepspeed.zero.GatheredParameters(linear.weight, + modifier_rank=0): + if deepspeed.comm.get_rank() == 0: + linear.weight.zero_() + + with deepspeed.zero.GatheredParameters(linear.weight, + modifier_rank=0): + if deepspeed.comm.get_rank() == 0: + linear.weight.zero_() + + #. Collect a partitioned weight to pass to another module during + training. The parameter will be registered as an external parameter + and made available during the backward pass. + + .. 
code-block:: python + :emphasize-lines: 6 + + def forward(self, input): + x = self.layer1(input) + + # self.layer1.weight is required by self.layer2.forward + with deepspeed.zero.GatheredParameters(self.layer1.weight, + fwd_module=self): + y = self.layer2(x, self.layer1.weight) + return y + + + #. Pretrained model loading + + .. code-block:: python + + with deepspeed.zero.Init(): + model = MyModel() + + state_dict = torch.load(model_path, map_location="cpu") + + def load(module: nn.Module, prefix=""): + # because zero3 puts placeholders in model params, this context + # manager gathers (unpartitions) the params of the current layer, then loads from + # the state dict and then re-partitions them again + with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0): + if deepspeed.comm.get_rank() == 0: + module._load_from_state_dict(state_dict, prefix) + + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + ".") + + load(model, prefix="") + + If this approach is not used, then the full model will first be copied to each GPU. For models + bigger than the memory of a single GPU, this method is required. + """ + + self.enabled = enabled + if not enabled: + return + + if isinstance(params, Iterable) and not isinstance(params, torch.Tensor): + # deal with generators like model.parameters() + # must convert to list to be able to iterate more than once if we get a generator + params = list(params) + else: + # single param + params = [params] + # enable if at least one is zero-param, otherwise a noop + if not any(is_zero_param(p) for p in params): + self.enabled = False + return + + self.params = [p for p in params if hasattr(p, "ds_id")] + self.params = sorted( + set(self.params), key=lambda x: x.ds_id + ) # remove the duplicates to prevent racing condition, we must also make sure the order is the same on all ranks otherwise we'll get deadlocks + self.src_rank = None + if modifier_rank is not None: + if self.params[0].ds_process_group == dist.get_world_group(): + self.src_rank = modifier_rank + else: + # A group was specified; convert DP rank to global rank + self.src_rank = dist.get_global_rank(self.params[0].ds_process_group, modifier_rank) + self.fwd_module = fwd_module + if self.fwd_module is not None: + # is a no-op if already registered + for p in self.params: + register_external_parameter(self.fwd_module, p) + + def __enter__(self): + if not self.enabled: + return + self.params[0].all_gather(param_list=self.params) + + def __exit__(self, *exc): + if not self.enabled: + return + if self.src_rank is None: + self.params[0].partition(param_list=self.params, has_been_updated=False) + return + + handles = [dist.broadcast(p.data, self.src_rank, group=p.ds_process_group, async_op=True) for p in self.params] + for h in handles: + h.wait() + self.params[0].partition(param_list=self.params, has_been_updated=True) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/partitioned_param_coordinator.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/partitioned_param_coordinator.py new file mode 100644 index 0000000000000000000000000000000000000000..8fc962c4f2a75efbd02f45a8cfb80f1f835bd9a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/partitioned_param_coordinator.py @@ -0,0 +1,543 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from dataclasses import dataclass +import collections +from collections import UserDict +from typing import Deque, Set + +from deepspeed import comm as dist +from deepspeed.utils import z3_leaf_module +from deepspeed.utils.logging import logger +from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum +from deepspeed.runtime.zero.partition_parameters import * +from deepspeed.runtime.zero.partitioned_param_profiler import PartitionedParameterProfiler +from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus +from deepspeed.utils.debug import debug_module2name_id, debug_param2name_id +from deepspeed.accelerator import get_accelerator +import deepspeed.runtime.compiler as compiler + +import logging + +ENABLE_PROFILER = False + + +def debug_rank0(message: str) -> None: + if dist.get_rank() == 0: + logger.debug(message) + + +@instrument_w_nvtx +def get_all_parameters(sub_module, recurse=False): + return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters()) + + +def iter_params(module: Module, recurse=False) -> Iterable[Parameter]: + return map(lambda pair: pair[1], get_all_parameters(module, recurse)) + + +class ZeRoTraceMode(Enum): + # Record trace of the network during a single forward+backward (for training) or forward (for inference) + RECORD = 1 + # Use recorded network trace to optimize current forward+backward or forward + COMPLETE = 2 + # Recorded trace does not match current forward+backward or forward pass. + INVALID = 3 + + +class InflightParamRegistry(UserDict): + """registry for parameters in flight""" + + def __setitem__(self, param: Parameter, handle: AllGatherCoalescedHandle) -> None: + if param in self.data: + raise RuntimeError(f"{param.ds_summary()} already in registry") + if param.ds_status != ZeroParamStatus.INFLIGHT: + raise RuntimeError(f"attempted to add non-inflight parameter to registry {param.ds_summary()}") + self.data[param] = handle + + +class PartitionedParameterCoordinator: + FORWARD_FETCH_SUBMIT = 'forward_fetch_submit' + FORWARD_FETCH_WAIT = 'forward_fetch_wait' + FORWARD_PREFETCH_SUBMIT = 'forward_prefetch_submit' + BACKWARD_FETCH_SUBMIT = 'backward_fetch_submit' + BACKWARD_FETCH_WAIT = 'backward_fetch_wait' + BACKWARD_PREFETCH_SUBMIT = 'backward_prefetch_wait' + FORWARD_ALL_GATHER = 'forward_all_gather' + BACKWARD_ALL_GATHER = 'backward_all_gather' + """Handles partitioning and gathering of parameters.""" + + @dataclass + class __ParamInTrace: + param: Parameter + step_id_last_used_at: int + + def __init__( + self, + prefetch_bucket_sz: int, + max_reuse_distance_in_numel: int, + max_available_parameters_in_numel: int, + allgather_stream: get_accelerator().Stream, + inflight_param_registry: InflightParamRegistry, + prefetch_nvme: bool = False, + timers=None, + zero_quantized_weights=False, + zero_quantized_nontrainable_weights=False, + ) -> None: + # mapping of param -> handle for each param that is currently in flight + self.__inflight_param_registry = inflight_param_registry + # keeps track of the number of submodules invoked so far. 
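+        # (incremented at the end of fetch_sub_module and reset to zero in reset_step)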
+ self.__step_id: int = 0 + # network tracing mode + self.__trace_mode: ZeRoTraceMode = ZeRoTraceMode.RECORD + # sequence of submodules/parameters in forward pass + backward pass + self.__submodule_order: Iterable[Module] = [] + self.__param_order: Iterable[__class__.__ParamInTrace] = [] + self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10)) + self.__step_id_module_fetched_for = collections.defaultdict(lambda: collections.deque()) + # number of available params, and max number of available params + self.__n_available_params: int = 0 + self.__max_n_available_params: int = max_available_parameters_in_numel + # max distance between two use of the module beyond which module is released + self.__max_reuse_dist_in_numel: int = max_reuse_distance_in_numel + # queue for parameters to fetch. parameters will be popped off the left + # side of the dequeue as they are fetched + self.__param_queue: Deque[__class__.__ParamInTrace] = None + self.__prefetch_bucket_sz: int = prefetch_bucket_sz + self.__prefetch_nvme: bool = prefetch_nvme + self.hierarchy: int = 0 + self.zero_quantized_weights = zero_quantized_weights + self.zero_quantized_nontrainable_weights = zero_quantized_nontrainable_weights + + # stream that will be used for allgather operations + self.__allgather_stream: get_accelerator().Stream = allgather_stream + + # limit the number of fetch events that can be queued at once + # otherwise, what happens is memory is allocated by the host thread at the + # time of the call, but not used until later by the asynchronous cuda stream. + # allowing an infinite number of these to queue up causes a lot of memory + # pressure that then becomes detrimental to performance. + # this is a much less elegant way of fixing this vs something like using + # cudaMallocAsync/cudaFreeAsync. Choosing to not expose this to the user now + # because ideally in the future its replaced by an async allocation + # mechanism which doesn't require any configuration by the user. + self.__ongoing_fetch_events: Deque[get_accelerator().Event] = collections.deque() + # TODO. make this configurable via JSON + self.__max_ongoing_fetch_events: int = 2 + self.__profiler = PartitionedParameterProfiler(timers if ENABLE_PROFILER else None) + + """Tracing and Tracking + TODO. consider performing trace before initializing PartitionedParameterCoordinator + and passing trace results into constructor. This way all the code in here can + just assume that the trace is complete and the results can be entirely + immutable. 
+ + Bookkeeping operations used to track where we are in the forward/backward pass + """ + + def _clear_trace_structures(self) -> None: + self.__submodule_order = [] + self.__param_order = [] + self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10)) + self.__param_queue = None + + def is_complete_trace(self) -> bool: + return self.__trace_mode == ZeRoTraceMode.COMPLETE + + def is_invalid_trace(self) -> bool: + return self.__trace_mode == ZeRoTraceMode.INVALID + + def is_record_trace(self) -> bool: + return self.__trace_mode == ZeRoTraceMode.RECORD + + def _invalidate_trace(self) -> None: + if self.is_invalid_trace(): + raise RuntimeError("attempted to invalidate already invalid trace") + self.__trace_mode = ZeRoTraceMode.INVALID + self._clear_trace_structures() + + def trace_prologue(self, sub_module: Module) -> None: + if self.is_complete_trace(): + # sub_module must match expectation else invalidate trace cache + if len(self.__submodule_order) <= self.__step_id: + print_rank_0( + f"Invalidate trace cache @ step {self.__step_id} and module {sub_module.id}: " + f"cache has only {len(self.__submodule_order)} modules", + force=True) + self._invalidate_trace() + return + + if sub_module != self.__submodule_order[self.__step_id]: + expected_module_id = self.__submodule_order[self.__step_id].id + print_rank_0( + f"Invalidate trace cache @ step {self.__step_id}: " + f"expected module {expected_module_id}, but got module {sub_module.id}", + force=True) + self._invalidate_trace() + + @compiler.disable + def record_module(self, sub_module: Module) -> None: + """adds sub module to trace""" + if not self.is_record_trace(): + raise RuntimeError(f"attempted to record trace when status = {self.__trace_mode}") + + self.__submodule_order.append(sub_module) + self.__step_id_module_fetched_for[sub_module.id].append(self.__step_id) + + def record_parameters(self, sub_module: Module) -> None: + """adds sub module to trace""" + if not self.is_record_trace(): + raise RuntimeError(f"attempted to record trace when status = {self.__trace_mode}") + + step_id = self.__step_id_module_fetched_for[sub_module.id].popleft() + for param in sorted(set(iter_params(sub_module, recurse=z3_leaf_module(sub_module))), key=lambda p: p.ds_id): + self.__param_order.append(__class__.__ParamInTrace(param=param, step_id_last_used_at=step_id)) + + def construct_parameter_trace_from_module_trace(self): + """use module trace to construct parameter trace""" + self.__param_order = [] + for sub_module in self.__submodule_order: + self.record_parameters(sub_module) + + def reset_step(self) -> None: + """indicate that we have completed one fwd+bwd for the model""" + if self.__inflight_param_registry: + raise RuntimeError(f"still have inflight params " + f"{[p.ds_summary() for p in self.__inflight_param_registry.keys()]}") + + if not self.is_complete_trace(): # not self.trace_complete: + # Make sure that recorded submodule orders are identical across ranks + assert_ints_same_as_other_ranks([m.id for m in self.__submodule_order]) + + if self.is_record_trace(): + # Successfully recorded a trace + self.construct_parameter_trace_from_module_trace() + # Make sure that recorded parameter orders are identical across ranks + assert_ints_same_as_other_ranks([p.param.ds_id for p in self.__param_order]) + assert_ints_same_as_other_ranks([p.step_id_last_used_at for p in self.__param_order]) + + self.__submodule_order = tuple(self.__submodule_order) # freeze + self.__param_order = tuple(self.__param_order) # freeze + 
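+                # recording succeeded: mark the trace COMPLETE so later passes can
+                # reuse it to drive prefetching and early-release decisions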
self.__trace_mode = ZeRoTraceMode.COMPLETE + print_rank_0( + f"completed record trace of {len(self.__submodule_order)} sub modules: {[m.id for m in self.__submodule_order]}", + force=False) + else: + # Enable trace recording for next forward/backward pass + self.__trace_mode = ZeRoTraceMode.RECORD + + else: + if self.__profiler is not None: + self.__profiler.log_events() + + self.__param_queue = collections.deque(self.__param_order) # reset fetch queue + self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10)) + self.__step_id_module_fetched_for = collections.defaultdict(lambda: collections.deque()) + self.__step_id = 0 + self.__n_available_params = 0 + self.__profiler.reset_events() + + def _dump_params(self, tag, sub_module, params, step_id=None): + if step_id is None: + step_id = self.__step_id + param_names = [debug_param2name_id(p) for p in params] + print_rank_0(f'{tag} step = {step_id} mod = {debug_module2name_id(sub_module)} p_names = {param_names}', + force=False) + + def _dump_param_ids(self, tag, mod_id, p_ids, step_id=None): + if step_id is None: + step_id = self.__step_id + print_rank_0(f'{tag} mod = {mod_id}, step = {step_id}, p_ids = {p_ids}', force=False) + + """Fetch and Release + Fetching, prefetching, and releasing parameters + """ + + @compiler.disable + @instrument_w_nvtx + @torch.no_grad() + def fetch_sub_module(self, current_submodule: Module, forward: bool) -> None: + """This method does the following (in order): + 1. kick off fetch for parameters in immediately required sub module + 2. kick off fetch for next few parameters we will need later (prefetch) + 3. block on parameters in immediately required sub module + """ + if logger.isEnabledFor(logging.DEBUG): + debug_rank0( + f"{self.__step_id}: M{current_submodule.id}({type(current_submodule).__name__}) P{[p.ds_id for p in iter_params(current_submodule, recurse=z3_leaf_module(current_submodule))]} " + + str({ + "avail": f"{self.__n_available_params:.1e}", + "queue_sz": f"{len(self.__param_queue or [])}", + "inflight": [p.ds_id for p in self.__inflight_param_registry], + })) + + params_to_fetch = frozenset(iter_params(current_submodule, recurse=z3_leaf_module(current_submodule))) + fetch_numel = sum( + [p.partition_numel() for p in params_to_fetch if p.ds_status == ZeroParamStatus.NOT_AVAILABLE]) + + if fetch_numel > 0: + event_name = __class__.FORWARD_FETCH_SUBMIT if forward else __class__.BACKWARD_FETCH_SUBMIT + self._dump_param_ids(event_name, current_submodule.id, + [p.ds_id for p in params_to_fetch if p.ds_status == ZeroParamStatus.NOT_AVAILABLE]) + self.__profiler.start_event(event_name) + # kick off all gather for params in the immediately required submodule + #for param in params_to_fetch: + if logger.isEnabledFor(logging.DEBUG): + for param in params_to_fetch: + debug_rank0(f"-fetch: {param.ds_summary()}") + self.__all_gather_params(params_to_fetch, forward) + self.__profiler.stop_event(event_name, fetch_numel) + + wait_numel = 0 + wait_event_name = __class__.FORWARD_FETCH_WAIT if forward else __class__.BACKWARD_FETCH_WAIT + self.__profiler.start_event(wait_event_name) + # wait for parameters in the immediately needed submodule to become available + for param in params_to_fetch: + param.ds_active_sub_modules.add(current_submodule.id) + if logger.isEnabledFor(logging.DEBUG): + debug_rank0(f"-wait: {param.ds_summary()}") + if param in self.__inflight_param_registry: + wait_numel += param.partition_numel() + with get_accelerator().stream(self.__allgather_stream): + while 
self.__ongoing_fetch_events and self.__ongoing_fetch_events[0].query(): + self.__ongoing_fetch_events.popleft() + if len(self.__ongoing_fetch_events) > self.__max_ongoing_fetch_events: + self.__ongoing_fetch_events.popleft().synchronize() + + self.__inflight_param_registry.pop(param).wait() + + if not get_accelerator().handles_memory_backpressure(): + event = get_accelerator().Event() + event.record() + self.__ongoing_fetch_events.append(event) + + assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary() + if not get_accelerator().resolves_data_dependency(): + get_accelerator().current_stream().wait_stream(self.__allgather_stream) + self.__profiler.stop_event(wait_event_name, wait_numel) + + # kick off parameter prefetches for upcoming modules + # don't prefetch if we dont have a completed model trace + if self.is_complete_trace(): + # go through the parameters we need for the current module and pop them + # off the fetch queue so that they aren't prefetched later. + # if params have already been popped off the fetch queue by earlier + # prefetches we won't look for them here + discarded_from_prefetch_queue = set() + params_not_already_fetched = set( + filter(lambda p: self.__most_recent_step_id_param_fetched_for[p] < self.__step_id, params_to_fetch)) + while self.__param_queue and len(discarded_from_prefetch_queue) < len(params_not_already_fetched): + param_in_trace = self.__param_queue.popleft() + self.__most_recent_step_id_param_fetched_for[ + param_in_trace.param] = param_in_trace.step_id_last_used_at + discarded_from_prefetch_queue.add(param_in_trace.param) + + if discarded_from_prefetch_queue != params_not_already_fetched: + raise RuntimeError( + f"tracing error at step {self.__step_id}: \n" + f"module id: {current_submodule.id}, training: {current_submodule.training}\n" + f"expected the next {len(params_not_already_fetched)} parameters in the " + f"parameter fetch queue to be {tuple(p.ds_summary(use_debug_name=True) for p in params_not_already_fetched)} \n" + f"but got \n {tuple(p.ds_summary(use_debug_name=True) for p in discarded_from_prefetch_queue)}.") + + def _is_currently_on_nvme(param): + if param.nvme_swapper is None: + return False + + return param.ds_tensor.final_location == OffloadDeviceEnum.nvme \ + and param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE + + # kick off all gather for params in the next few submodules (prefetch) + if self.__prefetch_bucket_sz > 0: + max_params_to_prefetch = min(self.__max_n_available_params - self.__n_available_params, + self.__prefetch_bucket_sz) + params_to_prefetch = set() + numel_prefetching = 0 + while self.__param_queue and numel_prefetching < max_params_to_prefetch: + param_in_trace: __class__.__ParamInTrace = self.__param_queue.popleft() + + if _is_currently_on_nvme(param_in_trace.param): + # nvme prefetch is handled elsewhere. 
Need to break here to preserve fetch order + self.__param_queue.appendleft(param_in_trace) + break + + do_prefetch = param_in_trace.param.ds_status == ZeroParamStatus.NOT_AVAILABLE + if param_in_trace.param in params_to_prefetch: + # Avoid duplicates + do_prefetch = False + + self.__most_recent_step_id_param_fetched_for[param_in_trace.param] = \ + max(self.__most_recent_step_id_param_fetched_for[param_in_trace.param], + param_in_trace.step_id_last_used_at) + + if do_prefetch: + params_to_prefetch.add(param_in_trace.param) + numel_prefetching += param_in_trace.param.ds_numel + + if numel_prefetching > 0: + event_name = __class__.FORWARD_PREFETCH_SUBMIT if forward else __class__.BACKWARD_PREFETCH_SUBMIT + self.__profiler.start_event(event_name) + if logger.isEnabledFor(logging.DEBUG): + for param in params_to_prefetch: + debug_rank0(f"-prefetch: {param.ds_summary()}") + self.__all_gather_params(params_to_prefetch, forward) + self.__profiler.stop_event(event_name, numel_prefetching) + + if self.__prefetch_nvme: + self.__prefetch_nvme_param_partitions() + + self.__step_id += 1 + + @instrument_w_nvtx + @torch.no_grad() + def release_sub_module(self, submodule: Module) -> None: + """release the parameters of a sub module, assuming they meet conditions to + be released.""" + params_to_release = (self.__params_to_release(submodule, self.__step_id) if self.is_complete_trace() else set( + p.ds_id for p in iter_params(submodule, recurse=z3_leaf_module(submodule)))) + for param in iter_params(submodule, recurse=z3_leaf_module(submodule)): + param.ds_active_sub_modules.discard(submodule.id) + if param.ds_id in params_to_release and not param.is_external_param: + self.__release_param(param) + + @instrument_w_nvtx + @torch.no_grad() + def release_and_reset_all(self, module: Module) -> None: + """release all module parameters""" + for param in iter_params(module, recurse=True): + if param in self.__inflight_param_registry: + raise RuntimeError(f"param {param.ds_summary()} still in flight") + + # TODO. make this throw if if there are still active submodules. currently + # there's a hook execution issue + param.ds_active_sub_modules.clear() + self.__release_param(param) + + for param in iter_params(module, recurse=True): + if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: + raise RuntimeError(f"{param.ds_summary()} expected to be released") + + @instrument_w_nvtx + def __all_gather_params(self, params: Set[Parameter], forward: bool) -> None: + quantized_params = [] + nonquantized_params = [] + for param in params: + if hasattr(param.ds_tensor, 'ds_quant_scale'): + quantized_params.append(param) + else: + nonquantized_params.append(param) + if quantized_params: + self.__all_gather_params_(quantized_params, forward, quantize=True) + if nonquantized_params: + self.__all_gather_params_(nonquantized_params, forward, quantize=self.zero_quantized_weights) + + def __all_gather_params_(self, params: Set[Parameter], forward: bool, quantize: bool = False) -> None: + """for each partitioned parameter, kick off an async allgather and store + the work handle for the in flight parameters.""" + partitioned_params = [] + all_gather_numel = 0 # numel = num of elements + for param in params: + if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: + partitioned_params.append(param) + all_gather_numel += param.ds_numel + + if partitioned_params: + self.__n_available_params += all_gather_numel + # here we need to handle a special case where some of the parameters have a valid hpz secondary tensor (e.g. 
they are not trainable so their secondary tensor never expire) but others do not. + partitioned_params_with_secondary_tensors = [ + p for p in partitioned_params if p.ds_secondary_tensor is not None + ] + partitioned_params_without_secondary_tensors = [ + p for p in partitioned_params if p.ds_secondary_tensor is None + ] + for param_group in [ + partitioned_params_with_secondary_tensors, partitioned_params_without_secondary_tensors + ]: + if not param_group: + continue + with get_accelerator().stream(self.__allgather_stream): + event_name = __class__.FORWARD_ALL_GATHER if forward else __class__.BACKWARD_ALL_GATHER + self.__profiler.start_event(event_name) + handle = param_group[0].all_gather_coalesced(param_group, quantize=quantize) + self.__profiler.stop_event(event_name, all_gather_numel) + for param in param_group: + assert param.ds_status == ZeroParamStatus.INFLIGHT, param.ds_summary() + self.__inflight_param_registry[param] = handle + + # Release swap buffers for persisted params on nvme since they will never be partitioned or evicted from GPU + swap_persisted_params = [ + p for p in partitioned_params if p.ds_persist and p.ds_tensor.final_location == OffloadDeviceEnum.nvme + ] + if swap_persisted_params: + swap_persisted_params[0].nvme_swapper.remove_partition_and_release_buffers(swap_persisted_params) + + @compiler.disable + @instrument_w_nvtx + def __release_param(self, param: Parameter) -> None: + if param.ds_status == ZeroParamStatus.AVAILABLE and not param.ds_active_sub_modules: + if logger.isEnabledFor(logging.DEBUG): + debug_rank0(f"-release: {param.ds_summary()}") + param.partition() + self.__n_available_params -= param.ds_numel + + @instrument_w_nvtx + @functools.lru_cache(maxsize=None) + def __params_to_release(self, submodule_to_release: Module, step_id: int) -> Set[int]: + if not self.is_complete_trace(): + raise RuntimeError("expected trace to be complete") + + params_to_release = set( + p.ds_id for p in iter_params(submodule_to_release, recurse=z3_leaf_module(submodule_to_release)) + if not p.ds_persist) + + # Problem: When prefetcher scans the param trace, it skips AVAILABLE params. + # This creates issues if those params are released before the skipped uses: + # 1) It hurts performance as the skipped uses are never prefetched. + # 2) For nvme params, we run out of swap buffers because the prefetch order + # diverges from the trace. + # Solution: Don't release params whose reuse was skipped by prefetch. This is + # possible because we detect such skips during prefetch and mark those params. + for param in iter_params(submodule_to_release, recurse=z3_leaf_module(submodule_to_release)): + if self.__most_recent_step_id_param_fetched_for[param] > step_id: + params_to_release.discard(param.ds_id) + + # examine all modules within `max_reuse_dist_in_numel` of the current step, + # if we see any of the candidate parameters to be released reoccur while + # doing this, remove them from the set of parameters to release. 
+ params_traversed = 0 + for module in self.__submodule_order[step_id:]: + if params_traversed >= self.__max_reuse_dist_in_numel: + break + for param in iter_params(module, recurse=z3_leaf_module(submodule_to_release)): + params_to_release.discard(param.ds_id) + params_traversed += param.ds_numel + + return params_to_release + + @instrument_w_nvtx + def __prefetch_nvme_param_partitions(self) -> None: + """swap in parameter partitions from nvme for those parameters that will be used + after the ones that are already being prefetched into full parameters + """ + if not self.is_complete_trace(): + return + + numel_in_flight = sum(param.ds_numel for param in self.__inflight_param_registry) + + numel_considered = 0 + swap_in_params = [] + for param_in_trace in self.__param_queue: + param = param_in_trace.param + if param.nvme_swapper is None: + continue + if (numel_considered > 2 * numel_in_flight + or len(swap_in_params) >= param.nvme_swapper.available_swap_in_buffers()): + break + if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE: + swap_in_params.append(param) + numel_considered += param.ds_numel + + if swap_in_params: + swap_in_params[0].nvme_swapper.swap_in(swap_in_params, async_op=True) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/partitioned_param_profiler.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/partitioned_param_profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..b4ea11f3b83632a5f7f74343ef7092af6c5bf3a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/partitioned_param_profiler.py @@ -0,0 +1,63 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from dataclasses import dataclass +from deepspeed.utils import log_dist + + +class PartitionedParameterProfiler(object): + + @dataclass + class EventCounter: + name: str + count: int + num_elem: int + + def reset(self): + self.count = 0 + self.num_elem = 0 + + def increment(self, numel): + self.count += 1 + self.num_elem += numel + + def __init__(self, timers): + self.timers = timers + self.event_counters = {} + + def reset_events(self): + for event_ctr in self.event_counters.values(): + event_ctr.reset() + + def start_event(self, name): + if self.timers is None: + return + + if name not in self.event_counters: + self.event_counters[name] = __class__.EventCounter(name=name, count=0, num_elem=0) + self.timers(name).start() + + def stop_event(self, name, num_elem): + if self.timers is None: + return + assert name in self.event_counters, f'unknown event {name}' + self.event_counters[name].increment(num_elem) + self.timers(name).stop() + + def _log_timers(self): + if self.timers is None: + return + self.timers.log(names=list(self.event_counters.keys())) + + def _log_event_counters(self): + for event_ctr in self.event_counters.values(): + log_dist( + f'{event_ctr.name}: count = {event_ctr.count}, numel = {event_ctr.num_elem}', + #f'{event_ctr.name}: time = {self._log_timers()},count = {event_ctr.count}, numel = {event_ctr.num_elem}', + ranks=[0]) + + def log_events(self): + self._log_event_counters() + self._log_timers() diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/stage3.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/stage3.py new file mode 100644 index 0000000000000000000000000000000000000000..68cab13c4a93960541e88002039054d37d889a15 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/stage3.py @@ -0,0 +1,2817 @@ 
+# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import sys +import gc +import collections +from typing import Deque, Dict, Tuple +from deepspeed import comm as dist +from deepspeed.utils import groups + +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors +from deepspeed.runtime.base_optimizer import ZeROOptimizer +from deepspeed.utils import logger +from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler +from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced, all_to_all_quant_reduce +from deepspeed.runtime.utils import inf, is_model_parallel_parameter, get_only_unique_item +from deepspeed.runtime.zero.partition_parameters import * +from deepspeed.runtime.zero.config import ZeroStageEnum +from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum +from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload +from deepspeed.runtime.zero.utils import apply_to_tensors_only +from deepspeed.ops.adam import DeepSpeedCPUAdam +from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus +from deepspeed.runtime.swap_tensor.optimizer_utils import OptimizerSwapper +from deepspeed.runtime.swap_tensor.partitioned_optimizer_swapper import PartitionedOptimizerSwapper +from deepspeed.runtime.swap_tensor.pipelined_optimizer_swapper import PipelinedOptimizerSwapper +from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, FP32_FLAT_GROUPS, PARTITION_COUNT, ZERO_STAGE, LOSS_SCALER +from deepspeed.accelerator import get_accelerator +from deepspeed.utils import z3_leaf_parameter + +# Toggle this to true to enable correctness test +# with gradient partitioning and without +pg_correctness_test = False + +OPTIMIZER_SWAP_IN_STATE_TIMER = 'optimizer_swap_in_state' +INIT_OPTIMIZER_TIMER = 'init_optimizer_state' +OPTIMIZER_SWAP_OUT_STATE_TIMER = 'optimizer_swap_out_state' +OPTIMIZER_STEP_TIMER = 'optimizer_step' + + +def print_rank_0(message, debug=False, force=False): + rank = dist.get_rank() + if rank == 0 and (debug or force): + logger.info(message) + # other variations + # - print for all ranks w/o interleaving + # printflock(f"[{rank}] {message}") + # - print to log file per rank + # log_rank_file(rank, message) + + +def input(msg): + return + + +def isclose(a, b, rtol=1e-09, atol=0.0): + return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol) + + +def lcm(x, y): + from fractions import gcd # or can import gcd from `math` in Python 3 + return x * y // gcd(x, y) + + +def move_to_cpu(tensor_list): + for tensor in tensor_list: + tensor.data = tensor.data.cpu() + + +INITIAL_MICRO_STEP_ID = -1 + + +class DeepSpeedZeroOptimizer_Stage3(ZeROOptimizer): + """ + DeepSpeedZeroOptimizer designed to reduce the memory footprint + required for training large deep learning models. 
+ + For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models + https://arxiv.org/abs/1910.02054 + + For usage examples, refer to TODO: DeepSpeed Tutorial + + """ + + def __init__( + self, + module, + init_optimizer, + timers, + ds_config, + static_loss_scale=1.0, + dynamic_loss_scale=False, + dynamic_loss_args=None, + verbose=True, + contiguous_gradients=True, + reduce_bucket_size=500000000, + prefetch_bucket_size=50000000, + max_reuse_distance=1000000000, + max_live_parameters=1000000000, + param_persistence_threshold=100000, + model_persistence_threshold=sys.maxsize, + dp_process_group=None, + reduce_scatter=True, + overlap_comm=False, + offload_optimizer_config=None, + offload_param_config=None, + sub_group_size=1000000000000, + offload_ratio=0.0, + mpu=None, + clip_grad=0.0, + gradient_accumulation_dtype=torch.float32, + communication_data_type=torch.float16, + postscale_gradients=True, + gradient_predivide_factor=1.0, + gradient_accumulation_steps=1, + elastic_checkpoint=False, + aio_config=None, + all2all_process_group=None, + zero_hpz_partition_size=1, + zero_quantized_weights=False, + zero_quantized_nontrainable_weights=False, + ): + see_memory_usage("Stage 3 initialize beginning", force=True) + + print_rank_0(f"initialized {__class__.__name__} with args: {locals()}", force=False) + + if dist.get_rank() == 0: + logger.info(f"Reduce bucket size {reduce_bucket_size}") + logger.info(f"Prefetch bucket size {prefetch_bucket_size}") + # The fused optimizer does all the work. We need this layer for two reason: + # 1. maintain same user API from apex.fp16_utils + # 2. keep common stuff here in case we need to add ne552w fused optimizer later + + # differences from apex.fp16_utils: + # - assume all model params in fp16 + # - assume all params requires grad + # - flat by groups, not keeping state. TODO: remove state explicitly? + # - master grad and unflat master weight never exist. TODO: a way to save out unflat master? + if not get_accelerator().is_available(): + raise SystemError("Cannot use fp16 without accelerator.") + + self.optimizer = init_optimizer + + # Use torch (un)flatten ops + self.flatten = _flatten_dense_tensors + self.unflatten = _unflatten_dense_tensors + self.dtype = self.optimizer.param_groups[0]['params'][0].dtype + self.gradient_accumulation_dtype = gradient_accumulation_dtype + self._global_grad_norm = 0. 
+ + self.custom_loss_scaler = False + self.external_loss_scale = None + + self.optimizer_swapper = None + self.swap_optimizer = False + + self.offload_optimizer = False + self.offload_optimizer_pin_memory = False + self.offload_optimizer_fast_init = False + self.offload_param = False + self.offload_param_pin_memory = False + self.params_in_nvme_and_cpu = False + self.max_params_in_cpu = 0 + self.partial_offload = offload_ratio + + #num of ranks in a ZeRO param partitioning group + self.zero_hpz_partition_size = zero_hpz_partition_size + + zero_param_parallel_group = groups._get_zero_param_intra_parallel_group() + print_rank_0( + f"ZeRO Stage 3 param partitioning group {self.zero_hpz_partition_size} {zero_param_parallel_group}", + force=False) + if self.zero_hpz_partition_size > 1 and zero_param_parallel_group is None: + self._set_zero_group_parallelism() + zero_param_parallel_group = groups._get_zero_param_intra_parallel_group() + + self.parameter_offload = self.initialize_ds_offload( + module=module, + timers=timers, + ds_config=ds_config, + overlap_comm=overlap_comm, + prefetch_bucket_size=prefetch_bucket_size, + max_reuse_distance=max_reuse_distance, + max_live_parameters=max_live_parameters, + param_persistence_threshold=param_persistence_threshold, + model_persistence_threshold=model_persistence_threshold, + dp_process_group=dp_process_group, + offload_param_config=offload_param_config, + mpu=mpu, + zero_param_parallel_group=zero_param_parallel_group, + zero_quantized_weights=zero_quantized_weights, + zero_quantized_nontrainable_weights=zero_quantized_nontrainable_weights) + + self.persistent_parameters = self.parameter_offload.persistent_parameters + self._configure_offloading(offload_optimizer_config, offload_param_config) + + # backup fused_adam optimizer init + if self.offload_optimizer and self.partial_offload != 1.0: + backup_gpu_tensor = torch.randn(1, device=get_accelerator().device_name()).to(self.dtype) + backup_gpu_param = torch.nn.Parameter(backup_gpu_tensor) + assert type(init_optimizer) == DeepSpeedCPUAdam, 'Hybrid Optimizer Only Supports DeepSpeedCPUAdam' + self.backup_optimizer = torch.optim.AdamW([backup_gpu_param], + lr=self.optimizer.param_groups[0]["lr"], + betas=self.optimizer.param_groups[0]["betas"], + eps=self.optimizer.param_groups[0]["eps"], + weight_decay=self.optimizer.param_groups[0]["weight_decay"], + amsgrad=self.optimizer.param_groups[0]["amsgrad"]) + # Multiple param_groups configs for back-up optimizer + if len(self.optimizer.param_groups) > 1: + for i in range(1, len(self.optimizer.param_groups)): + self.backup_optimizer.add_param_group(self.optimizer.param_groups[i]) + + self.module = module + self.elastic_checkpoint = elastic_checkpoint + + self.inf_or_nan_tracker: Tensor = torch.zeros(1, + dtype=torch.bool, + device=get_accelerator().current_device_name(), + requires_grad=False) + + self.deepspeed_adam_offload = (self.offload_optimizer and type(init_optimizer) == DeepSpeedCPUAdam) + + self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu + ### streams used for overlapping computation with communication + self.reduce_and_partition_stream = None if get_accelerator().is_synchronized_device() else get_accelerator( + ).Stream() if overlap_comm else get_accelerator().default_stream() + + ############################################################################ + + self.n_caching_allocator_flushes = 0 + + #-------------Stage 3 Setup-------------------# + + self.timers = timers + + 
self.all2all_process_group = all2all_process_group + + self.reduce_scatter = reduce_scatter + + self.dp_process_group = self.parameter_offload.dp_process_group + self.sequence_parallel_size = groups._get_sequence_parallel_world_size() + + self.all2all_process_group = all2all_process_group + + self.zero_quantized_nontrainable_weights = zero_quantized_nontrainable_weights + + self.partition_count = dist.get_world_size(group=self.dp_process_group) + + if mpu is None: + self.model_parallel_group = None + self.model_parallel_rank = 0 + else: + self.model_parallel_group = mpu.get_model_parallel_group() + self.model_parallel_rank = mpu.get_model_parallel_rank() + + self.overflow = False + self.clip_grad = clip_grad + self.communication_data_type = communication_data_type + self.gradient_predivide_factor = gradient_predivide_factor + self.postscale_gradients = postscale_gradients + self.gradient_accumulation_steps = gradient_accumulation_steps + self.micro_step_id = 0 + self.reduce_bucket_size = int(reduce_bucket_size) + + if self.all2all_process_group is not None: + assert self.all2all_process_group is not None and self.reduce_scatter == True, "when enable all_to_all_reduce, reduce_scatter should also be enabled for data type checks." + + if self.reduce_scatter: + valid_reduce_scatter_dtypes = (torch.float16, torch.bfloat16, torch.float32) + assert self.communication_data_type in valid_reduce_scatter_dtypes, f"ZeRO-3 supports {valid_reduce_scatter_dtypes} communication_data_type with reduce scatter enabled. Got: '{self.communication_data_type}'" + assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-3 with reduce scatter enabled" + assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-3 with reduce scatter enabled" + + # Holds the mode parameter + # The param.data may not hold any meaningful data + # when param's status is NOT_AVAILABLE or IN_FLGHT + self.fp16_groups = [] + + # Hold partitioned parameters + self.fp16_partitioned_groups = [] + + # Holds a fused and flattened copy of the parameters + self.fp16_partitioned_groups_flat = [] + self.fp16_partitioned_groups_flat_numel = [] + self.fp16_partitioned_groups_flat_id = [] + + #defragmented pinned memory + self.param_groups_fp16_flat_cpu_memory = [] + + #a single 32-bit partition of the parallel partitioned parameters + #that this process will update + self.fp32_partitioned_groups_flat = [] + self.next_swappable_fp32_partitioned_groups = [] + + # number of elements per partition in each group + self.partition_size = [] + + self.all_reduce_print = False + + self.prefetch_elements = int(prefetch_bucket_size) + + self.contiguous_gradients = contiguous_gradients + + # padding on each partition for alignment purposes + self.groups_padding = [] + + self.sub_group_size = sub_group_size + + self.sub_group_to_group_id = {} + + # Trainable parameters + self.trainable_param_groups = self._get_trainable_parameter_groups() + + see_memory_usage("Before creating fp16 partitions", force=True) + self._create_fp16_partitions_with_defragmentation(self.trainable_param_groups) + num_fp16_subgroups = len(self.fp16_partitioned_groups_flat) + see_memory_usage(f"After creating fp16 partitions: {num_fp16_subgroups}", force=True) + + # Optimizer tensor swapping + if self.swap_optimizer: + self._configure_tensor_swapping(offload_optimizer_config, aio_config) + + self.is_gradient_accumulation_boundary: bool = True + + self.param_reduce_events: Deque[get_accelerator().Event] = 
collections.deque() + # TODO. make this configurable via JSON + self.max_param_reduce_events: int = 2 + + self.param_dict = {} + + # map between param_id and bool to specify if a param is in this partition + self.is_param_in_current_partition = {} + + self.extra_large_param_to_reduce = None + self.grads_in_ipg_bucket = [] + self.params_in_ipg_bucket = [] + + self.params_already_reduced = {} + self.is_gradient_accumulation_boundary = True + self._release_ipg_buffers() + self.previous_reduced_grads = None + + # model parameter traversal-based param id that's stable across runs + for params_group in self.fp16_groups: + for param in params_group: + param_id = self.get_param_id(param) + self.param_dict[param_id] = param + self.params_already_reduced[param_id] = False + + #Largest partitioned param + largest_partitioned_param_numel = 0 + for fp16_partitioned_group in self.fp16_partitioned_groups: + if len(fp16_partitioned_group) > 0: + largest_partitioned_param_numel = max( + largest_partitioned_param_numel, + max([max(tensor.numel(), tensor.ds_numel) for tensor in fp16_partitioned_group])) + + print_rank_0(f'Largest partitioned param numel = {largest_partitioned_param_numel}', force=False) + + self._setup_for_real_optimizer() + self.grad_position = {} + self.set_grad_positions() + + if self.offload_optimizer: + self.norm_for_param_grads = {} + + # stores if a partition has been reduced in this step + self.is_partition_reduced = {} + + # stores if a grad in a partition has been computed or not + self.is_grad_computed = {} + + # will store the averaged gradients required by this partition + self.averaged_gradients = {} + + #creates backward hooks for gradient partitioning + ###Calls all gather param + self._grad_acc_hooks = [] + self._leaf_module_hooks = [] + self.create_reduce_and_remove_grad_hooks() + + #exit(0) + + # we may have a way of fusing dynamic scale. 
Do not support for now + self.loss_scaler = CreateLossScaler(dtype=self.dtype, + static_loss_scale=static_loss_scale, + dynamic_scaling=dynamic_loss_scale, + dynamic_loss_args=dynamic_loss_args) + self.dynamic_loss_scale = self.loss_scaler.dynamic + + self.debug_fp16_grads = [{} for _ in self.fp16_groups] + + self._link_all_hp_params() + + if dist.get_rank(group=self.dp_process_group) == 0: + see_memory_usage(f"After initializing ZeRO optimizer", force=True) + + def destroy(self): + self.parameter_offload.destroy() + for hook in self._grad_acc_hooks: + hook.remove() + for hook in self._leaf_module_hooks: + hook.remove() + print_rank_0("Removed grad acc hooks", force=False) + del self.__ipg_bucket_flat_buffer + + def initialize_ds_offload( + self, + module, + timers, + ds_config, + overlap_comm, + prefetch_bucket_size, + max_reuse_distance, + max_live_parameters, + param_persistence_threshold, + model_persistence_threshold, + dp_process_group, + offload_param_config, + mpu, + zero_param_parallel_group, + zero_quantized_weights, + zero_quantized_nontrainable_weights, + ): + return DeepSpeedZeRoOffload(module=module, + timers=timers, + ds_config=ds_config, + overlap_comm=overlap_comm, + prefetch_bucket_size=prefetch_bucket_size, + max_reuse_distance=max_reuse_distance, + max_live_parameters=max_live_parameters, + param_persistence_threshold=param_persistence_threshold, + model_persistence_threshold=model_persistence_threshold, + dp_process_group=dp_process_group, + offload_param_config=offload_param_config, + mpu=mpu, + zero_param_parallel_group=zero_param_parallel_group, + zero_quantized_weights=zero_quantized_weights, + zero_quantized_nontrainable_weights=zero_quantized_nontrainable_weights) + + def _get_trainable_parameter_groups(self): + param_groups = [] + PARAMS_KEY = "params" + for param_group in self.optimizer.param_groups: + trainable_params = [p for p in param_group[PARAMS_KEY] if p.requires_grad] + if len(trainable_params) == 0: + continue + + trainable_param_group = {} + for key in param_group.keys(): + if key == PARAMS_KEY: + trainable_param_group[PARAMS_KEY] = trainable_params + else: + trainable_param_group[key] = param_group[key] + param_groups.append(trainable_param_group) + + return param_groups + + def _set_zero_group_parallelism(self): + groups._create_zero_param_parallel_group(self.zero_hpz_partition_size) + + def invalidate_secondary_tensor(self): + for fpg in self.fp16_groups: + for param in fpg: + if param.ds_secondary_tensor is not None: + param.ds_secondary_tensor = None + + def _setup_for_real_optimizer(self): + see_memory_usage("Before creating fp32 partitions", force=True) + self._create_fp32_partitions() + see_memory_usage("After creating fp32 partitions", force=True) + dist.barrier() + + # To support pipelined optimizer swapping + self._create_next_swappable_fp32_groups() + + see_memory_usage("Before initializing optimizer states", force=True) + + self.initialize_optimizer_states() + see_memory_usage("After initializing optimizer states", force=True) + dist.barrier() + + if dist.get_rank() == 0: + logger.info(f"optimizer state initialized") + + # IPG + if self.contiguous_gradients: + self.__ipg_bucket_flat_buffer: Tensor = torch.empty(self.reduce_bucket_size, + dtype=self.dtype, + device=get_accelerator().current_device_name()) + + self.grad_partitions_flat_buffer = None + self.__param_id_to_grad_partition: Dict[int, Tensor] = {} + + all_params = list(itertools.chain.from_iterable(self.fp16_groups)) + + self.grad_partitions_flat_buffer: Tensor = 
torch.zeros(sum(p.partition_numel() for p in all_params), + dtype=self.gradient_accumulation_dtype, + device=self.device) + if self.offload_optimizer_pin_memory: + self.grad_partitions_flat_buffer = get_accelerator().pin_memory(self.grad_partitions_flat_buffer) + + offset = 0 + for param in all_params: + self.__param_id_to_grad_partition[param.ds_id] = self.grad_partitions_flat_buffer.narrow( + 0, offset, param.partition_numel()) + offset += param.partition_numel() + + def _link_all_hp_params(self): + for p in self.module.parameters(): + p._z3_optimizer = self + + def set_lr(self, lr): + """Set the learning rate.""" + for param_group in self.optimizer.param_groups: + param_group["lr"] = lr + + def get_lr(self): + """Return the current learning rate.""" + return self.optimizer.param_groups[0]["lr"] + + # TODO. factor out to a utility outside of stage3 + @staticmethod + def defragment(tensors: List[Tensor]) -> Tensor: + """move provided tensors into a contiguous flat buffer, with some additional + measures taken to reduce memory fragmentation""" + assert len(set(t.dtype for t in tensors)) == 1 + assert len(set(t.device for t in tensors)) == 1 + + cpu_buffer = torch.empty(sum(p.numel() for p in tensors), + dtype=get_only_unique_item(t.dtype for t in tensors), + device="cpu") + tensor_infos: List[Tuple[Tensor, int, int]] = [] + orig_device = get_only_unique_item(t.device for t in tensors) + + offset = 0 + for tensor in tensors: + tensor_numel = tensor.numel() + # move the tensor from device memory to host memory + cpu_buffer.narrow(0, offset, tensor_numel).copy_(tensor) + tensor.data = torch.empty(0, dtype=tensor.dtype, device=tensor.device) + + # record some data so we can restore the device tensor later + tensor_infos.append((tensor, offset, tensor_numel)) + + offset += tensor_numel + + gc.collect() + get_accelerator().empty_cache() + + # copy tensors (now flattened and contiguous) back to GPU + device_buffer = cpu_buffer.to(orig_device) + + # restore device tensors + for tensor, offset, tensor_numel in tensor_infos: + tensor.data = device_buffer.narrow(0, offset, tensor_numel) + + return device_buffer + + def _get_param_coordinator(self, training): + return self.parameter_offload.get_param_coordinator(training) + + def _configure_offloading(self, offload_optimizer_config, offload_param_config): + ###################### offload optimizer setup ################################## + if offload_optimizer_config is not None and offload_optimizer_config.device != OffloadDeviceEnum.none: + self.offload_optimizer = True + self.offload_optimizer_pin_memory = offload_optimizer_config.pin_memory + self.swap_optimizer = offload_optimizer_config.device == OffloadDeviceEnum.nvme + self.offload_optimizer_fast_init = offload_optimizer_config.fast_init + + ###################### offload param setup ################################## + if offload_param_config is not None and offload_param_config.device != OffloadDeviceEnum.none: + self.offload_param = True + self.offload_param_pin_memory = offload_param_config.pin_memory + self.params_in_nvme_and_cpu = offload_param_config.device == OffloadDeviceEnum.nvme + self.max_params_in_cpu = offload_param_config.max_in_cpu + print_rank_0( + f"FP16 params swapping is {self.params_in_nvme_and_cpu}, Max params in CPU is {self.max_params_in_cpu}", + force=False) + + def _configure_tensor_swapping(self, offload_optimizer_config, aio_config): + nvme_swap_folder = os.path.join(offload_optimizer_config.nvme_path, 'zero_stage_3') + os.makedirs(nvme_swap_folder, exist_ok=True) + 
if dist.get_rank() == 0: + logger.info(f'Tensor Swapping: Adding optimizer tensors') + + swapper_type = PipelinedOptimizerSwapper if offload_optimizer_config.pipeline else PartitionedOptimizerSwapper + + self.optimizer_swapper = swapper_type(swap_config=offload_optimizer_config, + aio_config=aio_config, + base_folder=nvme_swap_folder, + optimizer=self.optimizer, + largest_numel=max(self.fp16_partitioned_groups_flat_numel), + device=self.device, + dtype=torch.float32, + timers=self.timers) + + @property + def elements_in_ipg_bucket(self): + return sum(p.ds_numel for p in self.params_in_ipg_bucket) + + def _move_to_flat_buffer(self, param_list, flat_buffer, avoid_copy=False): + '''If flat buffer is None then the parameters in the param_list are + not copied to the flat buffer. This is because they exceed the number of max_params_in_cpu + Some of these parameters may already be in CPU in unflattened buffers + or they maybe in GPU, or they maybe in NVME. If they are in NVME, then + they will be marked as NOT_AVAILABLE, and will be moved to CPU when they are + needed during training.''' + if flat_buffer is None: + # this dst buffer is on NVMe, so skip this + return + + start = 0 + for param in param_list: + src = param.ds_tensor + dest = flat_buffer.narrow(0, start, src.ds_numel) + start = start + src.ds_numel + '''if the parameter was initialized in nvme then bring it to the destination buffer directly''' + if src.status == PartitionedParamStatus.NOT_AVAILABLE: + print_rank_0( + f"Swapping in {param.ds_id} with partition size {param.partition_numel()} permanently to CPU") + param.nvme_swapper.swap_into_buffer(param, dest) + src.data = dest.data + src.status = PartitionedParamStatus.AVAILABLE + else: + assert src.status == PartitionedParamStatus.AVAILABLE, "Partitioned Param must be available here" + if not avoid_copy: + dest.data.copy_(src.data) + src.data = dest.data + + # Final location must be gpu/cpu in this case + param.ds_tensor.final_location = 'not-nvme' + + def _create_param_groups_fp16_flat_cpu_memory(self): + + aggregate_params_count = 0 + + for j, param_group in enumerate(self.trainable_param_groups): + params_in_group = sum([p.partition_numel() for p in param_group['params']]) + + flat_buffer_size = params_in_group + + if self.params_in_nvme_and_cpu and \ + aggregate_params_count + params_in_group > self.max_params_in_cpu: + + flat_buffer_size = max(0, self.max_params_in_cpu - aggregate_params_count) + + aggregate_params_count += params_in_group + + if flat_buffer_size > 0: + print_rank_0(f"group {j} flat buffer size {flat_buffer_size}", force=False) + self.param_groups_fp16_flat_cpu_memory.append(get_accelerator().pin_memory( + torch.empty(int(flat_buffer_size), dtype=self.dtype))) + else: + print_rank_0(f"No flat buffer size. 
Param group size was {params_in_group}", force=False) + + self.param_groups_fp16_flat_cpu_memory.append(torch.empty(1, dtype=self.dtype)) + + def _create_fp16_partitions_with_defragmentation(self, fp16_param_groups): + dist.barrier() + + param_groups: List[List[Parameter]] = tuple( + self._create_fp16_sub_groups(param_group["params"]) for param_group in fp16_param_groups) + + # bookkeeping related to param groups + for param_group_idx, param_group in enumerate(param_groups): + for sub_group in param_group: + sub_group_idx = len(self.fp16_groups) + + # record sub group and partitions + self.fp16_groups.append(sub_group) + self.fp16_partitioned_groups.append([param.ds_tensor for param in sub_group]) + + # record sub group -> group mapping + self.sub_group_to_group_id[sub_group_idx] = param_group_idx + + # record total elements of parameter partitions in sub group + self.fp16_partitioned_groups_flat_numel.append(sum(p.partition_numel() for p in sub_group)) + + # record ds_ids of parameter partitions in sub group + self.fp16_partitioned_groups_flat_id.append([p.ds_id for p in sub_group]) + + # record padding required to align group to world size (only applies to last rank) + rank_requires_padding = dist.get_rank( + self.dp_process_group) == dist.get_world_size(self.dp_process_group) - 1 + self.groups_padding.append([p.padding_size() if rank_requires_padding else 0 for p in sub_group]) + + # move parameters to flattened buffer + if not self.offload_param: # partitioned params remain in GPU during training + # move parameter partitions into a single contiguous flat buffer + parameter_partitions: List[Tensor] = [] + for sub_group in self.fp16_groups: + for param in sub_group: + parameter_partitions.append(param.ds_tensor) + device_buffer = __class__.defragment(parameter_partitions) + + # setup flat buffers per subgroup, these are each just sections of the + # contiguous flat buffer for all parameters that we created earlier + offset = 0 + for sub_group in self.fp16_groups: + sub_group_numel = sum(param.partition_numel() for param in sub_group) + self.fp16_partitioned_groups_flat.append(device_buffer.narrow(0, offset, sub_group_numel)) + offset += sub_group_numel + else: # partitioned params offloaded to CPU when not in use + # create a flat CPU memory allocation for each param group + self._create_param_groups_fp16_flat_cpu_memory() + for param_group_idx, param_group in enumerate(param_groups): + flat_offset = 0 + for i, sub_group in enumerate(param_group): + total_elements = sum(p.partition_numel() for p in sub_group) + print_rank_0(f"Params in nvme and cpu {self.params_in_nvme_and_cpu}") + #Flat buffer may not be available for parameters that reside in NVME + if not self.params_in_nvme_and_cpu or flat_offset + total_elements <= self.param_groups_fp16_flat_cpu_memory[ + param_group_idx].numel(): + fp16_partitioned_group_flat = self.param_groups_fp16_flat_cpu_memory[param_group_idx].narrow( + 0, flat_offset, total_elements) + print_rank_0( + f"Creating a flat buffer for subgroup {i} requiring {total_elements} elements, and cumulative CPU elements {flat_offset + total_elements}", + force=False) + + elif self.params_in_nvme_and_cpu: + fp16_partitioned_group_flat = None + print_rank_0(f"No flat buffer for sub group {i} of {total_elements} elements", force=False) + else: + assert False, "Either params are in nvme, or they are in CPU memory. This code path should not be triggered. 
Please see you max_params_in_cpu and params_in_nvme configs" + + self.fp16_partitioned_groups_flat.append(fp16_partitioned_group_flat) + flat_offset += total_elements + + self._move_to_flat_buffer(sub_group, + fp16_partitioned_group_flat, + avoid_copy=not self.offload_param) + + # if necessary, create a pinned memory buffer to be used for swapping out + # params to NVME after optimizer step + should_create_fp16_flat_reuse_buffer = any(flattened_partition_group is None + for flattened_partition_group in self.fp16_partitioned_groups_flat) + if should_create_fp16_flat_reuse_buffer: + max_partition_numel, largest_partition_numel = 0, None + for sub_group in self.fp16_groups: + total_elements = sum(t.partition_numel() for t in sub_group) + if total_elements > max_partition_numel: + largest_partition_numel = [t.ds_numel for t in sub_group] + max_partition_numel = total_elements + + assert len(largest_partition_numel) > 0, f'Unexpected that largest partition is empty' + self.fp16_groups[0][0].nvme_swapper.reserve_partitioned_swap_space(largest_partition_numel) + + def _swap_in_sub_group_to_flat_buffer(self, flat_buffer, sub_group_id): + offset = 0 + elements_in_sub_group = sum([t.ds_numel for t in self.fp16_partitioned_groups[sub_group_id]]) + assert (flat_buffer.numel() == elements_in_sub_group) + for param, partitioned_param in zip(self.fp16_groups[sub_group_id], + self.fp16_partitioned_groups[sub_group_id]): + dest = flat_buffer.narrow(0, offset, partitioned_param.ds_numel) + if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE: + print_rank_0( + f"Swapping in {param.ds_id} with elements {param.ds_numel} and partition {param.partition_numel()}" + ) + param.nvme_swapper.swap_in([param], async_op=False) + dest.data.copy_(partitioned_param.data) + param.nvme_swapper.remove_partition_and_release_buffers([param]) + print_rank_0(f"Swapping in {param.ds_id} done") + else: + dest.data.copy_(partitioned_param.data) + offset += partitioned_param.ds_numel + + def _create_next_swappable_fp32_groups(self): + reverse_order_indices = [i for i in range(len(self.fp32_partitioned_groups_flat))] + reverse_order_indices.reverse() + + next_group = None + for i in reverse_order_indices: + self.next_swappable_fp32_partitioned_groups.append(next_group) + if self._swappable_optimizer_subgroup(i): + next_group = self.fp32_partitioned_groups_flat[i] + + self.next_swappable_fp32_partitioned_groups.reverse() + + def _get_sub_group_partitions(self, sub_group_id): + sub_group_partitions = [] + for param, partitioned_param in zip(self.fp16_groups[sub_group_id], + self.fp16_partitioned_groups[sub_group_id]): + if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE: + swap_path = param.nvme_swapper.get_path(param, True) + sub_group_partitions.append((partitioned_param, param.partition_numel(), swap_path)) + else: + sub_group_partitions.append((partitioned_param, partitioned_param.ds_numel, None)) + + return sub_group_partitions + + def _create_fp32_partitions(self): + cpu_memory_usage = 0 + cpu_memory_sub_groups = 0 + nvme_memory_usage = 0 + num_swappable_partitions = 0 + num_swap_from_nvme_partitions = 0 + num_swap_from_cpu_partitions = 0 + swap_from_nvme_memory_usage = 0 + swap_from_cpu_memory_usage = 0 + GIGA_BYTES = (1024**3) + + swappable_fp32_tensors = [] + swappable_fp16_src_tensors = [] + nvme_fp16_partitions_info = [] + nvme_fp16_num_elems = [] + nvme_fp32_dest_tensors = [] + fp32_element_size = torch.tensor([], dtype=torch.float32).element_size() + + # Assign portion of subgroup to cpu, 
the other to gpu. + if self.offload_optimizer: + self.subgroup_to_device = {} + sub_group_size = len(self.fp16_partitioned_groups_flat) + # print(f"Partial offload sub_group_size is {sub_group_size}, ratio is {self.partial_offload}\n") + for i in range(sub_group_size): + if i < int(self.partial_offload * sub_group_size): + self.subgroup_to_device[i] = 'cpu' + else: + self.subgroup_to_device[i] = get_accelerator()._name + + for i, tensor in enumerate(self.fp16_partitioned_groups_flat): + num_elements = self.fp16_partitioned_groups_flat_numel[i] + + # a partition of the fp32 master weights that will be updated by this process + if self._swappable_optimizer_subgroup(i): + self.fp32_partitioned_groups_flat.append(torch.Tensor()) + nvme_memory_usage += (fp32_element_size * num_elements) + num_swappable_partitions += 1 + + if self.params_in_nvme_and_cpu and tensor is None: + num_swap_from_nvme_partitions += 1 + swap_from_nvme_memory_usage += (fp32_element_size * num_elements) + if self.offload_optimizer_fast_init: + sub_group_partitions = self._get_sub_group_partitions(i) + nvme_fp16_partitions_info.append(sub_group_partitions) + nvme_fp16_num_elems.append(num_elements) + nvme_fp32_dest_tensors.append(self.fp32_partitioned_groups_flat[i]) + else: + unpinned_fp32_buffer = torch.empty(num_elements, device=self.device, dtype=torch.float) + self._swap_in_sub_group_to_flat_buffer(unpinned_fp32_buffer, i) + self.optimizer_swapper.initialize_parameters(parameters=[self.fp32_partitioned_groups_flat[i]], + src_tensors=[unpinned_fp32_buffer]) + else: + num_swap_from_cpu_partitions += 1 + swap_from_cpu_memory_usage += (fp32_element_size * num_elements) + swappable_fp32_tensors.append(self.fp32_partitioned_groups_flat[i]) + swappable_fp16_src_tensors.append(self.fp16_partitioned_groups_flat[i]) + else: + cpu_memory_usage += (fp32_element_size * num_elements) + cpu_memory_sub_groups += 1 + + if self.params_in_nvme_and_cpu and tensor is None: + unpinned_fp32_buffer = torch.empty(num_elements, device=self.device, dtype=torch.float) + self._swap_in_sub_group_to_flat_buffer(unpinned_fp32_buffer, i) + self.fp32_partitioned_groups_flat.append(unpinned_fp32_buffer) + else: + if self.offload_optimizer: + self.fp32_partitioned_groups_flat.append(self.fp16_partitioned_groups_flat[i].to( + self.subgroup_to_device[i]).clone().float().detach()) + else: + self.fp32_partitioned_groups_flat.append(self.fp16_partitioned_groups_flat[i].to( + self.device).clone().float().detach()) + + self.fp32_partitioned_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it + ds_id_begin = str(self.fp16_partitioned_groups_flat_id[i][0]) + ds_id_end = str(self.fp16_partitioned_groups_flat_id[i][-1]) + self.fp32_partitioned_groups_flat[i].ds_id = ds_id_begin + '_' + ds_id_end + + if len(swappable_fp32_tensors) > 0: + self.optimizer_swapper.initialize_parameters(parameters=swappable_fp32_tensors, + src_tensors=swappable_fp16_src_tensors) + + if len(nvme_fp32_dest_tensors) > 0: + fp16_pinned_buffers = self.fp16_groups[0][0].nvme_swapper.reserve_available_buffers() + assert len(fp16_pinned_buffers) > 0 + self.optimizer_swapper.initialize_from_swapped_fp16_params(fp16_partitions_info=nvme_fp16_partitions_info, + fp16_num_elems=nvme_fp16_num_elems, + fp16_pinned_buffers=fp16_pinned_buffers, + fp32_parameters=nvme_fp32_dest_tensors) + self.fp16_groups[0][0].nvme_swapper.release_reserved_buffers() + + nvme_gigabytes = nvme_memory_usage / GIGA_BYTES + print_rank_0(f'Swappable FP32 Partitions: 
count={num_swappable_partitions} size={nvme_gigabytes:5.2f} GB', + force=False) + if self.params_in_nvme_and_cpu: + print_rank_0( + f'Swap from NVMe Partitions: count = {num_swap_from_nvme_partitions}, size = {swap_from_nvme_memory_usage/GIGA_BYTES:5.2f}GB', + force=False) + print_rank_0( + f'Swap from CPU Partitions: count = {num_swap_from_cpu_partitions}, size = {swap_from_cpu_memory_usage/GIGA_BYTES:5.2f}GB', + force=False) + + cpu_memory_gigabytes = cpu_memory_usage / GIGA_BYTES + print_rank_0(f'In-Memory FP32 Partitions: count={cpu_memory_sub_groups} size={cpu_memory_gigabytes:5.2f} GB', + force=False) + + # Clear for on-the-fly population before the optimizer step + for param_group in self.optimizer.param_groups: + param_group['params'] = [] + + def _create_fp16_sub_groups(self, params_group): + + params_group_numel = sum([param.partition_numel() for param in params_group]) + sub_group_size = self.sub_group_size + + if sub_group_size is None or sub_group_size >= params_group_numel: + return [params_group] + + sub_groups = [] + sub_group = [] + local_sub_group_size = 0 + for param in params_group: + + sub_group.append(param) + local_sub_group_size += param.partition_numel() + + if local_sub_group_size >= sub_group_size or id(param) == id(params_group[-1]): + + sub_groups.append(sub_group) + + sub_group = [] + local_sub_group_size = 0 + + return sub_groups + + def _release_ipg_buffers(self): + if self.contiguous_gradients: + self.ipg_buffer = None + + def _optimizer_step(self, sub_group_id): + param_group_id = self.sub_group_to_group_id[sub_group_id] + fp32_param = self.fp32_partitioned_groups_flat[sub_group_id] + if self.offload_optimizer: + cur_device = self.subgroup_to_device[sub_group_id] + if cur_device == 'cpu': + self.optimizer.param_groups[param_group_id]['params'] = [fp32_param] + cpu_loss = self.optimizer.step() + self.optimizer.param_groups[param_group_id]['params'] = [] + else: + self.backup_optimizer.param_groups[param_group_id]['params'] = [fp32_param] + gpu_loss = self.backup_optimizer.step() + self.backup_optimizer.param_groups[param_group_id]['params'] = [] + else: + self.optimizer.param_groups[param_group_id]['params'] = [fp32_param] + self.optimizer.step() + self.optimizer.param_groups[param_group_id]['params'] = [] + + def _swappable_optimizer_subgroup(self, sub_group_id): + if not self.swap_optimizer: + return False + + return self.optimizer_swapper.swappable_tensor(None, + numel=self.fp16_partitioned_groups_flat_numel[sub_group_id]) + + def _partitioned_params_swap_out(self, i): + offset = 0 + fp32_param = self.fp32_partitioned_groups_flat[i] + assert fp32_param is not None, \ + f'fp32 parameters of sub_group {i} is None' + + swap_fp16_params = [] + swap_fp32_params = [] + for param, partitioned_param in zip(self.fp16_groups[i], self.fp16_partitioned_groups[i]): + src = fp32_param.narrow(0, offset, partitioned_param.ds_numel) + if partitioned_param.status == PartitionedParamStatus.AVAILABLE: + partitioned_param.data.copy_(src.data) + else: + swap_fp32_params.append(src) + swap_fp16_params.append(param) + offset += partitioned_param.ds_numel + + if len(swap_fp16_params): + swap_fp16_params[0].nvme_swapper.swap_out_partitioned_params(dst_fp16_params=swap_fp16_params, + src_fp32_params=swap_fp32_params) + + def initialize_optimizer_states(self): + num_subgroups = len(self.fp16_groups) + + largest_numel = max([sum([p.ds_numel for p in psg]) for psg in self.fp16_partitioned_groups]) + gradient_dtype = self.fp32_partitioned_groups_flat[0].dtype + gradient_buffer = 
torch.zeros(int(largest_numel), dtype=gradient_dtype, device=self.device) + + timer_names = set() + + # State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers + # which do lazy initialization of the state at the first call to step. + is_adagrad = isinstance(self.optimizer, torch.optim.Adagrad) + + if self.swap_optimizer: + self.optimizer_swapper.init_timers() + + timer_names.add(INIT_OPTIMIZER_TIMER) + self.timers(INIT_OPTIMIZER_TIMER).start() + + for i, group in enumerate(self.fp16_groups): + swappable_optimizer_subgroup = self._swappable_optimizer_subgroup(i) + swappable_param_subgroup = self.fp16_partitioned_groups_flat[i] is None + + num_elements = int(self.fp16_partitioned_groups_flat_numel[i]) + + see_memory_usage( + f'[Begin] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}', + force=False) + + if swappable_optimizer_subgroup: + self._optimizer_states_and_gradient_swap_in(i, timer_names) + + if self.offload_optimizer and not swappable_optimizer_subgroup: + subgroup_gradient_buffer = torch.zeros(num_elements, dtype=gradient_dtype, device=self.device) + if self.offload_optimizer_pin_memory: + subgroup_gradient_buffer = get_accelerator().pin_memory(subgroup_gradient_buffer) + + self.fp32_partitioned_groups_flat[i].grad = subgroup_gradient_buffer.to(self.subgroup_to_device[i]) + else: + self.fp32_partitioned_groups_flat[i].grad = gradient_buffer.narrow(0, 0, num_elements) + + if swappable_param_subgroup: + self._partitioned_params_swap_out(i) + + if swappable_optimizer_subgroup: + self._optimizer_states_and_gradient_swap_out(i, timer_names) + + see_memory_usage( + f'[End] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}', + force=False) + + # Initialize the optimizer states with the flattened fp32 partition. 
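+        # Adagrad allocates its per-parameter state eagerly in its constructor (unlike Adam and most other
+        # optimizers, which create state lazily on the first call to step()), so it is re-instantiated here
+        # over the flattened fp32 partitions, reusing the original hyperparameters via self.optimizer.defaults.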
+ if is_adagrad: + self.optimizer = torch.optim.Adagrad(self.fp32_partitioned_groups_flat, **self.optimizer.defaults) + + self.timers(INIT_OPTIMIZER_TIMER).stop() + self.timers.log(timer_names) + + if self.swap_optimizer: + self.optimizer_swapper.log_timers() + + if not self.offload_optimizer: + for group in self.fp32_partitioned_groups_flat: + group.grad = None + + # Reset steps + return + + ######################################################################### + #########################ZeRO Partition Gradients######################## + ######################################################################### + + def get_first_param_index(self, group_id, param_group, partition_id): + for index, param in enumerate(param_group): + param_id = self.get_param_id(param) + if partition_id in self.param_to_partition_ids[group_id][param_id]: + return index + return None + + def initialize_gradient_partitioning_data_structures(self): + + total_partitions = dist.get_world_size(group=self.dp_process_group) + + for i, param_group in enumerate(self.fp16_groups): + + self.param_to_partition_ids[i] = {} + self.is_partition_reduced[i] = {} + self.total_grads_in_partition[i] = {} + self.remaining_grads_in_partition[i] = {} + self.is_grad_computed[i] = {} + self.grad_partition_insertion_offset[i] = {} + self.grad_start_offset[i] = {} + self.first_param_index_in_partition[i] = {} + + for partition_id in range(total_partitions): + self.is_grad_computed[i][partition_id] = {} + self.grad_partition_insertion_offset[i][partition_id] = {} + self.grad_start_offset[i][partition_id] = {} + self.initialize_gradient_partition(i, param_group, partition_id) + self.is_partition_reduced[i][partition_id] = False + self.first_param_index_in_partition[i][partition_id] = self.get_first_param_index( + i, param_group, partition_id) + + @instrument_w_nvtx + def independent_gradient_partition_epilogue(self): + self.report_ipg_memory_usage(f"In ipg_epilogue before reduce_ipg_grads", 0) + self.__reduce_and_partition_ipg_grads() + self.report_ipg_memory_usage(f"In ipg_epilogue after reduce_ipg_grads", 0) + + if not get_accelerator().resolves_data_dependency(): + self.reduce_and_partition_stream.synchronize() + + for param_id in self.params_already_reduced.keys(): + self.params_already_reduced[param_id] = False + + #in case of cpu offload, averaged gradients are already in fp32_partitioned_groups_flat.grad + #TODO: use a similar code path for both cpu_offload and non-cpu offload + if not self.offload_optimizer: + for i, sub_group in enumerate(self.fp16_groups): + #TODO: This is redundant + self.averaged_gradients[i] = [ + self.__param_id_to_grad_partition[param.ds_id] + if param.requires_grad else torch.zeros_like(param.ds_tensor) for param in sub_group + ] + # this method gets called after every backward. need to increment + # here because if it gets incremented in backward() the micro step + # id will be off by one when we do the reduce and partition at the. + # start of this method. + # TODO. 
make this less error prone + self.micro_step_id += 1 + + def overlapping_partition_gradients_reduce_epilogue(self): + self.independent_gradient_partition_epilogue() + + def create_reduce_and_remove_grad_hooks(self): + print_rank_0(f'[Begin] Create gradient reduction hooks') + self.grad_accs = [] + self.leaf_parameters = defaultdict(list) + for i, param_group in enumerate(self.fp16_groups): + for param in param_group: + if param.requires_grad: + #print_rank_0(f" Before all gather {param.device}, {param.shape}") + print_rank_0(f"Before all gather {param.device}, {param.shape}", force=False) + + # The hook must be created in un-partitioned parameter + param.all_gather() + + #print(f"After all gather {param.device}, {param.shape}") + def wrapper(param): + param_tmp = param.expand_as(param) + grad_acc = param_tmp.grad_fn.next_functions[0][0] + + @instrument_w_nvtx + def reduce_partition_and_remove_grads(*notneeded): + self.reduce_ready_partitions_and_remove_grads(param) + + self._grad_acc_hooks.append(grad_acc.register_hook(reduce_partition_and_remove_grads)) + self.grad_accs.append(grad_acc) + + #print(f"param grad fn {param.expand_as(param).grad_fn}") + if z3_leaf_parameter(param): + self.leaf_parameters[param.ds_z3_leaf_module].append(param) + else: + wrapper(param) + + # Partition the parameter after creating the hook + param.partition() + + # We delay reduce-scatter for all gradients in the leaf modules until the backward pass of the leaf module is done + for leaf_module, leaf_parameters in self.leaf_parameters.items(): + + def wrapper_pre_hook(params): + + def forward_pre_hook(module, input): + """Pre-forward hook to set backward hook on input tensors to the leaf module""" + module._leaf_module_inputs_remaining = 0 + + @instrument_w_nvtx + def reduce_leaf_module_grads(grad): + module._leaf_module_inputs_remaining -= 1 + # Make sure everything is done in the leaf module + if module._leaf_module_inputs_remaining == 0: + for param in params: + if param.grad is None: + param.grad = torch.zeros_like(param) + self.reduce_ready_partitions_and_remove_grads(param) + + def set_module_bwd_hook(tensor): + if tensor.requires_grad: + module._leaf_module_inputs_remaining += 1 + tensor.register_hook(reduce_leaf_module_grads) + return tensor + + output = apply_to_tensors_only(set_module_bwd_hook, input) + + return output + + return forward_pre_hook + + def wrapper_post_hook(): + + def forward_post_hook(module, input, output): + """Pre-forward hook to set backward hook on input tensors to the leaf module""" + module._leaf_output_required_grad_num = 0 + + def increment_rg_count_bwd_hook(tensor): + if tensor.requires_grad: + module._leaf_output_required_grad_num += 1 + return tensor + + apply_to_tensors_only(increment_rg_count_bwd_hook, output) + + if module._leaf_module_inputs_remaining == 0 and module._leaf_output_required_grad_num > 0: + raise RuntimeError( + "A module cannot be set as a leaf module when it does not have any input tensors that require gradients and has output tensors that require gradients. This is because the gradient reduction hook will not be called in this case." 
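+                        # reduce_leaf_module_grads is attached via backward hooks on the leaf module's input
+                        # tensors, so without any grad-requiring inputs those hooks never fire and the
+                        # parameters' gradients would never be reduced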
+ ) + + return forward_post_hook + + self._leaf_module_hooks.append(leaf_module.register_forward_pre_hook(wrapper_pre_hook(leaf_parameters))) + self._leaf_module_hooks.append(leaf_module.register_forward_hook(wrapper_post_hook())) + + print_rank_0(f'[End] Create gradient reduction hooks') + + def get_param_id(self, param): + return OptimizerSwapper.parameter_id(param) + + def report_ipg_memory_usage(self, tag, param_elems): + elem_count = self.elements_in_ipg_bucket + param_elems + percent_of_bucket_size = (100.0 * elem_count) // self.reduce_bucket_size + see_memory_usage( + f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}", + force=False) + + ###############Independent Partition Gradient ######################## + def reduce_independent_p_g_buckets_and_remove_grads(self, param): + #print_rank_0(f"Inside reduce ipg buckets. {debug_param2name_id_shape(param)}, ipg elements {self.elements_in_ipg_bucket}, reduce bucket size {self.reduce_bucket_size}", force=True) + + # Because the ipg bucket is initialized with a random place holder tensor, we must + # explicitly check that the bucket has any real data in it (self.elements_in_ipg_bucket > + # 0). Otherwise if the incoming param.ds_numel is large, this branch may get triggered on a + # garbage data and `self.average_tensor()` will crash because its params_to_reduce will be + # empty, while reduction_list will have that garbage data. + if self.elements_in_ipg_bucket + param.ds_numel > self.reduce_bucket_size and self.elements_in_ipg_bucket > 0: + self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads", param.ds_numel) + + self.__reduce_and_partition_ipg_grads() + + self.__add_grad_to_ipg_bucket(param) + + @instrument_w_nvtx + @torch.no_grad() + def __add_grad_to_ipg_bucket(self, param: Parameter) -> None: + if not get_accelerator().resolves_data_dependency(): + self.reduce_and_partition_stream.wait_stream(get_accelerator().default_stream()) + + if self.contiguous_gradients and self.elements_in_ipg_bucket + param.grad.numel() <= self.reduce_bucket_size: + # move the gradient to a contiguous buffer + with get_accelerator().stream(self.reduce_and_partition_stream): + # move the parameter's gradient to the contiguous flat buffer + new_grad_tensor = self.__ipg_bucket_flat_buffer.narrow(0, self.elements_in_ipg_bucket, + param.grad.numel()).view_as(param.grad) + new_grad_tensor.copy_(param.grad, non_blocking=True) + if not get_accelerator().is_synchronized_device(): + param.grad.record_stream(get_accelerator().current_stream()) + param.grad.data = new_grad_tensor + + self.params_in_ipg_bucket.append(param) + + @instrument_w_nvtx + @torch.no_grad() + def __reduce_and_partition_ipg_grads(self, safe_mode: bool = False) -> None: + if not self.params_in_ipg_bucket: + return + + for param in self.params_in_ipg_bucket: + if param.grad.numel() != param.ds_numel: + raise RuntimeError(f"{param.grad.numel()} != {param.ds_numel} Cannot reduce scatter " + f"gradients whose size is not same as the params") + + assert len(set(p.ds_id for p in self.params_in_ipg_bucket)) == len(self.params_in_ipg_bucket) + + while self.param_reduce_events and self.param_reduce_events[0].query(): + self.param_reduce_events.popleft() + if len(self.param_reduce_events) > self.max_param_reduce_events: + self.param_reduce_events.popleft().synchronize() + + with get_accelerator().stream(self.reduce_and_partition_stream): + if safe_mode: + assert_ints_same_as_other_ranks([p.ds_id for p in 
self.params_in_ipg_bucket]) + + if self.contiguous_gradients and self.elements_in_ipg_bucket <= self.reduce_bucket_size and not self.reduce_scatter: + grad_bucket = self.__ipg_bucket_flat_buffer.narrow(0, 0, self.elements_in_ipg_bucket) + grad_partitions = self.__avg_scatter_contiguous_grads(grad_bucket) + else: + self.params_in_ipg_bucket.sort(key=lambda p: p.ds_id) + grad_partitions = self.__avg_scatter_grads(self.params_in_ipg_bucket) + + self.partition_grads(self.params_in_ipg_bucket, grad_partitions) + + self.params_in_ipg_bucket.clear() + + if not get_accelerator().handles_memory_backpressure(): + event = get_accelerator().Event() + event.record() + self.param_reduce_events.append(event) + + @instrument_w_nvtx + def __avg_scatter_contiguous_grads(self, buffer_to_reduce: Tensor) -> List[Tensor]: + dtype = buffer_to_reduce.dtype + if self.communication_data_type != dtype: + buffer_to_reduce = buffer_to_reduce.to(self.communication_data_type) + if self.postscale_gradients and self.gradient_predivide_factor != 1.0: + buffer_to_reduce = buffer_to_reduce.div_(self.gradient_predivide_factor) + + world_sz = dist.get_world_size(self.dp_process_group) + rank = dist.get_rank(self.dp_process_group) + buffer_to_reduce.div_(world_sz / float(self.sequence_parallel_size)) + + dist.all_reduce(buffer_to_reduce, group=self.dp_process_group) + + if self.postscale_gradients and self.gradient_predivide_factor != world_sz: + buffer_to_reduce = buffer_to_reduce.mul(self.gradient_predivide_factor) + + if self.communication_data_type != self.dtype: + buffer_to_reduce = buffer_to_reduce.to(self.dtype) + + grad_partitions = [] + grad_offset_in_buffer = 0 + for param in self.params_in_ipg_bucket: + grad = param.grad + chunk_sz = math.ceil(grad.numel() / world_sz) + + start_offset = grad_offset_in_buffer + min(rank * chunk_sz, grad.numel()) + end_offset = grad_offset_in_buffer + min(rank * chunk_sz + chunk_sz, grad.numel()) + + partition = buffer_to_reduce[start_offset:end_offset] + if param.partition_numel() != partition.numel(): + padded_partition = torch.zeros(param.partition_numel(), device=grad.device, dtype=grad.dtype) + if partition.numel() > 0: + padded_partition[:partition.numel()] = partition + grad_partitions.append(padded_partition) + else: + grad_partitions.append(partition) + grad_offset_in_buffer += grad.numel() + + return grad_partitions + + @instrument_w_nvtx + def __avg_scatter_grads(self, params_to_reduce: List[Parameter]) -> List[Tensor]: + """average gradients and scatter partitions across ranks""" + + full_grads_for_rank = [p.grad for p in params_to_reduce] + if self.communication_data_type != self.dtype: + full_grads_for_rank = [g.to(self.communication_data_type) for g in full_grads_for_rank] + + if self.postscale_gradients and self.gradient_predivide_factor != 1.0: + full_grads_for_rank = [g.div(self.gradient_predivide_factor) for g in full_grads_for_rank] + + local_world_size = get_accelerator().device_count() + global_world_size = dist.get_world_size() + num_nodes = global_world_size // local_world_size + if self.all2all_process_group is not None and num_nodes > 1: + grad_partitions_for_rank = all_to_all_quant_reduce(full_grads_for_rank, self.all2all_process_group) + else: + grad_partitions_for_rank = reduce_scatter_coalesced(full_grads_for_rank, self.dp_process_group) + + if self.postscale_gradients and self.gradient_predivide_factor != 1.0 and self.gradient_predivide_factor != dist.get_world_size( + self.dp_process_group): + grad_partitions_for_rank = 
[g.mul(self.gradient_predivide_factor) for g in grad_partitions_for_rank] + + if self.communication_data_type != self.dtype: + grad_partitions_for_rank = [g.to(self.dtype) for g in grad_partitions_for_rank] + + return grad_partitions_for_rank + + def set_grad_positions(self): + for i, group in enumerate(self.fp16_groups): + current_offset = 0 + for param in group: + param_id = self.get_param_id(param) + num_elements = param.partition_numel() + + self.grad_position[param_id] = [int(i), int(current_offset), int(num_elements)] + #print(f"param id {param_id} i:{i}, ds_tensor {num_elements} numel {param.numel()}") + current_offset += num_elements + see_memory_usage(f"After Set Grad positions", force=False) + + def _constant_buffered_norm2(self, input, buffer_size=250000000): + norm = None + for part in input.view(-1).split(buffer_size): + if norm is None: + norm = part.data.double().norm(2)**2.0 + else: + norm += part.data.double().norm(2)**2.0 + return norm**0.5 + + def set_norm_for_param_grad_in_gpu(self, param): + param_id = self.get_param_id(param) + #self.norm_for_param_grads[param_id] = param.grad.data.double().norm(2) + #Using a more memory efficient version + self.norm_for_param_grads[param_id] = self._constant_buffered_norm2(param.grad) + + def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param, fp32_grad_tensor): + with get_accelerator().stream(self.copy_grad_stream): + param_id = self.get_param_id(param) + src_tensor = param.grad.view(-1).float() + #print(f"src_tensor {src_tensor.size()} and fp32 grad {fp32_grad_tensor.size()}") + fp32_grad_tensor.copy_(src_tensor, non_blocking=True) + param.grad = None + + def complete_grad_norm_calculation_for_cpu_offload(self, params): + total_norm = 0.0 + norm_type = 2.0 + for p in params: + if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): + param_id = self.get_param_id(p) + if param_id in self.norm_for_param_grads.keys(): + param_norm = self.norm_for_param_grads[param_id] + total_norm += param_norm**2 + + # Sum across all model parallel GPUs. + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + + dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group) + + self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM) + + total_norm = total_norm_cuda[0]**(1. 
/ norm_type) + + norm_is_inf = total_norm.isinf() + norm_is_nan = total_norm.isnan() + inf_or_nan = norm_is_nan.logical_or(norm_is_inf) + + err = torch.tensor(-1.0, device=self.device, dtype=torch.float) + total_norm = inf_or_nan * err + inf_or_nan.logical_not() * total_norm + + return total_norm + + @instrument_w_nvtx + def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None: + offload_fp32_gradients = {} + offload_fp32_offsets = {} + buffers = [] + for param, grad_partition in zip(params_to_release, grad_partitions): + + contains_real_data = param.partition_numel() * dist.get_rank(self.dp_process_group) < param.ds_numel + if not contains_real_data: + # this grad partition is empty - don't need to do anything + param.grad = None + continue + + # move or accumulate gradient partition to target buffer + grad_buffer = self.__param_id_to_grad_partition[param.ds_id].narrow(0, 0, grad_partition.numel()) + buffers.append(grad_buffer) + if self.micro_step_id == 0: # don't accumulate + grad_buffer.copy_(grad_partition, non_blocking=True) + # ensure grad buffer is a CUDA buffer to speed up the next few + # operations and so it can be used asynchronously + grad_buffer = grad_buffer.to(grad_partition.device, non_blocking=True) + elif get_accelerator().on_accelerator(grad_buffer): + grad_buffer.add_(grad_partition.to(self.gradient_accumulation_dtype).view(grad_buffer.shape)) + else: + # if dst is CPU, copy first to src device, do the addition + # there, then move back to dst. adding directly to cpu is very slow + cuda_grad_buffer = grad_buffer.to(grad_partition.device, non_blocking=True) + cuda_grad_buffer.add_(grad_partition.to(self.gradient_accumulation_dtype).view(cuda_grad_buffer.shape)) + grad_buffer.copy_(cuda_grad_buffer, non_blocking=True) + # ensure grad buffer is a CUDA buffer to speed up the next few + # operations and so it can be used asynchronously + grad_buffer = cuda_grad_buffer + + # offload the gradient partition if applicable + if self.offload_optimizer: + i, dest_offset, _ = self.grad_position[self.get_param_id(param)] + + if self.is_gradient_accumulation_boundary: + self.norm_for_param_grads[self.get_param_id(param)] = self._constant_buffered_norm2(grad_buffer) + + if self._swappable_optimizer_subgroup(i): + if not i in offload_fp32_gradients.keys(): + offload_fp32_gradients[i] = [] + offload_fp32_offsets[i] = [] + + offload_fp32_gradients[i].append(grad_buffer.float()) + offload_fp32_offsets[i].append(dest_offset) + else: + fp32_grad_tensor = self.fp32_partitioned_groups_flat[i].grad.narrow( + 0, dest_offset, grad_buffer.numel()) + fp32_grad_tensor.copy_(grad_buffer) + + # free the gradient + if not get_accelerator().is_synchronized_device(): + param.grad.record_stream(get_accelerator().current_stream()) + param.grad = None + + if self.offload_optimizer and self.swap_optimizer: + for i in offload_fp32_gradients.keys(): + self.optimizer_swapper.swap_out_gradients(parameter=self.fp32_partitioned_groups_flat[i], + gradient_offsets=offload_fp32_offsets[i], + gradient_tensors=offload_fp32_gradients[i]) + return buffers + + def reduce_ready_partitions_and_remove_grads(self, param): + #print_rank_0(f"Backward {debug_param2name_id_shape(param)}", force=True) + self.reduce_independent_p_g_buckets_and_remove_grads(param) + + def zero_reduced_gradients(self, partition_id, i): + + def are_all_related_partitions_reduced(params_id): + for partition_id in self.param_to_partition_ids[i][params_id]: + if not self.is_partition_reduced[i][partition_id]: 
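+                    # a partition that still needs this gradient has not been reduced yet,
+                    # so the gradient must be kept alive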
+ return False + return True + + for params_id in self.is_grad_computed[i][partition_id]: + if are_all_related_partitions_reduced(params_id): + self.param_dict[params_id].grad = None + + def quantize_nontrainable_params(self): + """ In ZeRO-3, when the zero_quantized_nontrainable_weights flag is set, we quantize the non-trainable weights and also store them in quantized format. However, this check for trainable/non-trainable is done when deepspeed initializes the partitioning. So, if the user changes the trainable/non-trainable status of a parameter after the partitioning is done (e.g. LoRA), the user needs to re-quantize the non-trainable weights by calling this function. + """ + if not self.zero_quantized_nontrainable_weights: + print_rank_0( + f"Warning: quantize_nontrainable_params() called with zero_quantized_nontrainable_weights disabled, return without doing anything", + force=True) + return + quantizer_module = CUDAQuantizer() + + def quantize_dstensor(tensor): + assert tensor.dtype == torch.float16, f"quantize_dstensor() expects tensor.dtype == torch.float16, got {tensor.dtype}" + partition_size = tensor.ds_numel + ds_status = tensor.status + final_location = tensor.final_location + tensor, tensor.ds_quant_scale = quantizer_module.quantize(tensor) + tensor.ds_numel = partition_size + tensor.status = ds_status + tensor.final_location = final_location + tensor.requires_grad = False + return tensor + + for param in self.module.parameters(): + if hasattr(param, "ds_tensor") and (param.ds_tensor.numel() <= 2048 or param.ds_numel <= 500000): + # skip small parameters + continue + if hasattr(param, + "ds_tensor") and not param.requires_grad and not hasattr(param.ds_tensor, "ds_quant_scale"): + param.ds_tensor = quantize_dstensor(param.ds_tensor) + if hasattr(param, "ds_secondary_tensor") and not param.requires_grad and not hasattr( + param.ds_secondary_tensor, "ds_quant_scale") and param.ds_secondary_tensor is not None: + param.ds_secondary_tensor = quantize_dstensor(param.ds_secondary_tensor) + get_accelerator().synchronize() + + def flatten_and_print(self, message, tensors, start=0, n=5): + flatten_tensor = self.flatten(tensors) + + def print_func(): + logger.info(flatten_tensor.contiguous().view(-1).narrow(0, start, n)) + + self.sequential_execution(print_func, message) + + def get_grads_to_reduce(self, i, partition_id): + + def get_reducible_portion(key): + grad = self.param_dict[key].grad + total_elements = grad.numel() + start = self.grad_start_offset[i][partition_id][key] + num_elements = min(total_elements - start, + self.partition_size[i] - self.grad_partition_insertion_offset[i][partition_id][key]) + if not pg_correctness_test: + if num_elements == total_elements: + return grad + else: + return grad.contiguous().view(-1).narrow(0, int(start), int(num_elements)) + else: + if num_elements == total_elements: + return grad.clone() + else: + return grad.clone().contiguous().view(-1).narrow(0, int(start), int(num_elements)) + + grads_to_reduce = [] + for key in self.is_grad_computed[i][partition_id]: + grad = get_reducible_portion(key) + grads_to_reduce.append(grad) + return grads_to_reduce + + def sequential_execution(self, function, message, group=None): + if group is None: + group = self.dp_process_group + if dist.get_rank(group=group) == 0: + logger.info(message) + for id in range(dist.get_world_size(group=group)): + if id == dist.get_rank(group=group): + function() + dist.barrier(group=group) + + def set_none_gradients_to_zero(self, i, partition_id): + for param_id in 
self.is_grad_computed[i][partition_id]: + param = self.param_dict[param_id] + if param.grad is None: + param.grad = torch.zero_like(param) + + ######################Reduction Related Methods############################## + + def allreduce_bucket(self, bucket, rank=None, log=None): + rank = None + tensor = self.flatten(bucket) + + tensor_to_allreduce = tensor + + if pg_correctness_test: + communication_data_type = torch.float32 + else: + communication_data_type = self.communication_data_type + + if communication_data_type != tensor.dtype: + tensor_to_allreduce = tensor.to(communication_data_type) + + tensor_to_allreduce.div_(dist.get_world_size(group=self.dp_process_group) / float(self.sequence_parallel_size)) + + if rank is None: + # "All Reducing" + dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) + else: + global_rank = dist.get_global_rank(self.dp_process_group, rank) + dist.reduce(tensor_to_allreduce, global_rank, group=self.dp_process_group) + + if communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce: + if rank is None or rank == dist.get_rank(group=self.dp_process_group): + tensor.copy_(tensor_to_allreduce) + + return tensor + + # if rank is specified do a reduction instead of an allreduce + def allreduce_and_copy(self, small_bucket, rank=None, log=None): + with get_accelerator().stream(self.reduction_stream): + allreduced = self.allreduce_bucket(small_bucket, rank=rank, log=log) + if rank is None or rank == dist.get_rank(group=self.dp_process_group): + for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): + buf.copy_(synced) + + def allreduce_no_retain(self, bucket, numel_per_bucket=500000000, rank=None, log=None): + small_bucket = [] + numel = 0 + for tensor in bucket: + small_bucket.append(tensor) + numel = numel + tensor.numel() + if numel > numel_per_bucket: + self.allreduce_and_copy(small_bucket, rank=rank, log=None) + small_bucket = [] + if len(small_bucket) > 0: + self.allreduce_and_copy(small_bucket, rank=rank, log=log) + + ############################################################################# + ############################################################################# + ############################################################################# + + # views the tensor as multiple partitions and returns + # those partitions + def get_data_parallel_partitions(self, tensor): + partitions = [] + + dp = dist.get_world_size(group=self.dp_process_group) + dp_id = dist.get_rank(group=self.dp_process_group) + + total_num_elements = tensor.numel() + + base_size = total_num_elements // dp + remaining = total_num_elements % dp + + start = 0 + for id in range(dp): + partition_size = base_size + if id < remaining: + partition_size = partition_size + 1 + partitions.append(tensor.narrow(0, start, partition_size)) + start = start + partition_size + return partitions + + def get_partition_info(self, tensor_list, partition_size, partition_id): + params_in_partition = [] + params_not_in_partition = [] + + start_index = partition_size * partition_id + end_index = partition_size * (partition_id + 1) + + current_index = 0 + first_offset = 0 + + for tensor in tensor_list: + + tensor_size = tensor.numel() + + if start_index <= current_index < end_index: + params_in_partition.append(tensor) + + elif current_index < start_index < (current_index + tensor_size): + params_in_partition.append(tensor) + + assert (first_offset == 0 + ), "This can happen either zero or only once as this must be the first tensor in the partition" 
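+                # the partition boundary falls inside this tensor: first_offset records how many of its
+                # leading elements belong to the preceding partition so that get_flat_partition() can skip them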
+ first_offset = start_index - current_index + + else: + params_not_in_partition.append(tensor) + + current_index = current_index + tensor_size + + return params_in_partition, params_not_in_partition, first_offset + + @instrument_w_nvtx + def zero_grad(self, set_to_none=True): + """ + Zero FP16 parameter grads. + """ + self.micro_step_id = 0 + + # FP32 grad should never exist. + # For speed, set model fp16 grad to None by default + for group in self.fp16_groups: + for p in group: + if set_to_none: + if p.grad is not None and get_accelerator().on_accelerator(p.grad): + p.grad.record_stream(get_accelerator().current_stream()) + p.grad = None + else: + if p.grad is not None: + p.grad.detach_() + p.grad.zero_() + + def _model_parallel_all_reduce(self, tensor, op): + """ Perform all reduce within model parallel group, if any. + """ + if self.model_parallel_group is None: + pass + else: + dist.all_reduce(tensor=tensor, op=op, group=self.model_parallel_group) + + @instrument_w_nvtx + def get_grad_norm_direct(self, gradients, params, norm_type=2): + """Clips gradient norm of an iterable of parameters. + + This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and + added functionality to handle model parallel parameters. Note that + the gradients are modified in place. + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + max_norm (float or int): max norm of the gradients + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + + Returns: + Total norm of the parameters (viewed as a single vector). + """ + norm_type = float(norm_type) + if norm_type == inf: + total_norm = max(g.data.abs().max() for g in gradients) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=self.dp_process_group) + + # Take max across all GPUs. + self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX) + total_norm = total_norm_cuda[0] + else: + # if dist.get_rank() == 0: + # logger.info(f"Total Norm beginning {total_norm}") + grad_norms = [] + for g, p in zip(gradients, params): + if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): + grad_norms.append(g.to(get_accelerator().device_name(), non_blocking=True).double().norm(2)) + + # Sum across all model parallel GPUs. + if len(grad_norms) == 0: + # FIX https://github.com/microsoft/DeepSpeed/issues/3564 + total_norm_cuda = torch.tensor(0, + dtype=gradients[0].dtype).to(get_accelerator().device_name()).double() + else: + total_norm_cuda = torch.sum(torch.pow(torch.stack(grad_norms), 2)) + + dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group) + + self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM) + + total_norm = total_norm_cuda**(1. / norm_type) + + norm_is_inf = total_norm.isinf() + norm_is_nan = total_norm.isnan() + inf_or_nan = norm_is_nan.logical_or(norm_is_inf) + + err = torch.tensor(-1.0, device=self.device, dtype=torch.float) + total_norm = inf_or_nan * err + inf_or_nan.logical_not() * total_norm + + return total_norm + + # creates a flat fused tensor from the tensor list starting at the first_offset + # in the first tensor of the list. 
If there are not enough elements in the tensor + # list then the flat tensor will be padded with zeros + def get_flat_partition(self, tensor_list, first_offset, partition_size, return_tensor_list=False): + flat_tensor_list = [] + current_size = 0 + for i, tensor in enumerate(tensor_list): + if tensor.grad is None: + tensor.grad = torch.zeros_like(tensor) + + tensor = tensor.grad + num_elements = tensor.numel() + tensor_offset = 0 + + # we need to offset to get to the right element + if i == 0 and first_offset > 0: + tensor_offset = first_offset + num_elements = num_elements - tensor_offset + + # we dont need all elements of the tensor + if num_elements > (partition_size - current_size): + num_elements = partition_size - current_size + + # we need a narrow view of the tensor based on the tensor offset and number of elements that + # we need from this tensor + if tensor_offset > 0 or num_elements < tensor.numel(): + flat_tensor_list.append(tensor.contiguous().view(-1).narrow(0, int(tensor_offset), int(num_elements))) + else: + flat_tensor_list.append(tensor) + + current_size = current_size + num_elements + + # this means its the last partition and does not align with the dp boundary. We need to pad before flattening + if current_size < partition_size: + flat_tensor_list.append( + torch.zeros(int(partition_size - current_size), + dtype=tensor_list[0].dtype, + device=tensor_list[0].device)) + + if return_tensor_list: + return flat_tensor_list + + return self.flatten(flat_tensor_list) + + def free_grad_in_param_list(self, param_list): + for p in param_list: + p.grad = None + + def reset_cpu_buffers(self): + self.norm_for_param_grads = {} + + def _pre_step(self): + self.micro_step_id = 0 + + print_rank_0(f"Inside Step function") + see_memory_usage(f"In step before checking overflow", force=False) + + print_rank_0("Finished Tracing at Beginning of Step") + self._get_param_coordinator(training=True).hierarchy = 0 + + print_rank_0("Finished Tracing at Beginning of Step") + + @instrument_w_nvtx + def _get_norm_groups(self): + norm_groups = [] + for i, group in enumerate(self.fp16_groups): + if self.offload_optimizer: + norm_groups.append(self.complete_grad_norm_calculation_for_cpu_offload(self.fp16_groups[i])) + else: + norm_groups.append(self.get_grad_norm_direct(self.averaged_gradients[i], self.fp16_groups[i])) + return norm_groups + + @instrument_w_nvtx + def _prepare_fp32_grad_for_sub_group(self, sub_group_id): + partition_id = dist.get_rank(group=self.dp_process_group) + + single_grad_partition = self.flatten(self.averaged_gradients[sub_group_id]).to( + self.fp32_partitioned_groups_flat[sub_group_id].dtype) + + assert single_grad_partition.numel() == self.fp32_partitioned_groups_flat[sub_group_id].numel(), \ + "averaged gradients have different number of elements that partition size {} {} {} {}".format( + single_grad_partition.numel(), self.fp32_partitioned_groups_flat[sub_group_id].numel(), sub_group_id, partition_id) + + self.fp32_partitioned_groups_flat[sub_group_id].grad = single_grad_partition + + # release all the gradient since we have already created a necessary copy in dp_grad_partition + self.zero_grad(set_to_none=True) + + if not get_accelerator().is_synchronized_device(): + for grad in filter(lambda g: get_accelerator().on_accelerator(g), self.averaged_gradients[sub_group_id]): + grad.record_stream(get_accelerator().current_stream()) + + self.averaged_gradients[sub_group_id] = None + + @instrument_w_nvtx + def _prepare_sub_group(self, sub_group_id, timer_names): + 
see_memory_usage(f'Before prepare optimizer sub group {sub_group_id}', force=False) + if self._swappable_optimizer_subgroup(sub_group_id): + self._optimizer_states_and_gradient_swap_in(sub_group_id, timer_names) + elif not self.offload_optimizer: + self._prepare_fp32_grad_for_sub_group(sub_group_id) + see_memory_usage(f'After prepare optimizer sub group {sub_group_id}', force=False) + + def _optimizer_states_and_gradient_swap_in(self, sub_group_id, timer_names): + param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id] + fp32_param_id = self.get_param_id(self.fp32_partitioned_groups_flat[sub_group_id]) + assert self._swappable_optimizer_subgroup(sub_group_id), \ + f'Parameter {fp32_param_id} of numel={param_length} is not swappable' + + see_memory_usage(f'pre-step Before swapping in optimizer tensors {sub_group_id}', force=False) + timer_names.add(OPTIMIZER_SWAP_IN_STATE_TIMER) + self.timers(OPTIMIZER_SWAP_IN_STATE_TIMER).start() + + self.optimizer_swapper.swap_in_optimizer_state( + parameter=self.fp32_partitioned_groups_flat[sub_group_id], + async_parameter=self.next_swappable_fp32_partitioned_groups[sub_group_id]) + + self.timers(OPTIMIZER_SWAP_IN_STATE_TIMER).stop() + see_memory_usage(f'pre-step After swapping in optimizer tensors {sub_group_id}', force=False) + + @instrument_w_nvtx + def _release_sub_group(self, sub_group_id, timer_names): + see_memory_usage(f'Before release optimizer sub group {sub_group_id}', force=False) + # get rid of the fp32 gradients. Not needed anymore + if not self.offload_optimizer: + self.fp32_partitioned_groups_flat[sub_group_id].grad = None + + if self._swappable_optimizer_subgroup(sub_group_id): + self._optimizer_states_and_gradient_swap_out(sub_group_id, timer_names) + see_memory_usage(f'After release optimizer sub group {sub_group_id}', force=False) + + # create a flat tensor aligned at the alignment boundary + @instrument_w_nvtx + def flatten_dense_tensors_aligned(self, tensor_list, alignment): + num_elements = 0 + for tens in tensor_list: + num_elements = num_elements + tens.numel() + + remaining = num_elements % alignment + + if remaining: + elements_to_add = alignment - remaining + pad_tensor = torch.zeros(elements_to_add, device=tensor_list[0].device, dtype=tensor_list[0].dtype) + padded_tensor_list = tensor_list + [pad_tensor] + + num_elements = num_elements + elements_to_add + else: + padded_tensor_list = tensor_list + + return self.flatten(padded_tensor_list) + + def _optimizer_states_and_gradient_swap_out(self, sub_group_id, timer_names): + param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id] + fp32_param_id = self.get_param_id(self.fp32_partitioned_groups_flat[sub_group_id]) + assert self._swappable_optimizer_subgroup(sub_group_id), \ + f'Parameter {fp32_param_id} of numel={param_length} is not swappable' + + see_memory_usage(f'post-step Before swapping out optimizer tensors {sub_group_id}', force=False) + timer_names.add(OPTIMIZER_SWAP_OUT_STATE_TIMER) + self.timers(OPTIMIZER_SWAP_OUT_STATE_TIMER).start() + + self.optimizer_swapper.swap_out_optimizer_state( + parameter=self.fp32_partitioned_groups_flat[sub_group_id], + async_swap=self.next_swappable_fp32_partitioned_groups[sub_group_id] is not None) + + self.timers(OPTIMIZER_SWAP_OUT_STATE_TIMER).stop() + see_memory_usage(f'post-step After swapping out optimizer tensors {sub_group_id}', force=False) + + # get rid of the fp32 gradients. 
Not needed anymore + self.fp32_partitioned_groups_flat[sub_group_id].grad = None + + def _unflatten_partitioned_parameters(self, sub_group_id): + updated_params = self.unflatten(self.fp16_partitioned_groups_flat[sub_group_id], + self.fp16_partitioned_groups[sub_group_id]) + + for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params): + partitioned_param.data = q.data + + def _overflow_clean_up(self, prev_scale): + see_memory_usage('After overflow before clearing gradients', force=False) + self.zero_grad(set_to_none=True) + + if self.offload_optimizer: + self.reset_cpu_buffers() + else: + self.averaged_gradients = {} + + see_memory_usage('After overflow after clearing gradients', force=False) + + @instrument_w_nvtx + def _overflow_check_and_loss_scale_update(self): + + # First compute norm for all group so we know if there is overflow + if self.dtype == torch.float16: + self.check_overflow() + + #loss scaling related computation + prev_scale = self.loss_scale + self._update_scale(self.overflow) + + if self.overflow: + self._overflow_clean_up(prev_scale) + + return self.overflow + + @instrument_w_nvtx + def _post_step(self, timer_names): + if self.offload_optimizer: + self.reset_cpu_buffers() + + #Gathering persisting parameters + if len(self.persistent_parameters) > 0: + self.persistent_parameters[0].all_gather(self.persistent_parameters) + + if self.swap_optimizer: + self.optimizer_swapper.log_timers() + + self.invalidate_secondary_tensor() + + self.timers.log(timer_names) + + see_memory_usage('After zero_optimizer step', force=False) + print_rank_0(f"------------------Finishing Step-----------------------") + + @instrument_w_nvtx + def _reassign_or_swap_out_partitioned_parameters(self, sub_group_id): + if self.fp16_partitioned_groups_flat[sub_group_id] is not None: + self.fp16_partitioned_groups_flat[sub_group_id].data.copy_( + self.fp32_partitioned_groups_flat[sub_group_id].data) + + #unflatten fp16 parameter subgroup + self._unflatten_partitioned_parameters(sub_group_id) + else: + self._partitioned_params_swap_out(sub_group_id) + + def override_loss_scale(self, loss_scale): + if loss_scale != self.external_loss_scale: + logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}') + self.custom_loss_scaler = True + self.external_loss_scale = loss_scale + + @instrument_w_nvtx + def step(self, closure=None): + """ + Not supporting closure. 
+ """ + self._pre_step() + self._partition_all_parameters() + + #checks for overflow, adjust the loss scale accordingly + if self._overflow_check_and_loss_scale_update(): + if self.swap_optimizer: + self.optimizer_swapper.log_timers() + return + + norm_groups = self._get_norm_groups() + scaled_global_grad_norm = torch.norm(torch.stack(norm_groups)) + + # Stash unscaled gradient norm + self._global_grad_norm = scaled_global_grad_norm / self.loss_scale + + timer_names = set() + + timer_names.add(OPTIMIZER_STEP_TIMER) + self.timers(OPTIMIZER_STEP_TIMER).start() + + #update parameters one sub group at a time + for sub_group_id, group in enumerate(self.fp16_groups): + + #prepare optimizer states, gradients and fp32 parameters for update + self._prepare_sub_group(sub_group_id, timer_names) + + #scale the fp32 gradients + self.unscale_and_clip_grads(sub_group_id, scaled_global_grad_norm) + + #apply the optimizer step on the sub group and copy fp32 parameters to fp16 + self._optimizer_step(sub_group_id) + + #put fp16 parameters in appropriate location + self._reassign_or_swap_out_partitioned_parameters(sub_group_id) + + #release memory or swap out optimizer states of fp32 parameters + self._release_sub_group(sub_group_id, timer_names) + + self.timers(OPTIMIZER_STEP_TIMER).stop() + + self._post_step(timer_names) + + # warn user about caching allocator flushes + memory_stats = get_accelerator().memory_stats() + alloc_retries = memory_stats.get("num_alloc_retries") + if alloc_retries is None: + alloc_retries = 0 + if alloc_retries > self.n_caching_allocator_flushes: + if dist.get_rank() == 0: + logger.warning( + "%d pytorch allocator cache flushes since last step. this happens " + "when there is high memory pressure and is detrimental to " + "performance. if this is happening frequently consider adjusting " + "settings to reduce memory consumption. 
If you are unable to " + "make the cache flushes go away consider adding " + "get_accelerator().empty_cache() calls in your training loop to ensure " + "that all ranks flush their caches at the same time", + alloc_retries - self.n_caching_allocator_flushes) + self.n_caching_allocator_flushes = alloc_retries + + def dump_pre_step_gradients(self, debug_fp32_grads): + # Dump gradient norms for debugging + for i, _ in enumerate(self.fp16_groups): + print(f'Pre-Step Dump Norms for Group {i} FP16P, FP16G, FP32G, FP32GUC') + for fp16_param, fp32_grad in zip(self.fp16_groups[i], debug_fp32_grads[i]): + param_id = self.get_param_id(fp16_param) + fp16_grad_norm = self.debug_fp16_grads[i][param_id] + + fp32_grad_norm = [float(t.data.float().norm(2)) for t in fp32_grad] + norm_list = [fp16_grad_norm, fp32_grad_norm] + print(f'Pre-Step Norms {i} {param_id} = {norm_list}') + + def dump_post_step_gradients(self): + # Dump gradient norms for debugging + for i, group in enumerate(self.fp16_groups): + print(f'Post-Step Dump Norms for Group {i} FP16P, FP16DS, FP16FLAT, FP32FLAT') + unflat_fp16 = self.unflatten(self.fp16_groups_flat[i], self.fp16_groups[i]) + unflat_fp32 = self.unflatten(self.fp32_partitioned_groups_flat[i], self.fp16_groups[i]) + for j, p in enumerate(self.fp16_groups[i]): + param_id = self.get_param_id(p) + param_norm = float(p.data.float().norm(2)) + ds_norm = float(p.ds_tensor.data.float().norm(2)) + + unflat_norm = [float(t.data.float().norm(2)) for t in [unflat_fp16[j], unflat_fp32[j]]] + norm_list = [param_norm, ds_norm] + unflat_norm + print(f'Post-Step Norms {i} {param_id} = {norm_list}') + + @instrument_w_nvtx + def unscale_and_clip_grads(self, sub_group_id, total_norm): + # compute combined scale factor for this group + combined_scale = self.loss_scale + if self.clip_grad > 0.: + # norm is in fact norm*scale + clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad + if clip > 1: + combined_scale = clip * self.loss_scale + + self.fp32_partitioned_groups_flat[sub_group_id].grad.mul_(1. 
/ combined_scale) + + def _check_overflow(self, partition_gradients=True): + self.overflow = self.has_overflow(partition_gradients) + + # `params` is a list / generator of torch.Variable + def has_overflow_serial(self, params, is_grad_list=False): + for p in params: + if p.grad is not None and self._has_inf_or_nan(p.grad.data): + return True + + return False + + def has_overflow_partitioned_grads_serial(self): + for i in range(len(self.fp16_groups)): + for j, grad in enumerate(self.averaged_gradients[i]): + if grad is not None and self._has_inf_or_nan(grad.data, j): + return True + return False + + @instrument_w_nvtx + def has_overflow(self, partition_gradients=True): + if partition_gradients: + with get_accelerator().stream(self.reduce_and_partition_stream): + if hasattr(self.inf_or_nan_tracker, "logical_or_"): + self.inf_or_nan_tracker.logical_or_(torch.isinf(self.grad_partitions_flat_buffer).any()) + self.inf_or_nan_tracker.logical_or_(torch.isnan(self.grad_partitions_flat_buffer).any()) + else: + # logical_or_ not available in older versions of pytorch + self.inf_or_nan_tracker += torch.isinf(self.grad_partitions_flat_buffer).any() + self.inf_or_nan_tracker += torch.isnan(self.grad_partitions_flat_buffer).any() + self.inf_or_nan_tracker = self.inf_or_nan_tracker > 0 + + overflow_gpu = self.inf_or_nan_tracker.clone().to(torch.uint8) + self.inf_or_nan_tracker.zero_() + + if not get_accelerator().resolves_data_dependency(): + get_accelerator().default_stream().wait_stream(self.reduce_and_partition_stream) + dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.dp_process_group) + + else: + params = [] + for group in self.fp16_groups: + for param in group: + params.append(param) + + overflow = self.has_overflow_serial(params, is_grad_list=partition_gradients) + overflow_gpu = get_accelerator().ByteTensor([overflow]) + + # Since each model parallel GPU carries only part of the model, + # make sure overflow flag is synced across all the model parallel GPUs + self._model_parallel_all_reduce(tensor=overflow_gpu, op=dist.ReduceOp.MAX) + + overflow = overflow_gpu[0].item() + return bool(overflow) + + # `x` is a torch.Tensor + @staticmethod + def _has_inf_or_nan(x, j=None): + try: + # if x is half, the .float() incurs an additional deep copy, but it's necessary if + # Pytorch's .sum() creates a one-element tensor of the same type as x + # (which is true for some recent version of pytorch). + cpu_sum = float(x.float().sum()) + # More efficient version that can be used if .sum() returns a Python scalar + # cpu_sum = float(x.sum()) + except RuntimeError as instance: + # We want to check if inst is actually an overflow exception. + # RuntimeError could come from a different error. + # If so, we still want the exception to propagate. + if "value cannot be converted" not in instance.args[0]: + raise + return True + else: + if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: + return True + return False + + @instrument_w_nvtx + def backward(self, loss, retain_graph=False): + """ + :attr:`backward` performs the following steps: + + 1. fp32_loss = loss.float() + 2. scaled_loss = fp32_loss*loss_scale + 3. 
scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves + """ + if self.swap_optimizer: + self.optimizer_swapper.pre_backward() + + see_memory_usage(f"Before backward", force=False) + + if self.custom_loss_scaler: + scaled_loss = self.external_loss_scale * loss + scaled_loss.backward() + else: + self.loss_scaler.backward(loss.float(), retain_graph=retain_graph) + + self._get_param_coordinator(training=True).reset_step() + + if self.swap_optimizer: + self.optimizer_swapper.post_backward() + + def get_fp32_grad_partitions(self) -> Dict[int, Dict[int, Tensor]]: + """get fp32 gradient partition dictionary + accessed as grad_dict[parameter_group_index][parameter_index] + """ + if not get_accelerator().resolves_data_dependency(): + self.reduce_and_partition_stream.synchronize() + grad_dict = collections.defaultdict(dict) + if self.offload_optimizer: + for group in self.fp16_groups: + for param_idx, param in enumerate(group): + group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)] + fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow(0, dest_offset, num_elements) + grad_dict[group_idx][param_idx] = fp32_grad + else: + for group_idx, group in self.averaged_gradients.items(): + for param_idx, gradient in enumerate(group): + grad_dict[group_idx][param_idx] = gradient.float() + + return grad_dict + + def _fp32_state_allgather(self, param, fp32_state_partition): + reduce_buffer = torch.zeros(self.partition_count * fp32_state_partition.numel(), + dtype=torch.float32, + device=param.device) + my_rank = dist.get_rank(group=self.dp_process_group) + partition = reduce_buffer.narrow(0, fp32_state_partition.numel() * my_rank, fp32_state_partition.numel()) + partition.data.copy_(fp32_state_partition.data, non_blocking=False) + dist.all_gather_into_tensor(reduce_buffer, partition, group=self.dp_process_group) + return reduce_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape) + + def get_fp32_grad_for_param(self, param) -> Tensor: + if not param.requires_grad: + return None + + if not get_accelerator().resolves_data_dependency(): + self.reduce_and_partition_stream.synchronize() + + if self.offload_optimizer: + group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)] + fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow(0, dest_offset, num_elements) + else: + fp32_grad = self.__param_id_to_grad_partition[param.ds_id].float() + + return self._fp32_state_allgather(param, fp32_grad) + + def _get_fp32_opt_state_partition(self, param, optim_state_key=None): + if not get_accelerator().resolves_data_dependency(): + self.reduce_and_partition_stream.synchronize() + + group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)] + + if self._swappable_optimizer_subgroup(group_idx): + self._optimizer_states_and_gradient_swap_in(group_idx) + + fp32_param = self.fp32_partitioned_groups_flat[group_idx] + if optim_state_key is None: + fp32_opt_state = fp32_param.narrow(0, dest_offset, num_elements) + else: + fp32_opt_state = self.optimizer.state[fp32_param][optim_state_key].narrow(0, dest_offset, num_elements) + + return fp32_opt_state, group_idx + + def get_full_hp_param(self, param, optim_state_key=None) -> Tensor: + if not param.requires_grad: + return None + + fp32_opt_state, group_idx = self._get_fp32_opt_state_partition(param, optim_state_key) + hp_param = self._fp32_state_allgather(param, fp32_opt_state) + + if 
self._swappable_optimizer_subgroup(group_idx): + self._optimizer_states_and_gradient_swap_out(group_idx) + + return hp_param + + def set_full_hp_param(self, value, param, optim_state_key=None): + if not param.requires_grad: + return + + assert value.numel( + ) == param.ds_numel, f" Number of elements do not match: {value.numel()} != {param.ds_numel}" + + fp32_opt_state_partition, group_idx = self._get_fp32_opt_state_partition(param, optim_state_key) + my_rank = dist.get_rank(group=self.dp_process_group) + value_partition = value.flatten().narrow(0, + fp32_opt_state_partition.numel() * my_rank, + fp32_opt_state_partition.numel()) + fp32_opt_state_partition.data.copy_(value_partition.data) + + if self._swappable_optimizer_subgroup(group_idx): + self._optimizer_states_and_gradient_swap_out(group_idx) + + ### Local API START ### + + def get_local_fp32_param(self, param, optim_state_key=None) -> Tensor: + if not param.requires_grad: + return None + fp32_opt_state, group_idx = self._get_fp32_opt_state_partition(param, optim_state_key) + return fp32_opt_state + + def get_local_fp32_grad_for_param(self, param) -> Tensor: + if not param.requires_grad: + return None + + if not get_accelerator().resolves_data_dependency(): + self.reduce_and_partition_stream.synchronize() + + if self.offload_optimizer: + group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)] + fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow(0, dest_offset, num_elements) + else: + fp32_grad = self.__param_id_to_grad_partition[param.ds_id].float() + return fp32_grad + + def set_local_hp_param(self, value, param, optim_state_key=None): + if not param.requires_grad: + return + + assert hasattr(param, "ds_tensor"), f" The parameter does not contain the partitioned copy of the tensor." 
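+ # NOTE (annotation, not in the upstream source): unlike set_full_hp_param above, the caller passes only
+ # this rank's local shard, so no per-rank narrowing is done here; the value must therefore match the
+ # size of this rank's ds_tensor partition, which the asserts below check.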
+ assert value.numel() == param.ds_tensor.numel( + ), f" Number of elements do not match: {value.numel()} != {param.ds_tensor.ds_numel}" + + fp32_opt_state_partition, group_idx = self._get_fp32_opt_state_partition(param, optim_state_key) + value_partition = value.flatten() + fp32_opt_state_partition.data.copy_(value_partition.data) + + if self._swappable_optimizer_subgroup(group_idx): + self._optimizer_states_and_gradient_swap_out(group_idx) + logger.info(f"[set_local_hp_param][update the params' value successfully]") + + ### Local API END ### + + @instrument_w_nvtx + def _partition_all_parameters(self): + self.parameter_offload.partition_all_parameters() + + def check_overflow(self, partition_gradients=True): + self._check_overflow(partition_gradients) + + def _update_scale(self, has_overflow=False): + self.loss_scaler.update_scale(has_overflow) + + # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" + def _get_state(self): + return self.optimizer.state + + def _set_state(self, value): + self.optimizer.state = value + + state = property(_get_state, _set_state) + + # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" + # (for example, to adjust the learning rate) + def _get_param_groups(self): + return self.optimizer.param_groups + + def _set_param_groups(self, value): + self.optimizer.param_groups = value + self.trainable_param_groups = self._get_trainable_parameter_groups() + + param_groups = property(_get_param_groups, _set_param_groups) + + # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" + def _get_loss_scale(self): + if self.custom_loss_scaler: + return self.external_loss_scale + else: + return self.loss_scaler.cur_scale + + def _set_loss_scale(self, value): + self.loss_scaler.cur_scale = value + + loss_scale = property(_get_loss_scale, _set_loss_scale) + cur_scale = property(_get_loss_scale, _set_loss_scale) + + def _get_lean_tensors(self, padded_flattened_tensor, group_tensors, paddings): + # Remove paddings from flattened tensor + individual_tensors = self.unflatten(padded_flattened_tensor, group_tensors) + lean_lengths = [t.numel() - pad for t, pad in zip(group_tensors, paddings)] + lean_tensors = [t[:len] for t, len in zip(individual_tensors, lean_lengths)] + #logger.info(f'rank {dist.get_rank()}: lean_tensors = {[t.numel() for t in lean_tensors]}') + return lean_tensors + + #TODO REVISIT this for stage 3 + def get_lean_optimizer_state(self): + # Return optimizer states after removing paddings. + # This method assumes that each param group contains a single flattened tensor. + optimizer_groups_state = [] + + for i, group in enumerate(self.optimizer.param_groups): + p = group['params'][0] + lean_state = {} + for key, value in self.optimizer.state[p].items(): + if torch.is_tensor(value): + padded_lens = [t.numel() for t in self.fp16_partitioned_groups[i]] + lean_state[key] = self._get_lean_tensors(value, self.fp16_partitioned_groups[i], + self.groups_padding[i]) + lean_flat_len = sum([t.numel() for t in lean_state[key]]) + else: + lean_state[key] = value + + optimizer_groups_state.append(lean_state) + + return optimizer_groups_state + + def get_groups_without_padding(self, groups_with_padding): + # Return group tensor after removing paddings added for alignment to DP world size. 
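+ # Illustration (not in the upstream source): if fp16_partitioned_groups[i] holds tensors of 8 and 8
+ # elements and groups_padding[i] == [0, 3], _get_lean_tensors returns views of 8 and 5 elements,
+ # dropping the 3 trailing zeros that were added for alignment.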
+ groups_without_padding = [] + for i, group in enumerate(groups_with_padding): + lean_group = self._get_lean_tensors(group, self.fp16_partitioned_groups[i], self.groups_padding[i]) + groups_without_padding.append(lean_group) + + return groups_without_padding + + def _set_fp32_optimizer_param_groups(self): + for sub_group_id, _ in enumerate(self.fp16_groups): + param_group_id = self.sub_group_to_group_id[sub_group_id] + self.optimizer.param_groups[param_group_id]['params'].append( + self.fp32_partitioned_groups_flat[sub_group_id]) + + def _clear_fp32_optimizer_param_groups(self): + for param_group in self.optimizer.param_groups: + param_group['params'] = [] + + def _rigid_state_dict(self): + state_dict = {} + state_dict[ZERO_STAGE] = ZeroStageEnum.weights + state_dict[LOSS_SCALER] = self.loss_scaler + state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale + state_dict['overflow'] = self.overflow + state_dict[PARTITION_COUNT] = self.partition_count + + self._set_fp32_optimizer_param_groups() + state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict() + state_dict[FP32_FLAT_GROUPS] = self.fp32_partitioned_groups_flat + self._clear_fp32_optimizer_param_groups() + + return state_dict + + def state_dict(self): + """ + Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. + This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict + of the contained Pytorch optimizer. + Example:: + checkpoint = {} + checkpoint['model'] = model.state_dict() + checkpoint['optimizer'] = optimizer.state_dict() + torch.save(checkpoint, "saved.pth") + """ + if self.elastic_checkpoint: + raise NotImplementedError("ZeRO-3 does not yet support elastic checkpointing, please disable for now.") + + return self._rigid_state_dict() + + +# Restore base optimizer fp32 weights from checkpoint by: +# 1) Merging fp32 weights from checkpoints of all partitions +# 2) Extracting fp32 weights for current partition from merged weights +# 3) Using extracted weights to update base optimizer weights directly. + + def _restore_from_fp32_weights(self, all_state_dict): + + flat_local_partition = [] + for i in range(len(self.fp32_partitioned_groups_flat)): + merged_partitions = [sd['fp32_groups'][i] for sd in all_state_dict] + flat_local_partition.append(self._get_flattened_partition(merged_partitions)) + + for current, saved in zip(self.fp32_partitioned_groups_flat, flat_local_partition): + current.data.copy_(saved.data) + + # Restore base optimizer fp32 weights from ZeRO fp16 weights + def _restore_from_bit16_weights(self): + for fp16_partitions, fp32_partition in zip(self.fp16_partitioned_groups_flat, + self.fp32_partitioned_groups_flat): + fp32_partition.data.copy_(fp16_partitions.data) + + # Refresh the fp32 master params from the fp16 copies. 
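+ # NOTE (annotation, not in the upstream source): rebuilding fp32 master weights from the fp16 copies
+ # loses the extra precision held in fp32; _restore_from_fp32_weights above is the lossless path when
+ # fp32 checkpoint partitions are available (see load_from_fp32_weights in load_state_dict below).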
+ def refresh_fp32_params(self): + self._restore_from_bit16_weights() + + # Extract flattened partition for current rank from all partitions + def _get_flattened_partition(self, all_partition_states): + partition_id = dist.get_rank(group=self.dp_process_group) + alignment = dist.get_world_size(group=self.dp_process_group) + + param_partitions = [[] for _ in range(len(all_partition_states[0]))] + for i, partition in enumerate(all_partition_states): + for j, param in enumerate(partition): + param_partitions[j].append(param) + + local_state_partitions = [] + for param_index, param_slices in enumerate(param_partitions): + flattened_merged_tensor = self.flatten_dense_tensors_aligned(param_slices, alignment) + new_partitions = self.get_data_parallel_partitions(flattened_merged_tensor) + local_state_partitions.append(new_partitions[partition_id]) + + if torch.is_tensor(local_state_partitions[0]): + return self.flatten_dense_tensors_aligned(local_state_partitions, alignment) + + # Assume non-tensor states are not partitioned and equal across ranks, so return first one + return local_state_partitions[0] + + # Restore base optimizer state from checkpoint by + # 1) Merging optimizer state from checkpoints of all partitions + # 2) Extracting optimizer state for current partition from the merged state + # 3) Using the extracted value to directly update the base optimizer. + def _restore_base_optimizer_state(self, all_state_dict): + base_optimizer_group_states = [] + for i in range(len(self.optimizer.param_groups)): + partition_states = {} + all_partition_group_states = [sd['base_optimizer_state'][i] for sd in all_state_dict] + for key in all_partition_group_states[0].keys(): + all_partition_states = [all_states[key] for all_states in all_partition_group_states] + partition_states[key] = self._get_flattened_partition(all_partition_states) + base_optimizer_group_states.append(partition_states) + + for i, group in enumerate(self.optimizer.param_groups): + p = group['params'][0] + for key, saved in base_optimizer_group_states[i].items(): + if torch.is_tensor(self.optimizer.state[p][key]): + self.optimizer.state[p][key].data.copy_(saved.data) + else: + self.optimizer.state[p][key] = saved + + def _rigid_load_state_dict(self, state_dict, load_optimizer_states=True): + # I think it should actually be ok to reload the optimizer before the model. 
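+ # NOTE (annotation, not in the upstream source): the restore order below is loss-scaler/overflow
+ # bookkeeping, then (optionally) the wrapped optimizer's state, then purging any swapped optimizer
+ # state (which was initialized from the fresh model rather than the checkpoint), and finally copying
+ # the saved fp32 flat partitions and re-deriving the fp16 partitions and unflattened params from them.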
+ self.loss_scaler = state_dict[LOSS_SCALER] + self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] + self.overflow = state_dict['overflow'] + + if load_optimizer_states: + self._set_fp32_optimizer_param_groups() + self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT]) + self._clear_fp32_optimizer_param_groups() + + if self.swap_optimizer or self.params_in_nvme_and_cpu: + # Purge the swapped optimizer state, it was initialized to the freshly created model and not the checkpoint + for swap_info in self.optimizer_swapper.swap_params_info.values(): + swap_info.tensors = [swap_info.tensors[0]] + swap_info.has_state_tensors = False + + if self.swap_optimizer: + # Touch all parameters to synchronize all buffers + timer_names = set() + self._partition_all_parameters() + for sub_group_id, group in enumerate(self.fp16_groups): + self._prepare_sub_group(sub_group_id, timer_names) + self._reassign_or_swap_out_partitioned_parameters(sub_group_id) + self._release_sub_group(sub_group_id, timer_names) + self._post_step(timer_names) + + # restore fp32 partitions + for curr_param, saved_param in zip(self.fp32_partitioned_groups_flat, state_dict[FP32_FLAT_GROUPS]): + curr_param.data.copy_(saved_param.data) + + # restore fp16 partitions from fp32 + for sub_group_id in range(len(self.fp32_partitioned_groups_flat)): + fp32_param = self.fp32_partitioned_groups_flat[sub_group_id] + if sum(fp32_param.size()) > 0: + fp16_param = self.fp16_partitioned_groups_flat[sub_group_id] + fp16_param.data.copy_(fp32_param.data) + + # update fp16 unflattened params + for sub_group_id in range(len(self.fp16_partitioned_groups_flat)): + updated_params = self.unflatten(self.fp16_partitioned_groups_flat[sub_group_id], + self.fp16_partitioned_groups[sub_group_id]) + + for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params): + partitioned_param.data = q.data + + # TODO: Support different/changing load/save DP degree. + def load_state_dict(self, + state_dict_list, + load_optimizer_states=True, + load_from_fp32_weights=False, + checkpoint_folder=None, + load_serial=None): + r"""Loading a ZeRO checkpoint + Arguments: + state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition. + Note that the number of saved partitions may differ from number of loading partitions to support + changing GPU count, specifically DP world size, between saving and loading checkpoints. + load_optimizer_states: Boolean indicating whether or not to load base optimizer states + load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32 + copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss). + """ + """ + Loads a state_dict created by an earlier call to state_dict(). + If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, + whose parameters in turn came from ``model``, it is expected that the user + will call ``model.load_state_dict()`` before + ``fp16_optimizer_instance.load_state_dict()`` is called. + Example:: + model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() + optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) + optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) + ... 
+ checkpoint = torch.load("saved.pth") + model.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + """ + + if self.elastic_checkpoint: + raise NotImplementedError("ZeRO-3 does not yet support elastic checkpointing, please disable for now.") + + self._rigid_load_state_dict(state_dict_list[dist.get_rank(group=self.dp_process_group)], + load_optimizer_states=load_optimizer_states) + + # when use loading checkpoint serial, after finish loading, we need to + # delete the temp state_dict_list variable to save memory, then trigger + # the next rank's loading + if load_serial is not None: + load_serial += 1 + rank = dist.get_rank(group=self.dp_process_group) + local_rank = dist.get_local_rank() + del state_dict_list[rank] + rank_end = dist.get_world_size() - 1 + if local_rank != rank_end: + dist.send(tensor=load_serial, dst=rank + 1) + + if len(self.persistent_parameters) > 0: + self.persistent_parameters[0].partition(self.persistent_parameters) + # self.persistent_parameters[0].all_gather(self.persistent_parameters) # this will be done in checkpoint_event_epilogue() so remove it to prevent double all_gather + + def reset_swap_buffers(self): + timer_names = set() + for sub_group_id, group in enumerate(self.fp16_groups): + self._prepare_sub_group(sub_group_id, timer_names) + self._reassign_or_swap_out_partitioned_parameters(sub_group_id) + self._release_sub_group(sub_group_id, timer_names) + + def checkpoint_event_prologue(self): + self._partition_all_parameters() + + def checkpoint_event_epilogue(self): + if len(self.persistent_parameters) > 0: + self.persistent_parameters[0].all_gather(self.persistent_parameters) + + def empty_partition_cache(self): + self.parameter_offload.empty_partition_cache() + + +def _handle_overflow(cpu_sum, x, i): + import math + rank = dist.get_rank() + if rank == 0: + t_i = -1 + for v_i, v in enumerate(x.data.contiguous().view(-1)): + if not math.isfinite(float(v)): + t_i = v_i + break + logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}") + + +def estimate_zero3_model_states_mem_needs(total_params, + largest_layer_params, + num_gpus_per_node=1, + num_nodes=1, + cpu_offload=True, + cpu_offload_params=True, + zero_init=True, + additional_buffer_factor=1.5): + + total_gpus = num_nodes * num_gpus_per_node + gpus_factor = 1 / num_nodes + largest_layer_memory = (4 * largest_layer_params) + + if cpu_offload: + if cpu_offload_params: + gpu_mem = largest_layer_memory + + if zero_init: + cpu_mem = total_params * 18 * gpus_factor * additional_buffer_factor + else: + cpu_mem = total_params * max(4 * num_gpus_per_node, 18 * gpus_factor) * additional_buffer_factor + + else: + gpu_mem = largest_layer_memory + int(2 * total_params / total_gpus) + + if zero_init: + cpu_mem = total_params * 16 * gpus_factor * additional_buffer_factor + else: + cpu_mem = total_params * max(4 * num_gpus_per_node, 16 * gpus_factor) * additional_buffer_factor + else: + gpu_mem = largest_layer_memory + int(18 * total_params / total_gpus) + if zero_init: + cpu_mem = largest_layer_params * 4 * num_gpus_per_node * additional_buffer_factor + else: + cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor + + return int(cpu_mem), int(gpu_mem), largest_layer_memory + + +def model_to_params(model): + # shared params calculated only once + total_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) + + largest_layer_params = 0 + for m in model.modules(): + # assuming no shared params within a 
single layer + layer_params = sum(p.numel() for p in m.parameters(recurse=False)) + largest_layer_params = max(largest_layer_params, layer_params) + + return total_params, largest_layer_params + + +def estimate_zero3_model_states_mem_needs_all_live(model, + num_gpus_per_node=1, + num_nodes=1, + additional_buffer_factor=1.5): + """ + Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients + for a given ``model`` and hardware setup. + + If you have an actual model object, use this function and everything will be derived + automatically. + + If it's a hypothetical model, use ``estimate_zero3_model_states_mem_needs_all_cold`` where you have to pass + the ``total_params`` and ``largest_layer_params`` explicitly. + + Args: + - ``model``: ``nn.Module`` object + - ``num_gpus_per_node``: how many gpus per node (defaults to 1) + - ``num_nodes``: how many nodes (defaults to 1), + - ``additional_buffer_factor``: estimation factor (defaults to 1.5): + + """ + + total_params, largest_layer_params = model_to_params(model) + + estimate_zero3_model_states_mem_needs_all_cold(total_params=total_params, + largest_layer_params=largest_layer_params, + num_gpus_per_node=num_gpus_per_node, + num_nodes=num_nodes, + additional_buffer_factor=additional_buffer_factor) + + +def estimate_zero3_model_states_mem_needs_all_cold(total_params, + largest_layer_params, + num_gpus_per_node=1, + num_nodes=1, + additional_buffer_factor=1.5): + """ + Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients + for a given ``model`` and hardware setup. + + If it's a hypothetical model, use this function where you have to pass + the ``total_params`` and ``largest_layer_params`` explicitly. + + If you have an actual model object, use ``estimate_zero3_model_states_mem_needs_all_live`` and everything + will be derived automatically. + + Args: + - ``total_params``: total model params + - ``largest_layer_params``: largest layer's params + - ``num_gpus_per_node``: how many gpus per node (defaults to 1) + - ``num_nodes``: how many nodes (defaults to 1), + - ``additional_buffer_factor``: estimation factor (defaults to 1.5): + + """ + + def format_options(cpu_offload, cpu_offload_params, zero_init): + enabled = [] + padded_cpu_str = f'{OffloadDeviceEnum.cpu:4}' + param_device = padded_cpu_str if cpu_offload_params else "none" + enabled.append(f"offload_param={param_device}") + optimizer_device = padded_cpu_str if cpu_offload else "none" + enabled.append(f"offload_optimizer={optimizer_device}") + enabled.append(f"zero_init={1 if zero_init else 0}") + return ", ".join(enabled) + + nodes_str = "nodes" if num_nodes > 1 else "node" + gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU" + print( + "Estimated memory needed for params, optim states and gradients for a:\n" + f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n" + f"SW: Model with {int(total_params/1e6)}M total params, {int(largest_layer_params/1e6)}M largest layer params." 
+ ) + print(" per CPU | per GPU | Options") + for cpu_offload in [True, False]: + for cpu_offload_params in [True, False]: + if not cpu_offload and cpu_offload_params: + continue + for zero_init in [True, False]: + cpu_mem, gpu_mem, largest_layer_memory = estimate_zero3_model_states_mem_needs( + total_params=total_params, + largest_layer_params=largest_layer_params, + num_gpus_per_node=num_gpus_per_node, + num_nodes=num_nodes, + cpu_offload=cpu_offload, + cpu_offload_params=cpu_offload_params, + zero_init=zero_init, + additional_buffer_factor=additional_buffer_factor) + + options_str = format_options(cpu_offload=cpu_offload, + cpu_offload_params=cpu_offload_params, + zero_init=zero_init) + print(f" {cpu_mem/2**30:7.2f}GB | {gpu_mem/2**30:6.2f}GB | {options_str}") diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/stage_1_and_2.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/stage_1_and_2.py new file mode 100644 index 0000000000000000000000000000000000000000..225c085f6f2bebd83229c2127a1004a3f029f89f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/stage_1_and_2.py @@ -0,0 +1,2519 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed import comm as dist +from packaging import version as pkg_version +from collections import OrderedDict +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors + +from deepspeed.runtime.base_optimizer import ZeROOptimizer +from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler +from deepspeed.runtime.utils import (empty_cache, see_memory_usage, inf, is_model_parallel_parameter, + align_dense_tensors, all_gather_dp_groups) +from deepspeed.runtime.zero.config import ZeroStageEnum +from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum +from deepspeed.ops.adam import DeepSpeedCPUAdam +from deepspeed.utils import logger +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank +from deepspeed.moe.utils import is_moe_param +from deepspeed.git_version_info import version + +from deepspeed.runtime.constants import PIPE_REPLICATED +from deepspeed.accelerator import get_accelerator + +from deepspeed.checkpoint.constants import (DS_VERSION, GROUP_PADDINGS, PARTITION_COUNT, LOSS_SCALER, + SINGLE_PARTITION_OF_FP32_GROUPS, BASE_OPTIMIZER_STATE, + BASE_OPTIMIZER_STATE_STEP, CLIP_GRAD, ZERO_STAGE, PARAM_SLICE_MAPPINGS) +from deepspeed.utils import link_hp_params, lazy_init_hp_params_optimizer_state +from deepspeed.checkpoint import enable_universal_checkpoint + +from deepspeed.utils import groups +# Toggle this to true to enable correctness test +# with gradient partitioning and without +pg_correctness_test = False + +OPTIMIZER_ALLGATHER_TIMER = 'optimizer_allgather' +OPTIMIZER_GRADIENTS_TIMER = 'optimizer_gradients' +OPTIMIZER_STEP_TIMER = 'optimizer_step' +OPTIMIZER_TIMERS = [OPTIMIZER_ALLGATHER_TIMER, OPTIMIZER_GRADIENTS_TIMER, OPTIMIZER_STEP_TIMER] + + +def input(msg): + return + + +def split_half_float_double(tensors): + device_type = get_accelerator().device_name() + dtypes = [ + "torch.{}.HalfTensor".format(device_type), "torch.{}.FloatTensor".format(device_type), + "torch.{}.DoubleTensor".format(device_type), "torch.{}.BFloat16Tensor".format(device_type) + ] + buckets = [] + for i, dtype in enumerate(dtypes): + bucket = [t for t in tensors if t.type() == dtype] + if bucket: + buckets.append(bucket) + return buckets + + +def isclose(a, b, rtol=1e-09, atol=0.0): + return abs(a - b) <= 
max(rtol * max(abs(a), abs(b)), atol) + + +def lcm(x, y): + from math import gcd # fractions.gcd was removed in Python 3.9; math.gcd is available on Python 3.5+ + return x * y // gcd(x, y) + + +def get_alignment_padding(tensor_list, alignment): + num_elements = sum([tensor.numel() for tensor in tensor_list]) + remainder = num_elements % alignment + return (alignment - remainder) if remainder else remainder + + +def print_rank_msg(msg): + print(f"rank {dist.get_rank()} - {msg}") + + +def _get_padded_tensor(src_tensor, size): + if src_tensor.numel() >= size: + return src_tensor + padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device) + slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel()) + slice_tensor.data.copy_(src_tensor.data) + return padded_tensor + + +def _pad_tensor_by_size(src_tensor, pad_size, dtype, device): + padded_tensor = torch.zeros(src_tensor.numel() + pad_size, dtype=dtype, device=device) + padded_tensor.data[:src_tensor.numel()].copy_(src_tensor.data) + return padded_tensor + + +class DeepSpeedZeroOptimizer(ZeROOptimizer): + """ + DeepSpeedZeroOptimizer designed to reduce the memory footprint + required for training large deep learning models. + + For more details please see ZeRO: Memory Optimizations Toward Training Trillion Parameter Models + https://arxiv.org/abs/1910.02054 + + For usage examples, refer to TODO: DeepSpeed Tutorial + + """ + + def __init__(self, + init_optimizer, + param_names, + timers, + static_loss_scale=1.0, + dynamic_loss_scale=False, + dynamic_loss_args=None, + verbose=True, + contiguous_gradients=True, + reduce_bucket_size=500000000, + use_multi_rank_bucket_allreduce=True, + allgather_bucket_size=5000000000, + dp_process_group=None, + expert_parallel_group=None, + expert_data_parallel_group=None, + reduce_scatter=True, + overlap_comm=False, + offload_optimizer_config=None, + mpu=None, + clip_grad=0.0, + gradient_accumulation_dtype=torch.float32, + communication_data_type=torch.float16, + postscale_gradients=True, + gradient_predivide_factor=1.0, + gradient_accumulation_steps=1, + ignore_unused_parameters=True, + partition_grads=True, + round_robin_gradients=False, + has_moe_layers=False, + fp16_master_weights_and_gradients=False, + elastic_checkpoint=False): + + if offload_optimizer_config is not None and offload_optimizer_config.device != OffloadDeviceEnum.none: + self.cpu_offload = True + self.cpu_offload_pin_memory = offload_optimizer_config.pin_memory + else: + self.cpu_offload = False + self.cpu_offload_pin_memory = False + + if dist.get_rank() == 0: + logger.info(f"Reduce bucket size {reduce_bucket_size}") + logger.info(f"Allgather bucket size {allgather_bucket_size}") + logger.info(f"CPU Offload: {self.cpu_offload}") + logger.info(f'Round robin gradient partitioning: {round_robin_gradients}') + # The fused optimizer does all the work. We need this layer for two reasons: + # 1. maintain same user API from apex.fp16_utils + # 2. keep common stuff here in case we need to add a new fused optimizer later + + self.elastic_checkpoint = elastic_checkpoint + self.param_names = param_names + self.mpu = mpu + # differences from apex.fp16_utils: + # - assume all model params in fp16 + # - assume all params require grad + # - flat by groups, not keeping state. TODO: remove state explicitly? + # - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
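+ # Usage sketch (illustrative only, not part of the upstream source; `model` and `ds_config` are
+ # assumed user-side names). This optimizer is normally constructed indirectly by deepspeed.initialize
+ # from a ZeRO stage 1/2 config, e.g.:
+ #   ds_config = {
+ #       "train_micro_batch_size_per_gpu": 1,
+ #       "fp16": {"enabled": True},
+ #       "zero_optimization": {"stage": 2, "overlap_comm": True, "reduce_bucket_size": 5e8},
+ #   }
+ #   engine, optimizer, _, _ = deepspeed.initialize(model=model,
+ #                                                  model_parameters=model.parameters(),
+ #                                                  config=ds_config)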
+ if not get_accelerator().is_available(): + raise SystemError("Accelerator is not detected, cannot perform low precision training (e.g., fp16, bf16).") + self.optimizer = init_optimizer + + # Use torch (un)flatten ops + self.flatten = _flatten_dense_tensors + self.unflatten = _unflatten_dense_tensors + + # ZeRO stage 1 (False) or 2 (True) + self.partition_gradients = partition_grads + self.zero_stage_string = "ZeRO-2" if partition_grads else "ZeRO-1" + + self.timers = timers + + self.reduce_scatter = reduce_scatter + + self.overlap_comm = overlap_comm + + self.deepspeed_adam_offload = self.cpu_offload + + self.device = get_accelerator().current_device_name() if not self.cpu_offload else 'cpu' + + self.dp_process_group = dp_process_group + self.sequence_parallel_size = groups._get_sequence_parallel_world_size() + #expert parallel group + self.ep_process_group = expert_parallel_group + + #data parallel group for experts + self.expert_dp_process_group = expert_data_parallel_group + + #data parallel size for non-experts + dp_size = dist.get_world_size(group=self.dp_process_group) + + #For MoE models this maybe different for different param group + #It will be modified during MoE setup later in the init + self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))] + self.partition_count = [dp_size for i in range(len(self.optimizer.param_groups))] + + self.is_gradient_accumulation_boundary = True + + # CPU-Offload requires contiguous gradients + self.contiguous_gradients = contiguous_gradients or self.cpu_offload + + self.has_moe_layers = has_moe_layers + if self.has_moe_layers: + self._configure_moe_settings() + self._global_grad_norm = 0. + + if mpu is None: + self.model_parallel_group = None + self.model_parallel_world_size = 1 + self.model_parallel_rank = 0 + else: + self.model_parallel_group = mpu.get_model_parallel_group() + self.model_parallel_world_size = mpu.get_model_parallel_world_size() + self.model_parallel_rank = bwc_tensor_model_parallel_rank(mpu) + + self.overflow = False + self.clip_grad = clip_grad + self.communication_data_type = communication_data_type + self.gradient_predivide_factor = gradient_predivide_factor + self.postscale_gradients = postscale_gradients + self.gradient_accumulation_steps = gradient_accumulation_steps + self.micro_step_id = 0 + self.ignore_unused_parameters = ignore_unused_parameters + self.round_robin_gradients = round_robin_gradients + + self.extra_large_param_to_reduce = None + self.fp16_master_weights_and_gradients = fp16_master_weights_and_gradients + + if self.fp16_master_weights_and_gradients: + assert self.cpu_offload and type(self.optimizer) in [DeepSpeedCPUAdam], \ + f"fp16_master_and_gradients requires optimizer to support keeping fp16 master and gradients while keeping the optimizer states in fp32."\ + f"Currently only supported using ZeRO-Offload with DeepSpeedCPUAdam. But current setting is ZeRO-Offload:{self.cpu_offload} and optimizer type {type(self.optimizer)}." \ + f"Either disable fp16_master_weights_and_gradients or enable {self.zero_stage_string} Offload with DeepSpeedCPUAdam." + + if self.reduce_scatter and self.partition_gradients: + valid_reduce_scatter_dtypes = (torch.float16, torch.bfloat16, torch.float32) + assert self.communication_data_type in valid_reduce_scatter_dtypes, f"{self.zero_stage_string} supports {valid_reduce_scatter_dtypes} communication_data_type with reduce scatter enabled. 
Got: '{self.communication_data_type}'" + assert self.gradient_predivide_factor == 1.0, f"gradient_predivide_factor != 1.0 is not yet supported with {self.zero_stage_string} with reduce scatter enabled" + assert self.postscale_gradients, f"pre-scale gradients is not yet supported with {self.zero_stage_string} with reduce scatter enabled" + + # param flattened by groups + self.bit16_groups = [] + self.bit16_groups_flat = [] + + # param partitioned by data parallel degree + # this will contain a list of equal sized tensors + # each of which will be updated by a different process + self.parallel_partitioned_bit16_groups = [] + + # a single 32-bit partition of the parallel partitioned parameters + # that this process will update + self.single_partition_of_fp32_groups = [] + + # param partition info + + # These are the parameters in each group that will not be updated by this process directly + self.params_not_in_partition = [] + + # These are the parameters that will be updated by this process directly + self.params_in_partition = [] + + # Offset from the first parameter in the self.params_in_partition + # the parameter boundaries may not align with partition boundaries + # so we need to keep track of the offset + self.first_offset = [] + + # number of elements per partition in each group + self.partition_size = [] + + # align nccl all-gather send buffers to 4-byte boundary + self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2 + + assert ( + allgather_bucket_size % self.nccl_start_alignment_factor == 0 + ), f"allgather_bucket_size must be a multiple of nccl_start_alignment_factor, {self.nccl_start_alignment_factor} " + + self.all_reduce_print = False + self.dtype = self.optimizer.param_groups[0]['params'][0].dtype + self.gradient_accumulation_dtype = gradient_accumulation_dtype + + if self.dtype != self.gradient_accumulation_dtype: + self.use_separate_grad_accum = True + else: + self.use_separate_grad_accum = False + if self.use_separate_grad_accum and not self.partition_gradients: + self.use_grad_accum_attribute = True + else: + self.use_grad_accum_attribute = False + + self.round_robin_bit16_groups = [] + self.round_robin_bit16_indices = [] + self.round_robin_bit16_meta = [] + + # Use different parallel to do all_to_all_reduce related things + # padding on each partition for alignment purposes + self.groups_padding = [] + # loop to deal with groups + for i, param_group in enumerate(self.optimizer.param_groups): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + + # push this group to list before modify + # TODO: Explore simplification that avoids the extra book-keeping by pushing the reordered group + trainable_parameters = [] + for param in param_group['params']: + if param.requires_grad: + param.grad_accum = None + trainable_parameters.append(param) + self.bit16_groups.append(trainable_parameters) + + # not sure why apex was cloning the weights before flattening + # removing cloning here + + see_memory_usage(f"Before moving param group {i} to CPU") + # move all the parameters to cpu to free up GPU space for creating flat buffer + + # Create temp CPU param copies, free accelerator tensors + orig_group_numel = 0 + for param in self.bit16_groups[i]: + orig_group_numel += param.numel() + param.cpu_data = param.data.cpu() + param.data = torch.empty(1).to(param.device) + + empty_cache() + see_memory_usage(f"After moving param group {i} to CPU", force=False) + + # Reorder group parameters for load balancing of gradient partitioning during backward among 
ranks. + # This ensures that gradients are reduced in a fashion such that ownership round robins among the ranks. + # For example, rather than 3 gradients (g_n+2, g_n+1, g_n) that are reduced consecutively belonging + # to the same rank, instead they will belong to 3 ranks (r_m+2, r_m+1, r_m). + if self.round_robin_gradients: + round_robin_tensors, round_robin_indices = self._round_robin_reorder( + self.bit16_groups[i], dist.get_world_size(group=self.real_dp_process_group[i])) + else: + round_robin_tensors = self.bit16_groups[i] + round_robin_indices = list(range(len(self.bit16_groups[i]))) + + self.round_robin_bit16_groups.append(round_robin_tensors) + self.round_robin_bit16_indices.append(round_robin_indices) + + # Create meta tensors list, ordered according to round_robin_tensors + meta_tensors = [] + for param in round_robin_tensors: + meta_tensors.append(torch.zeros_like(param.cpu_data, device="meta")) + self.round_robin_bit16_meta.append(meta_tensors) + + # create flat buffer in CPU + flattened_buffer = self.flatten_dense_tensors_aligned( + self.round_robin_bit16_groups[i], + self.nccl_start_alignment_factor * dist.get_world_size(group=self.real_dp_process_group[i]), + use_cpu_data=True) + + # free temp CPU params + for param in self.bit16_groups[i]: + del param.cpu_data + + # Move CPU flat tensor to the accelerator memory. + self.bit16_groups_flat.append(flattened_buffer.to(get_accelerator().current_device_name())) + del flattened_buffer + + see_memory_usage(f"After flattening and moving param group {i} to GPU", force=False) + + # Record padding required for alignment + if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1: + padding = self.bit16_groups_flat[i].numel() - orig_group_numel + else: + padding = 0 + self.groups_padding.append(padding) + + if dist.get_rank(group=self.real_dp_process_group[i]) == 0: + see_memory_usage(f"After Flattening and after emptying param group {i} cache", force=False) + + # set model bit16 weight to slices of flattened buffer + self._update_model_bit16_weights(i) + + # divide the flat weights into near equal partition equal to the data parallel degree + # each process will compute on a different part of the partition + data_parallel_partitions = self.get_data_parallel_partitions(self.bit16_groups_flat[i], i) + self.parallel_partitioned_bit16_groups.append(data_parallel_partitions) + + # verify that data partition start locations are 4-byte aligned + for partitioned_data in data_parallel_partitions: + assert (partitioned_data.data_ptr() % (2 * self.nccl_start_alignment_factor) == 0) + + # A partition of the fp32 master weights that will be updated by this process. + # Note that the params in single_partition_of_fp32_groups is cloned and detached + # from the origin params of the model. + if not fp16_master_weights_and_gradients: + weights_partition = self.parallel_partitioned_bit16_groups[i][partition_id].to( + self.device).clone().float().detach() + else: + weights_partition = self.parallel_partitioned_bit16_groups[i][partition_id].to( + self.device).clone().half().detach() + + if self.cpu_offload: + weights_partition = get_accelerator().pin_memory(weights_partition) + + self.single_partition_of_fp32_groups.append(weights_partition) + + # Set local optimizer to have flat params of its own partition. + # After this, the local optimizer will only contain its own partition of params. + # In that case, the local optimizer only saves the states(momentum, variance, etc.) related to its partition's params(zero stage1). 
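+ # Worked example (illustrative only, not part of the upstream source): with a data-parallel world size
+ # of 4 and a flat bit16 group of 4,000,000 elements, partition_size below is 1,000,000, so this rank
+ # keeps a 1,000,000-element fp32 master copy (~4 MB) and optimizer state for that slice only.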
+ self.single_partition_of_fp32_groups[ + i].requires_grad = True # keep this in case internal optimizer uses it + param_group['params'] = [self.single_partition_of_fp32_groups[i]] + + partition_size = len(self.bit16_groups_flat[i]) / dist.get_world_size(group=self.real_dp_process_group[i]) + params_in_partition, params_not_in_partition, first_offset = self.get_partition_info( + self.round_robin_bit16_groups[i], partition_size, partition_id) + + self.partition_size.append(partition_size) + self.params_in_partition.append(params_in_partition) + self.params_not_in_partition.append(params_not_in_partition) + self.first_offset.append(first_offset) + + self.reduce_bucket_size = int(reduce_bucket_size) + self.use_multi_rank_bucket_allreduce = use_multi_rank_bucket_allreduce + self.allgather_bucket_size = int(allgather_bucket_size) + + self.reduction_stream = None if get_accelerator().is_synchronized_device() else get_accelerator().Stream() + #self.copy_grad_stream = get_accelerator().Stream() + self.callback_queued = False + + self.param_dict = {} + + # map between param_id and bool to specify if a param is in this partition + self.is_param_in_current_partition = {} + + self.grads_in_ipg_bucket = [] + self.params_in_ipg_bucket = [] + self.elements_in_ipg_bucket = 0 + self.params_already_reduced = [] + self._release_ipg_buffers() + self.previous_reduced_grads = None + self.ipg_bucket_has_moe_params = False + + # simplified param id + self.param_id = {} + + #interesting code: unique ids being assigned to individual parameters + largest_param_numel = 0 + count = 0 + for i, params_group in enumerate(self.bit16_groups): + for param in params_group: + unique_id = id(param) + self.param_id[unique_id] = count + self.param_dict[count] = param + self.params_already_reduced.append(False) + if param.numel() > largest_param_numel: + largest_param_numel = param.numel() + count = count + 1 + + for param_group in self.params_in_partition: + for param in param_group: + self.is_param_in_current_partition[self.get_param_id(param)] = True + + for param_group in self.params_not_in_partition: + for param in param_group: + self.is_param_in_current_partition[self.get_param_id(param)] = False + + if self.cpu_offload: + self.accumulated_grads_in_cpu = {} + self.norm_for_param_grads = {} + self.local_overflow = False + self.grad_position = {} + self.temp_grad_buffer_for_cpu_offload = torch.zeros(largest_param_numel, + device=self.device, + dtype=self.dtype) + if self.cpu_offload_pin_memory: + self.temp_grad_buffer_for_cpu_offload = get_accelerator().pin_memory( + self.temp_grad_buffer_for_cpu_offload) + self.temp_grad_buffer_for_gpu_offload = torch.zeros(largest_param_numel, + device=get_accelerator().current_device_name(), + dtype=self.dtype) + for i, params_group in enumerate(self.bit16_groups): + self.get_grad_position(i, self.params_in_partition[i], self.first_offset[i], self.partition_size[i]) + + # mapping from parameter to partition that it belongs to + self.param_to_partition_ids = {} + + # stores if a partition has been reduced in this step + self.is_partition_reduced = {} + + # number of grads in partition that still need to be computed + self.remaining_grads_in_partition = {} + + # total number of grads in partition + self.total_grads_in_partition = {} + + # stores if a grad in a partition has been computed or not + self.is_grad_computed = {} + + # stores the offset at which a parameter gradient needs to be inserted in a partition + self.grad_partition_insertion_offset = {} + + # the offset in the gradient at 
which it must be inserted at the beginning of the partition + self.grad_start_offset = {} + + # will store the averaged gradients required by this partition + self.averaged_gradients = {} + + # For cpu_offload, will store the averaged gradients required by this partition + self.offload_gradient_dict = {} + + # store index of first parameter in each partition + self.first_param_index_in_partition = {} + + # initializes all data structures for implementing gradient partitioning + self.initialize_gradient_partitioning_data_structures() + + # resets the data structure value for the next backward propagation + self.reset_partition_gradient_structures() + + # creates backward hooks for gradient partitioning + self._grad_acc_hooks = [] + if self.partition_gradients or self.overlap_comm: + self.create_reduce_and_remove_grad_hooks() + + self.custom_loss_scaler = False + self.external_loss_scale = None + + # we may have a way of fusing dynamic scale. Do not support for now + self.loss_scaler = CreateLossScaler(dtype=self.dtype, + static_loss_scale=static_loss_scale, + dynamic_scaling=dynamic_loss_scale, + dynamic_loss_args=dynamic_loss_args) + self.dynamic_loss_scale = self.loss_scaler.dynamic + + if self.dtype != torch.float16: + # Only fp16 should use dynamic loss scaling + assert self.loss_scaler.cur_scale == 1.0 + assert not self.dynamic_loss_scale + + see_memory_usage("Before initializing optimizer states", force=True) + self.initialize_optimizer_states() + see_memory_usage("After initializing optimizer states", force=True) + + if dist.get_rank() == 0: + logger.info(f"optimizer state initialized") + + if dist.get_rank(group=self.dp_process_group) == 0: + see_memory_usage(f"After initializing ZeRO optimizer", force=True) + + self._link_all_hp_params() + self._hp_optimizer_states_linked = False + + self._enable_universal_checkpoint() + self._param_slice_mappings = self._create_param_mapping() + + def destroy(self): + for hook in self._grad_acc_hooks: + hook.remove() + self.print_rank_0("Removed grad acc hooks") + + def _enable_universal_checkpoint(self): + for lp_param_group in self.bit16_groups: + enable_universal_checkpoint(param_list=lp_param_group) + + def _create_param_mapping(self): + param_mapping = [] + for i, _ in enumerate(self.optimizer.param_groups): + param_mapping_per_group = OrderedDict() + for lp in self.bit16_groups[i]: + if lp._hp_mapping is not None: + lp_name = self.param_names[lp] + param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address() + param_mapping.append(param_mapping_per_group) + + return param_mapping + + def _link_all_hp_params(self): + dp_world_size = dist.get_world_size(group=self.dp_process_group) + if self.cpu_offload: + self._get_offload_gradient_dict() + + for i, _ in enumerate(self.optimizer.param_groups): + # Link bit16 and fp32 params in partition + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + partition_size = self.bit16_groups_flat[i].numel() // dp_world_size + flat_hp_partition = self.single_partition_of_fp32_groups[i] + link_hp_params(lp_param_list=self.bit16_groups[i], + flat_hp_partition=flat_hp_partition, + gradient_dict=self.averaged_gradients, + offload_gradient_dict=self.offload_gradient_dict, + use_offload=self.cpu_offload, + param_group_index=i, + partition_start=partition_id * partition_size, + partition_size=partition_size, + dp_group=self.real_dp_process_group[i]) + + def _lazy_init_hp_params_optimizer_state(self): + if not self._hp_optimizer_states_linked: + for i, _ in 
enumerate(self.optimizer.param_groups): + lazy_init_hp_params_optimizer_state(self.bit16_groups[i], self.single_partition_of_fp32_groups[i], + self.optimizer.state) + self._hp_optimizer_states_linked = True + + def is_moe_group(self, group): + return 'moe' in group and group['moe'] + + def _configure_moe_settings(self): + # if we're using ZeRO stage 2, ensure contiguous gradients are used + if self.partition_gradients: + assert self.contiguous_gradients, "Contiguous Gradients in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE" + # NOTE: To run ZeRO stage 1 with MoE, we need to set self.contiguous_gradients to True or ignore the assertion + if not self.partition_gradients and not self.contiguous_gradients: + logger.warn( + "ZeRO Stage 1 has not been thoroughly tested with MoE. This configuration is still experimental.") + assert self.reduce_scatter, "Reduce Scatter in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE" + + assert any( + [self.is_moe_group(group) for group in self.optimizer.param_groups] + ), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer" + self.is_moe_param_group = [] + for i, group in enumerate(self.optimizer.param_groups): + if self.is_moe_group(group): + assert all([is_moe_param(param) + for param in group['params']]), "All params in MoE group must be MoE params" + self.real_dp_process_group[i] = self.expert_dp_process_group[group['name']] + self.partition_count[i] = dist.get_world_size(group=self.expert_dp_process_group[group['name']]) + self.is_moe_param_group.append(True) + else: + self.is_moe_param_group.append(False) + + assert self.expert_dp_process_group is not None, "Expert data parallel group should be configured with MoE" + assert self.ep_process_group is not None, "Expert parallel group should be configured with MoE" + + def _update_model_bit16_weights(self, group_index): + updated_params = self.unflatten(self.bit16_groups_flat[group_index], self.round_robin_bit16_meta[group_index]) + for p, q in zip(self.round_robin_bit16_groups[group_index], updated_params): + p.data = q.data + + # set model fp16 weight to slices of reordered flattened buffer + for param_index, param in enumerate(self.bit16_groups[group_index]): + new_index = self.round_robin_bit16_indices[group_index][param_index] + param.data = self.round_robin_bit16_groups[group_index][new_index].data + + def _round_robin_reorder(self, tensor_list, num_partitions): + + # disable round robin if need to debug something + # return tensor_list, list(range(len(tensor_list))) + + partition_tensors = {} + + for i, tensor in enumerate(tensor_list): + j = i % num_partitions + if not j in partition_tensors: + partition_tensors[j] = [] + partition_tensors[j].append((i, tensor)) + + reordered_tensors = [] + reordered_indices = {} + + for partition_index in partition_tensors.keys(): + for i, (original_index, tensor) in enumerate(partition_tensors[partition_index]): + reordered_indices[original_index] = len(reordered_tensors) + reordered_tensors.append(tensor) + + return reordered_tensors, reordered_indices + + def _release_ipg_buffers(self): + if self.contiguous_gradients: + self.ipg_buffer = None + self.grads_in_partition = None + self.grads_in_partition_offset = 0 + + def initialize_optimizer_states(self): + + for i, group in enumerate(self.bit16_groups): + single_grad_partition = torch.zeros(int(self.partition_size[i]), + 
dtype=self.single_partition_of_fp32_groups[i].dtype, + device=self.device) + self.single_partition_of_fp32_groups[i].grad = get_accelerator().pin_memory( + single_grad_partition) if self.cpu_offload_pin_memory else single_grad_partition + + # Initialize the optimizer states with the flattened fp32 partition. + # State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers + # which do lazy initialization of the state at the first call to step. + if isinstance(self.optimizer, torch.optim.Adagrad): + self.optimizer = torch.optim.Adagrad(self.single_partition_of_fp32_groups, **self.optimizer.defaults) + + if not self.cpu_offload: + for group in self.single_partition_of_fp32_groups: + group.grad = None #class init + + return + + ######################################################################### + #################### ZeRO Stage 1 - reduce gradients #################### + ######################################################################### + def reduce_gradients(self, pipeline_parallel=False): + world_size = dist.get_world_size(self.dp_process_group) + my_rank = dist.get_rank(self.dp_process_group) + + # with PP we must create ipg buffer, since backward is handled outside zero + if pipeline_parallel and self.contiguous_gradients: + self.ipg_buffer = [] + buf_0 = torch.empty(int(self.reduce_bucket_size), + dtype=self.dtype, + device=get_accelerator().current_device_name()) + self.ipg_buffer.append(buf_0) + self.ipg_index = 0 + + if not self.overlap_comm: + for i, group in enumerate(self.bit16_groups): + for param in group: + grad_reduc = self.get_gradient_for_reduction(param) + if grad_reduc is not None: + self.reduce_ready_partitions_and_remove_grads(param, i) + # reduce any pending grads in either hook/non-hook case + self.overlapping_partition_gradients_reduce_epilogue() + + ######################################################################### + #########################ZeRO Partition Gradients######################## + ######################################################################### + + def get_first_param_index(self, group_id, param_group, partition_id): + for index, param in enumerate(param_group): + param_id = self.get_param_id(param) + if partition_id in self.param_to_partition_ids[group_id][param_id]: + return index + return None + + def initialize_gradient_partitioning_data_structures(self): + + for i, param_group in enumerate(self.round_robin_bit16_groups): + total_partitions = dist.get_world_size(group=self.real_dp_process_group[i]) + + self.param_to_partition_ids[i] = {} + self.is_partition_reduced[i] = {} + self.total_grads_in_partition[i] = {} + self.remaining_grads_in_partition[i] = {} + self.is_grad_computed[i] = {} + self.grad_partition_insertion_offset[i] = {} + self.grad_start_offset[i] = {} + self.first_param_index_in_partition[i] = {} + + for partition_id in range(total_partitions): + self.is_grad_computed[i][partition_id] = {} + self.grad_partition_insertion_offset[i][partition_id] = {} + self.grad_start_offset[i][partition_id] = {} + self.total_grads_in_partition[i][partition_id] = 0 + self.initialize_gradient_partition(i, param_group, partition_id) + self.is_partition_reduced[i][partition_id] = False + self.first_param_index_in_partition[i][partition_id] = self.get_first_param_index( + i, param_group, partition_id) + + def independent_gradient_partition_epilogue(self): + self.report_ipg_memory_usage(f"In ipg_epilogue before reduce_ipg_grads", 0) + self.reduce_ipg_grads() + self.report_ipg_memory_usage(f"In 
ipg_epilogue after reduce_ipg_grads", 0) + + # if dist.get_rank() == 0: + # logger.info("Params already reduced %s", self.params_already_reduced) + for i in range(len(self.params_already_reduced)): + self.params_already_reduced[i] = False + + if self.overlap_comm: + if not get_accelerator().resolves_data_dependency(): + get_accelerator().synchronize() + # It is safe to clear previously reduced grads of other partitions + self._clear_previous_reduced_grads() + + if self.cpu_offload is False: + for i, _ in enumerate(self.bit16_groups): + + if not i in self.averaged_gradients or self.averaged_gradients[i] is None: + self.averaged_gradients[i] = self.get_flat_partition( + self.params_in_partition[i], + self.first_offset[i], + self.partition_size[i], + dtype=self.gradient_accumulation_dtype, + device=get_accelerator().current_device_name(), + return_tensor_list=True) + else: + avg_new = self.get_flat_partition(self.params_in_partition[i], + self.first_offset[i], + self.partition_size[i], + dtype=self.gradient_accumulation_dtype, + device=get_accelerator().current_device_name(), + return_tensor_list=True) + + for accumulated_grad, new_avg_grad in zip(self.averaged_gradients[i], avg_new): + accumulated_grad.add_(new_avg_grad) + + self._release_ipg_buffers() + + # No need to keep the gradients anymore. + # All gradients required by the step + # are in self.averaged_gradients + self.zero_grad(set_to_none=True) + see_memory_usage(f"End ipg_epilogue") + + # resets all partition to no reduced + # sets remaining grads to the total number of grads in each partition + # set is grad computed to false for all grads in partition + def reset_partition_gradient_structures(self): + for i, _ in enumerate(self.bit16_groups): + total_partitions = dist.get_world_size(group=self.real_dp_process_group[i]) + for partition_id in range(total_partitions): + self.is_partition_reduced[i][partition_id] = False + self.remaining_grads_in_partition[i][partition_id] = self.total_grads_in_partition[i][partition_id] + + for param_id in self.is_grad_computed[i][partition_id]: + self.is_grad_computed[i][partition_id][param_id] = False + + def initialize_gradient_partition(self, i, param_group, partition_id): + + def set_key_value_list(dictionary, key, value): + if key in dictionary: + dictionary[key].append(value) + else: + dictionary[key] = [value] + + def increment_value(dictionary, key): + if key in dictionary: + dictionary[key] += 1 + else: + dictionary[key] = 1 + + partition_size = self.partition_size[i] + + start_index = partition_size * partition_id + end_index = partition_size * (partition_id + 1) + + current_index = 0 + first_offset = 0 + + for param in param_group: + + param_size = param.numel() + param_id = self.get_param_id(param) + + if start_index <= current_index < end_index: + set_key_value_list(self.param_to_partition_ids[i], param_id, partition_id) + increment_value(self.total_grads_in_partition[i], partition_id) + + self.is_grad_computed[i][partition_id][param_id] = False + + self.grad_partition_insertion_offset[i][partition_id][param_id] = current_index - start_index + self.grad_start_offset[i][partition_id][param_id] = 0 + + elif current_index < start_index < (current_index + param_size): + assert (first_offset == 0 + ), "This can happen either zero or only once as this must be the first tensor in the partition" + first_offset = start_index - current_index + + set_key_value_list(self.param_to_partition_ids[i], param_id, partition_id) + increment_value(self.total_grads_in_partition[i], partition_id) + + 
self.is_grad_computed[i][partition_id][param_id] = False + + self.grad_partition_insertion_offset[i][partition_id][param_id] = 0 + self.grad_start_offset[i][partition_id][param_id] = first_offset + + current_index = current_index + param_size + + def overlapping_partition_gradients_reduce_epilogue(self): + self.independent_gradient_partition_epilogue() + + def fill_grad_accum_attribute(self): + for group in self.bit16_groups: + for param in group: + if param.grad is not None: + if param.grad_accum is None: + param.grad_accum = param.grad.to(self.gradient_accumulation_dtype) + else: + param.grad_accum.add_( + param.grad.to(self.gradient_accumulation_dtype).view(param.grad_accum.shape)) + param.grad = None + + def get_gradient_for_reduction(self, param): + if self.use_grad_accum_attribute: + return param.grad_accum.to(self.dtype) if param.grad_accum is not None else None + else: + return param.grad + + def get_param_gradient_attribute(self, param): + return param.grad_accum if self.use_grad_accum_attribute else param.grad + + # Clear the tensor the reduction gradient attribute is pointing to + def clear_grad_attribute(self, param): + if self.use_grad_accum_attribute: + param.grad_accum = None + else: + param.grad = None + + def create_reduce_and_remove_grad_hooks(self): + self.grad_accs = [] + for i, param_group in enumerate(self.bit16_groups): + for param in param_group: + if param.requires_grad: + + def wrapper(param, i): + param_tmp = param.expand_as(param) + grad_acc = param_tmp.grad_fn.next_functions[0][0] + + def reduce_partition_and_remove_grads(*notneeded): + self.reduce_ready_partitions_and_remove_grads(param, i) + + self._grad_acc_hooks.append(grad_acc.register_hook(reduce_partition_and_remove_grads)) + self.grad_accs.append(grad_acc) + + wrapper(param, i) + + def get_param_id(self, param): + unique_id = id(param) + return self.param_id[unique_id] + + def report_ipg_memory_usage(self, tag, param_elems): + elem_count = self.elements_in_ipg_bucket + param_elems + percent_of_bucket_size = (100.0 * elem_count) // self.reduce_bucket_size + see_memory_usage( + f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}" + ) + + # create a flat tensor aligned at the alignment boundary + def flatten_dense_tensors_aligned(self, tensor_list, alignment, use_cpu_data=False): + tensor_list = [param.cpu_data for param in tensor_list] if use_cpu_data else tensor_list + return self.flatten(align_dense_tensors(tensor_list, alignment)) + + ############### Independent Partition Gradient ######################## + def reduce_independent_p_g_buckets_and_remove_grads(self, param, i): + + grad_reduc = self.get_gradient_for_reduction(param) + if self.elements_in_ipg_bucket + param.numel() > self.reduce_bucket_size: + self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads", param.numel()) + self.reduce_ipg_grads() + if self.contiguous_gradients and self.overlap_comm: + # Swap ipg_index between 0 and 1 + self.ipg_index = 1 - self.ipg_index + self.report_ipg_memory_usage("In ipg_remove_grads after reduce_ipg_grads", param.numel()) + + param_id = self.get_param_id(param) + assert self.params_already_reduced[param_id] == False, \ + f"The parameter {param_id} has already been reduced. \ + Gradient computed twice for this partition. 
\ + Multiple gradient reduction is currently not supported" + + if self.contiguous_gradients: + if param.numel() > self.reduce_bucket_size: + self.extra_large_param_to_reduce = param + else: + # keeping the gradients contiguous to prevent memory fragmentation, and avoid flattening + new_grad_tensor = self.ipg_buffer[self.ipg_index].narrow(0, self.elements_in_ipg_bucket, param.numel()) + new_grad_tensor.copy_(grad_reduc.view(-1)) + grad_reduc.data = new_grad_tensor.data.view_as(grad_reduc) + + self.elements_in_ipg_bucket += param.numel() + + assert grad_reduc is not None, f"rank {dist.get_rank()} - Invalid to reduce Param {param_id} with None gradient" + + self.grads_in_ipg_bucket.append(grad_reduc) + self.params_in_ipg_bucket.append((i, param, param_id)) + + #make sure the average tensor function knows how to average the gradients + if is_moe_param(param): + self.ipg_bucket_has_moe_params = True + + self.report_ipg_memory_usage("End ipg_remove_grads", 0) + + def print_rank_0(self, message): + if dist.get_rank() == 0: + logger.info(message) + + def gradient_reduction_w_predivide(self, tensor): + + dp_world_size = dist.get_world_size(group=self.dp_process_group) + + tensor_to_allreduce = tensor + + if self.communication_data_type != tensor.dtype: + tensor_to_allreduce = tensor.to(self.communication_data_type) + + if self.postscale_gradients: + if self.gradient_predivide_factor != 1.0: + tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor) + + dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) + + if self.gradient_predivide_factor != dp_world_size: + tensor_to_allreduce.mul_(self.gradient_predivide_factor / + (dp_world_size / float(self.sequence_parallel_size))) + else: + tensor_to_allreduce.div_(dp_world_size / float(self.sequence_parallel_size)) + dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) + + if self.communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce: + tensor.copy_(tensor_to_allreduce) + + return tensor + + def allreduce_and_copy_with_multiple_ranks(self, + small_bucket, + log=None, + divide=True, + process_group=None, + bucket_ranks=None): + process_group = self.dp_process_group if process_group is None else process_group + allreduced = self.allreduce_bucket(small_bucket, log=log, divide=divide, process_group=process_group) + for buf, synced, bucket_rank in zip(small_bucket, self.unflatten(allreduced, small_bucket), bucket_ranks): + if dist.get_rank(group=process_group) == bucket_rank: + buf.copy_(synced) + + def allreduce_and_scatter(self, bucket, numel_per_bucket=500000000, log=None, divide=True, process_group=None): + small_bucket = [] + small_bucket_ranks = [] + numel = 0 + allreduce_sizes = [] + + for i, bucket_elem in enumerate(bucket): + rank, tensor = bucket_elem + small_bucket.append(tensor) + small_bucket_ranks.append(rank) + numel = numel + tensor.numel() + if numel > numel_per_bucket: + self.allreduce_and_copy_with_multiple_ranks(small_bucket, + log=None, + divide=divide, + process_group=process_group, + bucket_ranks=small_bucket_ranks) + small_bucket = [] + small_bucket_ranks = [] + numel = 0 + + if len(small_bucket) > 0: + self.allreduce_and_copy_with_multiple_ranks(small_bucket, + log=None, + divide=divide, + process_group=process_group, + bucket_ranks=small_bucket_ranks) + + def average_tensor(self, tensor): + if self.overlap_comm: + stream = self.reduction_stream + if not get_accelerator().resolves_data_dependency(): + stream.wait_stream(get_accelerator().current_stream()) + else: + stream = 
get_accelerator().current_stream() + + with get_accelerator().stream(stream): + if not self.reduce_scatter: + self.gradient_reduction_w_predivide(tensor) + return + + # Accumulate destination ranks and bucket offsets for each gradient slice. + # Note: potential future optimization, record access pattern of parameters + # in backward pass and partition gradients w.r.t. access pattern so that our + # bucket is guaranteed to be contiguous w.r.t. ranks + rank_and_offsets = [] + real_dp_process_group = [] + curr_size = 0 + prev_id, prev_process_group = -1, None + + process_group = self.dp_process_group + # count = 0 + for i, param, param_id in self.params_in_ipg_bucket: + + process_group = self.dp_process_group + grad_reduc = self.get_gradient_for_reduction(param) + #Averages gradients at parameter level if ipg has a moe param + #Otherwise averaging is done at the entire buffer level at the end of the loop + # MoE param have different groups + if self.ipg_bucket_has_moe_params: + process_group = self.expert_dp_process_group[param.group_name] if is_moe_param( + param) else self.dp_process_group + grad_reduc.data.div_(dist.get_world_size(group=process_group) / float(self.sequence_parallel_size)) + + partition_ids = self.param_to_partition_ids[i][param_id] + assert all([p_id < dist.get_world_size(group=process_group) for p_id in partition_ids + ]), f"world size {dist.get_world_size(group=process_group)} and p_ids: {partition_ids}" + partition_size = self.partition_size[i] + # Get all partition ids + their offsets + partition_ids_w_offsets = [] + for partition_id in partition_ids: + offset = self.grad_start_offset[i][partition_id][param_id] + partition_ids_w_offsets.append((partition_id, offset)) + partition_ids_w_offsets.sort(key=lambda t: t[1]) + + # Calculate rank and offsets for grad slices + for idx in range(len(partition_ids_w_offsets)): + partition_id, offset = partition_ids_w_offsets[idx] + + # if dist.get_rank() == 0 and count < 100: + # print(f"Rank {dist.get_rank()} rank offset id {idx} calculated dp size {dist.get_world_size(group=process_group)} real dp size {dist.get_world_size(self.real_dp_process_group[i])} and dst: {partition_id}") + # count += 1 + + # Calculate numel for grad slice depending on partition location + if idx == len(partition_ids_w_offsets) - 1: + # Last partition_id uses its own offset + numel = param.numel() - offset + else: + # Set numel to next partition's offset + numel = partition_ids_w_offsets[idx + 1][1] - offset + + # Merge bucket ranges if they belong to the same rank + if partition_id == prev_id and process_group == prev_process_group: + prev_pid, prev_size, prev_numel = rank_and_offsets[-1] + rank_and_offsets[-1] = (prev_pid, prev_size, prev_numel + numel) + else: + rank_and_offsets.append((partition_id, curr_size, numel)) + real_dp_process_group.append(process_group) + curr_size += numel + prev_id, prev_process_group = partition_id, process_group + + if not self.ipg_bucket_has_moe_params: + tensor.div_(dist.get_world_size(group=self.dp_process_group) / float(self.sequence_parallel_size)) + + buckets = {} + for i, (dst, bucket_offset, numel) in enumerate(rank_and_offsets): + grad_slice = tensor.narrow(0, int(bucket_offset), int(numel)) + bucket_key = real_dp_process_group[i] if self.use_multi_rank_bucket_allreduce else ( + dst, real_dp_process_group[i]) + if bucket_key not in buckets: + buckets[bucket_key] = [] + if self.use_multi_rank_bucket_allreduce: + buckets[bucket_key].append((dst, grad_slice)) + else: + buckets[bucket_key].append(grad_slice) + + for 
bucket_key in buckets: + if self.use_multi_rank_bucket_allreduce: + self.allreduce_and_scatter(buckets[bucket_key], + numel_per_bucket=self.reduce_bucket_size, + divide=False, + process_group=bucket_key) + else: + dst, process_group = bucket_key + self.allreduce_no_retain(buckets[bucket_key], + numel_per_bucket=self.reduce_bucket_size, + rank=dst, + divide=False, + process_group=process_group) + + ############################################################################## + ############################# CPU Offload Methods############################# + ############################################################################## + def get_grad_position(self, group_id, tensor_list, first_offset, partition_size): + current_offset = 0 + + for i, tensor in enumerate(tensor_list): + param_id = self.get_param_id(tensor) + param_start_offset = 0 + + num_elements = tensor.numel() + + # we need to offset to get to the right element + if i == 0 and first_offset > 0: + tensor_offset = first_offset + num_elements = num_elements - tensor_offset + param_start_offset = first_offset + + # we dont need all elements of the tensor + if num_elements > (partition_size - current_offset): + num_elements = partition_size - current_offset + + self.grad_position[param_id] = [ + int(group_id), int(param_start_offset), + int(current_offset), int(num_elements) + ] + current_offset += num_elements + + def update_overflow_tracker_for_param_grad(self, param): + grad_accum = self.get_param_gradient_attribute(param) + if grad_accum is not None and self._has_inf_or_nan(grad_accum.data): + self.local_overflow = True + + def _get_offload_gradient_dict(self): + for param_group_index, _ in enumerate(self.optimizer.param_groups): + self.offload_gradient_dict[param_group_index] = [] + for lp_param in self.params_in_partition[param_group_index]: + param_id = self.get_param_id(lp_param) + [_, _, dest_offset, num_elements] = self.grad_position[param_id] + dest_tensor = self.single_partition_of_fp32_groups[param_group_index].grad.view(-1).narrow( + 0, dest_offset, num_elements) + self.offload_gradient_dict[param_group_index].append(dest_tensor) + + def async_accumulate_grad_in_cpu_via_gpu(self, param): + param_id = self.get_param_id(param) + + [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] + + # copy to a preexisiting buffer to avoid memory allocation penalty + dest_buffer = self.temp_grad_buffer_for_gpu_offload.view(-1).narrow(0, 0, param.numel()) + + #buffer for storing gradients for this parameter in CPU + def buffer_to_accumulate_to_in_cpu(): + if not self.fp16_master_weights_and_gradients: + buffer = torch.zeros(param.numel(), dtype=param.dtype, device=self.device) + return get_accelerator().pin_memory(buffer) if self.cpu_offload_pin_memory else buffer + else: + return self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(0, dest_offset, num_elements) + + #accumulate gradients into param.grad_accum or parts of it that belongs to this partition + def accumulate_gradients(): + grad_accum = self.get_param_gradient_attribute(param) + if not self.fp16_master_weights_and_gradients: + dest_buffer.copy_(self.accumulated_grads_in_cpu[param_id].view(-1), non_blocking=True) + grad_accum.data.view(-1).add_(dest_buffer) + else: + dest_buffer.narrow(0, source_offset, + num_elements).copy_(self.accumulated_grads_in_cpu[param_id].view(-1), + non_blocking=True) + grad_accum.data.view(-1).narrow(0, source_offset, + num_elements).add_(dest_buffer.narrow(0, source_offset, num_elements)) + + #move 
accumulated gradients back to CPU + def copy_gradients_to_cpu(): + grad_accum = self.get_param_gradient_attribute(param) + if not self.fp16_master_weights_and_gradients: + self.accumulated_grads_in_cpu[param_id].data.copy_(grad_accum.data.view(-1), non_blocking=True) + else: + self.accumulated_grads_in_cpu[param_id].data.copy_(grad_accum.data.view(-1).narrow( + 0, source_offset, num_elements), + non_blocking=True) + + if param_id not in self.accumulated_grads_in_cpu: + self.accumulated_grads_in_cpu[param_id] = buffer_to_accumulate_to_in_cpu() + + if self.micro_step_id > 0: + accumulate_gradients() + + # at the boundary we will send 32bit directly + if not self.is_gradient_accumulation_boundary: + copy_gradients_to_cpu() + + def set_norm_for_param_grad(self, param): + param_id = self.get_param_id(param) + grad_accum = self.get_param_gradient_attribute(param) + accumulated_grad = self.accumulated_grads_in_cpu[ + param_id] if self.gradient_accumulation_steps > 1 else grad_accum + + [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] + + start = source_offset + accumulated_grad = accumulated_grad.view(-1).narrow(0, start, num_elements) + + self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(2) + + def set_norm_for_param_grad_in_gpu(self, param): + param_id = self.get_param_id(param) + grad_accum = self.get_param_gradient_attribute(param) + if grad_accum is None: + accumulated_grad = param.grad + else: + accumulated_grad = grad_accum + + [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] + + start = source_offset + accumulated_grad = accumulated_grad.view(-1).narrow(0, start, num_elements) + + self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(2) + + def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param): + param_id = self.get_param_id(param) + + [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] + + dest_tensor = self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(0, dest_offset, num_elements) + + grad_accum = self.get_param_gradient_attribute(param) + if grad_accum is None: + src_tensor = grad_accum.view(-1).narrow(0, source_offset, num_elements) + else: + src_tensor = grad_accum.view(-1).narrow(0, source_offset, num_elements) + if not self.fp16_master_weights_and_gradients: + src_tensor = src_tensor.float() + + dest_tensor.copy_(src_tensor, non_blocking=True) + param.grad = None #offload only + + def complete_grad_norm_calculation_for_cpu_offload(self, params): + total_norm = 0.0 + norm_type = 2.0 + for p in params: + # Pipeline parallelism may replicate parameters. Avoid multi-counting. + if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated: + continue + + if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): + param_id = self.get_param_id(p) + # as some model have trainable parameters but skipped in training, + # their backward hooks in self.create_reduce_and_remove_grad_hooks() will not run, + # so they have no norm_for_param_grads + if param_id in self.norm_for_param_grads: + param_norm = self.norm_for_param_grads[param_id] + total_norm += param_norm.item()**2 + else: + # As unused parameters in modules may not be expected sometimes, + # add an explicit error msg when it occurred and an option to + # avoid the error + assert self.ignore_unused_parameters, """ + This assert indicates that your module has parameters that + were not used in producing loss. 
+ You can avoid this assert by + (1) enable ignore_unused_parameters option in zero_optimization config; + (2) making sure all trainable parameters and `forward` function + outputs participate in calculating loss. + """ + + # Sum across all model parallel GPUs. + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group) + + self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM) + + total_norm = total_norm_cuda[0].item()**(1. / norm_type) + + if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: + total_norm = -1 + + return total_norm + + ############################################################################################ + def copy_grads_in_partition(self, param): + if self.cpu_offload: + + if self.gradient_accumulation_steps > 1: + self.async_accumulate_grad_in_cpu_via_gpu(param) + + if self.is_gradient_accumulation_boundary: + self.set_norm_for_param_grad_in_gpu(param) + + self.update_overflow_tracker_for_param_grad(param) + + self.async_inplace_copy_grad_to_fp32_buffer_from_gpu(param) + + return + #print(f"ID {self.get_param_id(param)} grad norm {param.grad.norm()}") + if self.grads_in_partition is None: + self.grads_in_partition_offset = 0 + total_size = 0 + for group in self.params_in_partition: + for param_in_partition in group: + total_size += param_in_partition.numel() + + see_memory_usage(f"before copying {total_size} gradients into partition") + self.grads_in_partition = torch.empty(int(total_size), + dtype=self.dtype, + device=get_accelerator().current_device_name()) + see_memory_usage(f"after copying {total_size} gradients into partition") + + grad_reduc = self.get_gradient_for_reduction(param) + # The allreduce buffer will be rewritten. Copy the gradients in partition to a new buffer + new_grad_tensor = self.grads_in_partition.view(-1).narrow(0, self.grads_in_partition_offset, param.numel()) + new_grad_tensor.copy_(grad_reduc.view(-1)) + grad_reduc.data = new_grad_tensor.data.view_as(grad_reduc) + #print(f"Grad norm after copy to contiguous_buffer {param.grad.data.norm()}") + self.grads_in_partition_offset += param.numel() + + def reduce_ipg_grads(self): + if self.contiguous_gradients: + if self.extra_large_param_to_reduce is not None: + assert len(self.params_in_ipg_bucket) == 1, "more than 1 param in ipg bucket, this shouldn't happen" + _, _, param_id = self.params_in_ipg_bucket[0] + assert self.get_param_id(self.extra_large_param_to_reduce + ) == param_id, "param in ipg bucket does not match extra-large param" + extra_large_grad_reduc = self.get_gradient_for_reduction(self.extra_large_param_to_reduce) + self.average_tensor(extra_large_grad_reduc.view(-1)) + self.extra_large_param_to_reduce = None + else: + self.average_tensor(self.ipg_buffer[self.ipg_index].narrow(0, 0, self.elements_in_ipg_bucket)) + else: + self.buffered_reduce_fallback(None, + self.grads_in_ipg_bucket, + elements_per_buffer=self.elements_in_ipg_bucket) + + if self.overlap_comm: + stream = self.reduction_stream + elif self.cpu_offload: + # TODO: copy_grad_stream is disabled because of race with reduce. This hurts perf and should be fixed. 
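The CPU-offload norm path above sums squared per-parameter L2 norms, all-reduces the partial sum across the data-parallel (and model-parallel) groups, and only then takes the root. Below is a minimal single-process sketch of that sum-of-squares pattern; `global_grad_norm` and the stand-in gradient list are illustrative names, and the distributed all-reduce is shown only as a comment.

```python
import torch

# Minimal single-process sketch of the norm reduction pattern above:
# accumulate squared per-parameter L2 norms, (in the real path) all-reduce the
# partial sum across ranks, then take the square root once at the end.
def global_grad_norm(grads):
    total = torch.zeros((), dtype=torch.float64)
    for g in grads:
        total += g.detach().double().norm(2) ** 2
    # dist.all_reduce(total, op=dist.ReduceOp.SUM, group=...)  # distributed case
    return total.sqrt().item()

grads = [torch.randn(8), torch.randn(3, 4)]
print(global_grad_norm(grads))
```

Summing squares before taking the root keeps the partial results additive across ranks, which is why a single SUM all-reduce is enough before the final square root.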
+ # get_accelerator().synchronize() + # stream = self.copy_grad_stream + stream = get_accelerator().current_stream() + else: + stream = get_accelerator().current_stream() + + with get_accelerator().stream(stream): + for _, param, param_id in self.params_in_ipg_bucket: + + assert self.params_already_reduced[param_id] == False, \ + f"The parameter {param_id} has already been reduced. \ + Gradient computed twice for this partition. \ + Multiple gradient reduction is currently not supported" + + self.params_already_reduced[param_id] = True + if self.partition_gradients: + if not self.is_param_in_current_partition[param_id]: + if self.overlap_comm and self.contiguous_gradients is False: + # Clear grads of other partitions during the next reduction + # to avoid clearing them before the reduction is complete. + if self.previous_reduced_grads is None: + self.previous_reduced_grads = [] + self.previous_reduced_grads.append(param) + else: + self.clear_grad_attribute(param) + elif self.contiguous_gradients: + self.copy_grads_in_partition(param) + else: # zero stage 1 - partition only optimizer state + if self.contiguous_gradients and self.is_param_in_current_partition[param_id]: + self.copy_grads_in_partition(param) + + self.grads_in_ipg_bucket = [] + self.params_in_ipg_bucket = [] + self.ipg_bucket_has_moe_params = False + self.elements_in_ipg_bucket = 0 + ##################################################################### + + def reduce_ready_partitions_and_remove_grads(self, param, i): + if self.partition_gradients or self.is_gradient_accumulation_boundary: + self.reduce_independent_p_g_buckets_and_remove_grads(param, i) + + def zero_reduced_gradients(self, partition_id, i): + + def are_all_related_partitions_reduced(params_id): + for partition_id in self.param_to_partition_ids[i][params_id]: + if not self.is_partition_reduced[i][partition_id]: + return False + return True + + for params_id in self.is_grad_computed[i][partition_id]: + if are_all_related_partitions_reduced(params_id): + self.param_dict[params_id].grad = None # dead code + + def flatten_and_print(self, message, tensors, start=0, n=5): + flatten_tensor = self.flatten(tensors) + + def print_func(): + logger.info(flatten_tensor.contiguous().view(-1).narrow(0, start, n)) + + self.sequential_execution(print_func, message) + + def get_grads_to_reduce(self, i, partition_id): + + def get_reducible_portion(key): + grad = self.param_dict[key].grad + total_elements = grad.numel() + start = self.grad_start_offset[i][partition_id][key] + num_elements = min(total_elements - start, + self.partition_size[i] - self.grad_partition_insertion_offset[i][partition_id][key]) + if not pg_correctness_test: + if num_elements == total_elements: + return grad + else: + return grad.contiguous().view(-1).narrow(0, int(start), int(num_elements)) + else: + if num_elements == total_elements: + return grad.clone() + else: + return grad.clone().contiguous().view(-1).narrow(0, int(start), int(num_elements)) + + grads_to_reduce = [] + for key in self.is_grad_computed[i][partition_id]: + grad = get_reducible_portion(key) + grads_to_reduce.append(grad) + return grads_to_reduce + + def sequential_execution(self, function, message, group=None): + if group is None: + group = self.dp_process_group + if dist.get_rank(group=group) == 0: + logger.info(message) + for id in range(dist.get_world_size(group=group)): + if id == dist.get_rank(group=group): + function() + dist.barrier(group=group) + + def set_none_gradients_to_zero(self, i, partition_id): + for param_id in 
self.is_grad_computed[i][partition_id]: + param = self.param_dict[param_id] + if param.grad is None: + param.grad = torch.zero_like(param) + + ######################Reduction Related Methods############################## + def allreduce_bucket(self, bucket, rank=None, log=None, divide=True, process_group=None): + tensor = self.flatten(bucket) + + process_group = self.dp_process_group if process_group is None else process_group + + tensor_to_allreduce = tensor + + if pg_correctness_test or self.sequence_parallel_size > 1: + communication_data_type = torch.float32 + else: + communication_data_type = self.communication_data_type + + if communication_data_type != tensor.dtype: + tensor_to_allreduce = tensor.to(communication_data_type) + + if divide: + tensor_to_allreduce.div_(dist.get_world_size(group=process_group) / float(self.sequence_parallel_size)) + + if rank is None: + # "All Reducing" + dist.all_reduce(tensor_to_allreduce, group=process_group) + else: + global_rank = dist.get_global_rank(process_group, rank) + dist.reduce(tensor_to_allreduce, global_rank, group=process_group) + + if communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce: + if rank is None or rank == dist.get_rank(group=process_group): + tensor.copy_(tensor_to_allreduce) + + return tensor + + def _clear_previous_reduced_grads(self): + if self.previous_reduced_grads is not None: + for param in self.previous_reduced_grads: + self.clear_grad_attribute(param) + self.previous_reduced_grads = None + + # if rank is specified do a reduction instead of an allreduce + def allreduce_and_copy(self, small_bucket, rank=None, log=None, divide=True, process_group=None): + process_group = self.dp_process_group if process_group is None else process_group + if self.overlap_comm: + if not get_accelerator().resolves_data_dependency(): + get_accelerator().synchronize() + # It is safe to clear the previously reduced grads of other partitions + self._clear_previous_reduced_grads() + stream = self.reduction_stream + else: + stream = get_accelerator().current_stream() + + with get_accelerator().stream(stream): + allreduced = self.allreduce_bucket( + small_bucket, + rank=rank, + log=log, + divide=divide, + process_group=process_group, + ) + if rank is None or rank == dist.get_rank(group=self.dp_process_group): + for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): + buf.copy_(synced) + + def allreduce_no_retain( + self, + bucket, + numel_per_bucket=500000000, + rank=None, + log=None, + divide=True, + process_group=None, + ): + small_bucket = [] + numel = 0 + for tensor in bucket: + small_bucket.append(tensor) + numel = numel + tensor.numel() + if numel > numel_per_bucket: + self.allreduce_and_copy(small_bucket, rank=rank, log=None, divide=divide, process_group=process_group) + small_bucket = [] + numel = 0 + + if len(small_bucket) > 0: + self.allreduce_and_copy(small_bucket, rank=rank, log=log, divide=divide, process_group=process_group) + + # allows using reduction of gradients instead of using all_reduce + + def buffered_reduce_fallback(self, rank, grads, elements_per_buffer=500000000, log=None): + split_buckets = split_half_float_double(grads) + + for i, bucket in enumerate(split_buckets): + self.allreduce_no_retain(bucket, numel_per_bucket=elements_per_buffer, rank=rank, log=log) + + ############################################################################# + ############################################################################# + 
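The reduction helpers above always operate on a flattened bucket: gradients are flattened into one tensor, optionally cast to the communication dtype, pre-divided by the (sequence-parallel adjusted) world size, reduced, and the synced slices are copied back. Below is a minimal sketch of that flatten/reduce/copy-back cycle, with the collective replaced by a comment so it runs without a process group; `allreduce_bucket_sketch` is an illustrative name, not part of the class above.

```python
import torch

# Minimal sketch of the flatten -> reduce -> copy-back cycle (single process;
# the real code calls dist.all_reduce on the flat tensor across the DP group).
def allreduce_bucket_sketch(bucket, world_size=4):
    flat = torch.cat([t.contiguous().view(-1) for t in bucket])
    flat.div_(world_size)                       # the divide=True pre-scaling
    # dist.all_reduce(flat, group=dp_process_group)
    offset = 0
    for t in bucket:
        n = t.numel()
        t.copy_(flat.narrow(0, offset, n).view_as(t))  # write synced values back
        offset += n

grads = [torch.ones(4), torch.ones(2, 3)]
allreduce_bucket_sketch(grads)
print(grads[0])  # tensor([0.2500, 0.2500, 0.2500, 0.2500])
```

Batching many small gradients into one flat collective is what keeps communication efficient; the `numel_per_bucket` argument of the retain-free paths above bounds the size of each flat call.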
############################################################################# + + # views the tensor as multiple partitions and returns + # those partitions + def get_data_parallel_partitions(self, tensor, group_id): + partitions = [] + + dp = dist.get_world_size(group=self.real_dp_process_group[group_id]) + # dp_id = dist.get_rank(group=self.real_dp_process_group[group_id]) + + total_num_elements = tensor.numel() + + base_size = total_num_elements // dp + remaining = total_num_elements % dp + + start = 0 + for id in range(dp): + partition_size = base_size + if id < remaining: + partition_size = partition_size + 1 + partitions.append(tensor.narrow(0, start, partition_size)) + start = start + partition_size + return partitions + + def get_partition_info(self, tensor_list, partition_size, partition_id): + params_in_partition = [] + params_not_in_partition = [] + + start_index = partition_size * partition_id + end_index = partition_size * (partition_id + 1) + + current_index = 0 + first_offset = 0 + + for tensor in tensor_list: + + tensor_size = tensor.numel() + + if start_index <= current_index < end_index: + params_in_partition.append(tensor) + + elif current_index < start_index < (current_index + tensor_size): + params_in_partition.append(tensor) + + assert (first_offset == 0 + ), "This can happen either zero or only once as this must be the first tensor in the partition" + first_offset = start_index - current_index + + else: + params_not_in_partition.append(tensor) + + current_index = current_index + tensor_size + + return params_in_partition, params_not_in_partition, first_offset + + def zero_grad(self, set_to_none=True): + """ + Zero FP16 parameter grads. + """ + # FP32 grad should never exist. + # For speed, set model fp16 grad to None by default + # zero all pointers to grad tensors + for group in self.bit16_groups: + for p in group: + if set_to_none: + p.grad = None # epilogue and in step + p.grad_accum = None + else: + if p.grad is not None: + p.grad.detach_() + p.grad.zero_() + + def _model_parallel_all_reduce(self, tensor, op): + """ Perform all reduce within model parallel group, if any. + """ + if self.model_parallel_group is None or self.model_parallel_world_size == 1: + pass + else: + dist.all_reduce(tensor=tensor, op=op, group=self.model_parallel_group) + + def get_grad_norm_direct(self, gradients, params, norm_type=2): + """Clips gradient norm of an iterable of parameters. + + This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and + added functionality to handle model parallel parameters. Note that + the gradients are modified in place. + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + max_norm (float or int): max norm of the gradients + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + + Returns: + Total norm of the parameters (viewed as a single vector). + """ + norm_type = float(norm_type) + all_norms = [] + if norm_type == inf: + for g in gradients: + all_norms.append(g.data.abs().max().float()) + total_norm = torch.stack(all_norms).max() + dist.all_reduce(total_norm, op=dist.ReduceOp.MAX, group=self.dp_process_group) + + # Take max across all GPUs. + self._model_parallel_all_reduce(tensor=total_norm, op=dist.ReduceOp.MAX) + else: + # if dist.get_rank() == 0: + # logger.info(f"Total Norm beginning {total_norm}") + for g, p in zip(gradients, params): + # Pipeline parallelism may replicate parameters. Avoid multi-counting. 
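`get_data_parallel_partitions` above returns narrow() views rather than copies, and spreads the remainder elements over the first ranks so the views tile the flat tensor exactly. A standalone sketch of the same splitting rule (function and variable names are illustrative):

```python
import torch

# Minimal sketch of the partitioning scheme above: split a flat tensor into `dp`
# views via narrow(), giving the first `remaining` ranks one extra element so the
# pieces cover the tensor exactly.
def partition_flat_tensor(tensor, dp):
    total = tensor.numel()
    base, remaining = divmod(total, dp)
    partitions, start = [], 0
    for rank in range(dp):
        size = base + (1 if rank < remaining else 0)
        partitions.append(tensor.narrow(0, start, size))
        start += size
    return partitions

flat = torch.arange(10)
print([p.tolist() for p in partition_flat_tensor(flat, dp=4)])
# [[0, 1, 2], [3, 4, 5], [6, 7], [8, 9]]
```

Because each partition is a view, writes into a rank's partition land directly in the underlying flat buffer, which the later all-gather of updated partitions relies on.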
+ if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated: + continue + if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): + all_norms.append( + torch.norm(g.data.double().detach(), norm_type).to(get_accelerator().current_device_name())) + if len(all_norms) > 0: + total_norm = torch.stack(all_norms).square().sum().float() + else: + total_norm = torch.tensor(0.0, dtype=torch.float32).to(self.device) + # Sum across all model parallel Device. + dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=self.dp_process_group) + + self._model_parallel_all_reduce(tensor=total_norm, op=dist.ReduceOp.SUM) + + total_norm = total_norm.pow(1. / norm_type) + + norm_is_inf = total_norm.isinf() + norm_is_nan = total_norm.isnan() + inf_or_nan = norm_is_nan.logical_or(norm_is_inf) + + err = torch.tensor(-1.0, device=self.device, dtype=torch.float) + total_norm = inf_or_nan * err + inf_or_nan.logical_not() * total_norm + return total_norm + + # creates a flat fused tensor from the tensor list starting at the first_offset + # in the first tensor of the list. If there are not enough elements in the tensor + # list then the flat tensor will be padded with zeros + def get_flat_partition(self, tensor_list, first_offset, partition_size, dtype, device, return_tensor_list=False): + flat_tensor_list = [] + current_size = 0 + + for i, tensor in enumerate(tensor_list): + grad_accum = self.get_param_gradient_attribute(tensor) + if grad_accum is None: + grad_accum = torch.zeros_like(tensor, dtype=dtype) + + tensor = grad_accum + num_elements = tensor.numel() + tensor_offset = 0 + + # we need to offset to get to the right element + if i == 0 and first_offset > 0: + tensor_offset = first_offset + num_elements = num_elements - tensor_offset + + # we dont need all elements of the tensor + if num_elements > (partition_size - current_size): + num_elements = partition_size - current_size + + # we need a narrow view of the tensor based on the tensor offset and number of elements that + # we need from this tensor + if tensor_offset > 0 or num_elements < tensor.numel(): + flat_tensor_list.append(tensor.contiguous().view(-1).narrow(0, int(tensor_offset), int(num_elements))) + else: + flat_tensor_list.append(tensor) + + current_size = current_size + num_elements + + # this means its the last partition and does not align with the dp boundary. 
We need to pad before flattening + if current_size < partition_size: + flat_tensor_list.append(torch.zeros(int(partition_size - current_size), dtype=dtype, device=device)) + + if return_tensor_list: + return flat_tensor_list + + return self.flatten(flat_tensor_list) + + def free_grad_in_param_list(self, param_list): + for p in param_list: + p.grad = None # in step + p.grad_accum = None + + def reset_cpu_buffers(self): + self.norm_for_param_grads = {} + self.local_overflow = False + + def set_lr(self, lr): + """Set the learning rate.""" + for param_group in self.optimizer.param_groups: + param_group["lr"] = lr + + def get_lr(self): + """Return the current learning rate.""" + return self.optimizer.param_groups[0]["lr"] + + def override_loss_scale(self, loss_scale): + if loss_scale != self.external_loss_scale: + logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}') + self.custom_loss_scaler = True + self.external_loss_scale = loss_scale + + def scaled_global_norm(self, norm_type=2): + assert norm_type == 2, "only L2 norm supported" + norm_groups = [] + for i, group in enumerate(self.bit16_groups): + if self.cpu_offload: + # complete complete_grad_norm_calculation_for_cpu_offload return python float, moving back to + # torch.tensor as else statement returns tensor as well + norm = torch.tensor(self.complete_grad_norm_calculation_for_cpu_offload(self.params_in_partition[i]), + device=self.device) + norm_groups.append(norm) + else: + norm_groups.append(self.get_grad_norm_direct(self.averaged_gradients[i], self.params_in_partition[i])) + + if self.has_moe_layers: + self._average_expert_grad_norms(norm_groups) + + # calculating L2 norm + return torch.norm(torch.stack(norm_groups), p=norm_type) + + def get_bit16_param_group(self, group_no): + bit16_partitions = self.parallel_partitioned_bit16_groups[group_no] + partition_id = dist.get_rank(group=self.real_dp_process_group[group_no]) + return [bit16_partitions[dist.get_rank(group=self.real_dp_process_group[group_no])]] + + def _optimizer_step(self, group_no): + original_param_groups = self.optimizer.param_groups + self.optimizer.param_groups = [original_param_groups[group_no]] + # Disabling this as the C++ side copy & synchronize is not working correctly + #from deepspeed.ops.adam import DeepSpeedCPUAdam + #if type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half: + # self.optimizer.step(fp16_param_groups=[self.get_bit16_param_group(group_no)]) + #else: + # self.optimizer.step() + self.optimizer.step() + self.optimizer.param_groups = original_param_groups + + # We need to link optimizer state after the first step() call + self._lazy_init_hp_params_optimizer_state() + + def step(self, closure=None): + """ + Not supporting closure. 
+ """ + self.micro_step_id = -1 + + see_memory_usage(f"In step before checking overflow") + + # First compute norm for all group so we know if there is overflow + if self.dtype == torch.float16: + self.check_overflow() + + prev_scale = self.loss_scale + self._update_scale(self.overflow) + if self.overflow: + see_memory_usage('After overflow before clearing gradients') + self.zero_grad(set_to_none=True) + if self.cpu_offload: + self.reset_cpu_buffers() + else: + self.averaged_gradients = {} + + see_memory_usage('After overflow after clearing gradients') + + for timer in OPTIMIZER_TIMERS: + self.timers(timer).start() + self.timers(timer).stop() + return + + # Step 1:- Calculate gradient norm using bit-16 grads + see_memory_usage('Before norm calculation') + scaled_global_grad_norm = self.scaled_global_norm() + self._global_grad_norm = scaled_global_grad_norm / prev_scale + see_memory_usage('After norm before optimizer') + + # Step 2:- run optimizer and upscaling simultaneously + for i, group in enumerate(self.bit16_groups): + self.timers(OPTIMIZER_GRADIENTS_TIMER).start() + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + if self.cpu_offload: + single_grad_partition = self.single_partition_of_fp32_groups[i].grad + self.unscale_and_clip_grads([single_grad_partition], scaled_global_grad_norm) + + self.timers(OPTIMIZER_GRADIENTS_TIMER).stop() + self.timers(OPTIMIZER_STEP_TIMER).start() + self._optimizer_step(i) + + # Disabled, this is not currently working + #from deepspeed.ops.adam import DeepSpeedCPUAdam + #if not (type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half): + # bit16_partitions = self.parallel_partitioned_bit16_groups[i] + # fp32_partition = self.single_partition_of_fp32_groups[i] + # bit16_partitions[partition_id].data.copy_(fp32_partition.data) + bit16_partitions = self.parallel_partitioned_bit16_groups[i] + fp32_partition = self.single_partition_of_fp32_groups[i] + bit16_partitions[partition_id].data.copy_( + fp32_partition.to(get_accelerator().current_device_name()).data) + + self.timers(OPTIMIZER_STEP_TIMER).stop() + else: + # free gradients for all the parameters that are not updated by this process(ZeRO stage2) + self.free_grad_in_param_list(self.params_not_in_partition[i]) + + # create a flat gradients for parameters updated by this process + # If we are last partition, ensure we have same size grads and partition size, if not pad with zero tensors + if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1: + single_grad_partition = self.flatten_dense_tensors_aligned( + self.averaged_gradients[i], + int(self.partition_size[i])).to(self.single_partition_of_fp32_groups[i].dtype) + else: + single_grad_partition = self.flatten(self.averaged_gradients[i]).to( + self.single_partition_of_fp32_groups[i].dtype) + assert single_grad_partition.numel() == self.partition_size[i], \ + "averaged gradients have different number of elements that partition size {} {} {} {}".format( + single_grad_partition.numel(), self.partition_size[i], i, partition_id) + + self.single_partition_of_fp32_groups[i].grad = single_grad_partition + # release all the gradient since we have already created a necessary copy in dp_grad_partition(ZeRO stage2) + self.free_grad_in_param_list(self.params_in_partition[i]) + + self.averaged_gradients[i] = None + + self.unscale_and_clip_grads([single_grad_partition], scaled_global_grad_norm) + + self.timers(OPTIMIZER_GRADIENTS_TIMER).stop() + + # Step 3:- run the optimizer if no offloading + 
self.timers(OPTIMIZER_STEP_TIMER).start() + self._optimizer_step(i) + # Step 4:- get rid of the fp32 gradients. Not needed anymore + self.single_partition_of_fp32_groups[i].grad = None + del single_grad_partition + bit16_partitions = self.parallel_partitioned_bit16_groups[i] + fp32_partition = self.single_partition_of_fp32_groups[i] + bit16_partitions[partition_id].data.copy_(fp32_partition.data) + self.timers(OPTIMIZER_STEP_TIMER).stop() + + see_memory_usage('After optimizer before all-gather') + if self.cpu_offload: + self.reset_cpu_buffers() + + self.timers(OPTIMIZER_ALLGATHER_TIMER).start() + # Gather the updated weights from everyone. + # Then all partitions of the model parameters are updated and ready for next round forward. + all_gather_dp_groups(groups_flat=self.bit16_groups_flat, + partitioned_param_groups=self.parallel_partitioned_bit16_groups, + dp_process_group=self.real_dp_process_group, + start_alignment_factor=self.nccl_start_alignment_factor, + allgather_bucket_size=self.allgather_bucket_size) + self.timers(OPTIMIZER_ALLGATHER_TIMER).stop() + + # TODO: we probably don't need this? just to be safe + for i in range(len(self.bit16_groups)): + self._update_model_bit16_weights(i) + + self.timers.log(OPTIMIZER_TIMERS) + see_memory_usage('After zero_optimizer step') + + return + + @torch.no_grad() + def update_lp_params(self): + for i, (bit16_partitions, fp32_partition) in enumerate( + zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups)): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + bit16_partitions[partition_id].data.copy_(fp32_partition.data) + # print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True) + # if i == 0: + # print_rank_0(f'{fp32_partition[:10]=}', force=True) + all_gather_dp_groups(groups_flat=self.bit16_groups_flat, + partitioned_param_groups=self.parallel_partitioned_bit16_groups, + dp_process_group=self.real_dp_process_group, + start_alignment_factor=self.nccl_start_alignment_factor, + allgather_bucket_size=self.allgather_bucket_size) + + def _average_expert_grad_norms(self, norm_groups): + for i, norm in enumerate(norm_groups): + if self.is_moe_param_group[i]: + scaled_norm_tensor = norm * 1.0 / dist.get_world_size(group=self.real_dp_process_group[i]) + if self.device == 'cpu': + scaled_norm_tensor = scaled_norm_tensor.to(get_accelerator().current_device_name()) + dist.all_reduce(scaled_norm_tensor, group=self.real_dp_process_group[i]) + norm_groups[i] = scaled_norm_tensor.to(self.device) + + def unscale_and_clip_grads(self, grad_groups_flat, total_norm): + # compute combined scale factor for this group + combined_scale = self.loss_scale + if self.clip_grad > 0.: + # norm is in fact norm*scale + clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad + if clip > 1: + combined_scale = clip * self.loss_scale + + for grad in grad_groups_flat: + if isinstance(grad, list): + sub_partitions = grad + for g in sub_partitions: + g.data.mul_(1. / combined_scale) + else: + grad.data.mul_(1. 
/ combined_scale) + + def _check_overflow(self, partition_gradients=True): + self.overflow = self.has_overflow(partition_gradients) + + # `params` is a list / generator of torch.Variable + def has_overflow_serial(self, params): + invalid_grad_count = torch.zeros([1], dtype=torch.float, device=get_accelerator().current_device_name()) + for p in params: + if p.grad is not None: + invalid_grad_count += self._has_inf_or_nan(p.grad) + return invalid_grad_count.bool() + + def has_overflow_partitioned_grads_serial(self): + invalid_grad_count = torch.zeros([1], dtype=torch.float, device=get_accelerator().current_device_name()) + for i in range(len(self.bit16_groups)): + for j, grad in enumerate(self.averaged_gradients[i]): + if grad is not None: + invalid_grad_count += self._has_inf_or_nan(grad) + return invalid_grad_count.bool() + + def has_overflow(self, partition_gradients=True): + if partition_gradients: + overflow = self.local_overflow if self.cpu_offload else self.has_overflow_partitioned_grads_serial() + overflow_gpu = get_accelerator().ByteTensor([overflow]) if self.cpu_offload else overflow.byte().to( + get_accelerator().current_device_name()) + '''This will capture overflow across all data parallel and expert parallel process + Since expert parallel process are a subset of data parallel process''' + dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.dp_process_group) + + else: + params = [] + for group in self.bit16_groups: + for param in group: + params.append(param) + overflow_gpu = self.has_overflow_serial(params).byte().to(get_accelerator().current_device_name()) + + # Since each model parallel GPU carries only part of the model, + # make sure overflow flag is synced across all the model parallel GPUs + self._model_parallel_all_reduce(tensor=overflow_gpu, op=dist.ReduceOp.MAX) + + overflow = overflow_gpu[0].item() + return bool(overflow) + + # `x` is a torch.Tensor + @staticmethod + def _has_inf_or_nan(x, j=None): + float_x = x.float() + nan = float_x.isnan() + inf = float_x.isinf() + inf_or_nan = nan.logical_or(inf) + return inf_or_nan.float().max() + + def backward(self, loss, retain_graph=False): + """ + :attr:`backward` performs the following steps: + + 1. fp32_loss = loss.float() + 2. scaled_loss = fp32_loss*loss_scale + 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves + """ + self.micro_step_id += 1 + + if self.contiguous_gradients: + self.ipg_buffer = [] + buf_0 = torch.empty(int(self.reduce_bucket_size), + dtype=self.dtype, + device=get_accelerator().current_device_name()) + self.ipg_buffer.append(buf_0) + + # Use double buffers to avoid data access conflict when overlap_comm is enabled. 
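Because gradients still carry the loss scale, `unscale_and_clip_grads` above folds unscaling and clipping into a single multiply: the clip factor is computed from the scaled norm, and only if it exceeds 1 does it grow the combined divisor. A small numeric sketch of that arithmetic (names are illustrative, not the optimizer's API):

```python
import torch

# Minimal sketch of the combined unscale-and-clip factor: gradients are stored
# pre-multiplied by loss_scale, so one multiply both removes the scale and
# applies clipping when the (unscaled) norm exceeds clip_grad.
def unscale_and_clip(grads, total_scaled_norm, loss_scale, clip_grad):
    combined_scale = loss_scale
    if clip_grad > 0.0:
        clip = ((total_scaled_norm / loss_scale) + 1e-6) / clip_grad
        if clip > 1:
            combined_scale = clip * loss_scale
    for g in grads:
        g.mul_(1.0 / combined_scale)

g = torch.full((4,), 1024.0)  # scaled gradient (true grad 1.0 each, loss_scale 1024)
unscale_and_clip([g], total_scaled_norm=2048.0, loss_scale=1024.0, clip_grad=1.0)
print(g)  # ~0.5 each: the true gradient (norm 2.0) is unscaled and clipped to norm ~1.0
```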
+ if self.overlap_comm: + buf_1 = torch.empty(int(self.reduce_bucket_size), + dtype=self.dtype, + device=get_accelerator().current_device_name()) + self.ipg_buffer.append(buf_1) + self.ipg_index = 0 + + if self.custom_loss_scaler: + scaled_loss = self.external_loss_scale * loss + scaled_loss.backward() + else: + self.loss_scaler.backward(loss.float(), retain_graph=retain_graph) + + # Only for Stage 1, Mode 2 + if self.use_grad_accum_attribute: + self.fill_grad_accum_attribute() + + def check_overflow(self, partition_gradients=True): + self._check_overflow(partition_gradients) + + def _update_scale(self, has_overflow=False): + self.loss_scaler.update_scale(has_overflow) + + # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" + def _get_state(self): + return self.optimizer.state + + def _set_state(self, value): + self.optimizer.state = value + + state = property(_get_state, _set_state) + + # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" + # (for example, to adjust the learning rate) + def _get_param_groups(self): + return self.optimizer.param_groups + + def _set_param_groups(self, value): + self.optimizer.param_groups = value + + param_groups = property(_get_param_groups, _set_param_groups) + + # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" + def _get_loss_scale(self): + if self.custom_loss_scaler: + return self.external_loss_scale + else: + return self.loss_scaler.cur_scale + + def _set_loss_scale(self, value): + self.loss_scaler.cur_scale = value + + loss_scale = property(_get_loss_scale, _set_loss_scale) + cur_scale = property(_get_loss_scale, _set_loss_scale) + + # Return group tensor after removing paddings that are added for alignment to DP world size. + # This method works on the assumption that each group contains a single flattened tensor. + def _get_groups_without_padding(self, groups_with_padding): + groups_without_padding = [] + for i, group in enumerate(groups_with_padding): + lean_length = group.numel() - self.groups_padding[i] + groups_without_padding.append(group[:lean_length]) + + return groups_without_padding + + # Return optimizer state after removing paddings that are added for alignment. + def _get_state_without_padding(self, state_with_padding, padding): + lean_state = {} + for key, value in state_with_padding.items(): + if torch.is_tensor(value): + lean_length = value.numel() - padding + lean_state[key] = value[:lean_length] + else: + lean_state[key] = value + + return lean_state + + # Return base optimizer states. + # This method assumes that each param group contains a single flattened tensor. + def _get_base_optimizer_state(self): + optimizer_groups_state = [] + for i, group in enumerate(self.optimizer.param_groups): + p = group['params'][0] + lean_optimizer_state = self._get_state_without_padding(self.optimizer.state[p], self.groups_padding[i]) + optimizer_groups_state.append(lean_optimizer_state) + + return optimizer_groups_state + + def state_dict(self): + """ + Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. + This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict + of the contained Pytorch optimizer. 
+ Example:: + checkpoint = {} + checkpoint['model'] = model.state_dict() + checkpoint['optimizer'] = optimizer.state_dict() + torch.save(checkpoint, "saved.pth") + """ + state_dict = {} + state_dict[LOSS_SCALER] = self.loss_scaler + state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale + state_dict['overflow'] = self.overflow + state_dict[CLIP_GRAD] = self.clip_grad + + if self.elastic_checkpoint: + state_dict[BASE_OPTIMIZER_STATE] = self._get_base_optimizer_state() + + if "step" in self.optimizer.param_groups[0]: + # Assuming "step" is the only item that changes through training iterations + assert all(group["step"] == self.optimizer.param_groups[0]["step"] + for group in self.optimizer.param_groups), "All param groups must have the same step value" + state_dict[BASE_OPTIMIZER_STATE_STEP] = self.optimizer.param_groups[0]["step"] + else: + state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict() + + # Remove paddings for DP alignment to enable loading for other alignment values + fp32_groups_without_padding = self._get_groups_without_padding(self.single_partition_of_fp32_groups) + state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = fp32_groups_without_padding + + state_dict[ + ZERO_STAGE] = ZeroStageEnum.gradients if self.partition_gradients else ZeroStageEnum.optimizer_states + state_dict[GROUP_PADDINGS] = self.groups_padding + state_dict[PARTITION_COUNT] = self.partition_count + + state_dict[DS_VERSION] = version + state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings + + return state_dict + + # Restore base optimizer fp32 weights from elastic checkpoint by: + # 1) Merging fp32 weights from checkpoints of all partitions + # 2) Extracting fp32 weights for current partition from merged weights + # 3) Using extracted weights to update base optimizer weights directly. + def _restore_from_elastic_fp32_weights(self, all_state_dict): + merged_single_partition_of_fp32_groups = [] + + for i in range(len(self.single_partition_of_fp32_groups)): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + merged_partitions = [sd[SINGLE_PARTITION_OF_FP32_GROUPS][i] for sd in all_state_dict] + if self.is_moe_group(self.optimizer.param_groups[i]): + ranks = self.get_ep_ranks(group_name=self.optimizer.param_groups[i]['name']) + merged_partitions = [merged_partitions[i] for i in ranks] + flat_merged_partitions = self.flatten_dense_tensors_aligned( + merged_partitions, + self.nccl_start_alignment_factor * dist.get_world_size(group=self.real_dp_process_group[i])) + dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions, i) + merged_single_partition_of_fp32_groups.append(dp_partitions[partition_id]) + + for current, saved in zip(self.single_partition_of_fp32_groups, merged_single_partition_of_fp32_groups): + current.data.copy_(saved.data) + + # Restore base optimizer fp32 weights from ZeRO fp16 or bfloat16 weights + def _restore_from_bit16_weights(self): + for group_id, (bit16_partitions, fp32_partition) in enumerate( + zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups)): + partition_id = dist.get_rank(group=self.real_dp_process_group[group_id]) + fp32_partition.data.copy_(bit16_partitions[partition_id].data) + + # Refresh the fp32 master params from the fp16 or bfloat16 copies. 
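+    # Note: this recovers the masters without a separate fp32 copy, at the cost of the
+    # extra precision they normally carry (see the load_from_fp32_weights discussion in
+    # _load_legacy_checkpoint).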
+ def refresh_fp32_params(self): + self._restore_from_bit16_weights() + + # Extract optimizer state for current partition from merged states of all partitions + def _partition_base_optimizer_state(self, state_key, all_partition_states, group_id): + partition_id = dist.get_rank(group=self.real_dp_process_group[group_id]) + alignment = self.nccl_start_alignment_factor * dist.get_world_size(group=self.real_dp_process_group[group_id]) + if torch.is_tensor(all_partition_states[0]): + flat_merged_partitions = self.flatten_dense_tensors_aligned(all_partition_states, alignment) + dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions, group_id) + return dp_partitions[partition_id] + else: + # Assume non-tensor states are not partitioned and equal across ranks, so return first one + return all_partition_states[0] + + def _restore_step_from_elastic_checkpoint(self, all_state_dict): + assert BASE_OPTIMIZER_STATE_STEP in all_state_dict[0] + assert all(sd[BASE_OPTIMIZER_STATE_STEP] == all_state_dict[0][BASE_OPTIMIZER_STATE_STEP] + for sd in all_state_dict), "State dicts of all partitions must have the same step value" + return all_state_dict[0][BASE_OPTIMIZER_STATE_STEP] + + def _restore_base_optimizer_state(self, base_optimizer_group_states, base_optimizer_state_step, group_paddings): + if type(base_optimizer_group_states) == dict: + base_optimizer_group_states = base_optimizer_group_states['state'] + + saved_keys = base_optimizer_group_states[0].keys() + + for i, group in enumerate(self.optimizer.param_groups): + p = group['params'][0] + padding = 0 if group_paddings is None else group_paddings[i] + for key in saved_keys: + saved = base_optimizer_group_states[i][key] + + if torch.is_tensor(saved): + if key in self.optimizer.state[p]: + dst_tensor = self.optimizer.state[p][key] + src_tensor = _get_padded_tensor(saved, dst_tensor.numel()) + self.optimizer.state[p][key].data.copy_(src_tensor.data) + else: + self.optimizer.state[p][key] = _pad_tensor_by_size( + saved, padding, torch.float32, + torch.device('cpu') if self.cpu_offload else self.device) + else: + self.optimizer.state[p][key] = saved + + for param_group in self.optimizer.param_groups: + param_group['step'] = base_optimizer_state_step + + def get_ep_ranks(self, rank=0, group_name=None): + from deepspeed.utils import groups + expert_parallel_size_ = groups._get_expert_parallel_world_size(group_name) + world_size = groups._get_data_parallel_world_size() + rank = groups._get_expert_parallel_rank(group_name) + ranks = range(rank, world_size, expert_parallel_size_) + return list(ranks) + + # Restore base optimizer state from elastic checkpoint by + # 1) Merging optimizer state from checkpoints of all partitions + # 2) Extracting optimizer state for current partition from the merged state + # 3) Using the extracted value to directly update the base optimizer. 
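+    # For MoE parameter groups, the partitions to merge are first remapped onto the
+    # expert-parallel ranks returned by get_ep_ranks(), mirroring the handling in
+    # _restore_from_elastic_fp32_weights above.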
+ def _restore_elastic_base_optimizer_state(self, all_state_dict): + base_optimizer_group_states = [] + for i in range(len(self.optimizer.param_groups)): + partition_states = {} + all_partition_group_states = [sd[BASE_OPTIMIZER_STATE][i] for sd in all_state_dict] + + if self.is_moe_group(self.optimizer.param_groups[i]): + ranks = self.get_ep_ranks(group_name=self.optimizer.param_groups[i]['name']) + all_partition_group_states = [all_partition_group_states[i] for i in ranks] + + for key in all_partition_group_states[0].keys(): + all_partition_states = [all_states[key] for all_states in all_partition_group_states] + partition_states[key] = self._partition_base_optimizer_state(key, all_partition_states, i) + base_optimizer_group_states.append(partition_states) + + self._restore_base_optimizer_state(base_optimizer_group_states, + self._restore_step_from_elastic_checkpoint(all_state_dict), None) + + def load_state_dict(self, + state_dict_list, + load_optimizer_states=True, + load_from_fp32_weights=False, + checkpoint_folder=None, + load_serial=None): + if checkpoint_folder: + self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights) + else: + self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights) + + def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights): + self.load_hp_checkpoint_state_from_checkpoint_dir("bit16_groups", checkpoint_folder) + + @property + def param_groups(self): + """Forward the wrapped optimizer's parameters.""" + return self.optimizer.param_groups + + def _load_global_state(self, sd): + self.loss_scaler = sd.get(LOSS_SCALER, self.loss_scaler) + self.dynamic_loss_scale = sd.get('dynamic_loss_scale', self.dynamic_loss_scale) + self.overflow = sd.get('overflow', self.overflow) + self.clip_grad = sd.get(CLIP_GRAD, self.clip_grad) + + ckpt_version = sd.get(DS_VERSION, False) + assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed" + ckpt_version = pkg_version.parse(ckpt_version) + + # zero stage 1 mode + if not self.partition_gradients: + required_version = pkg_version.parse("0.3.17") + error_str = f"ZeRO stage 1 changed in {required_version} and is not backwards compatible " \ + "with older stage 1 checkpoints. If you'd like to load an old ZeRO-1 checkpoint " \ + "please use an older version of DeepSpeed (<= 0.5.8) and set 'legacy_stage1': true in your zero config json." + assert required_version <= ckpt_version, f"Old version: {ckpt_version} {error_str}" + + def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False): + r"""Loading ZeRO checkpoint + + Arguments: + state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition. + Note that the number of saved partitions may differ from number of loading partitions to support + changing GPU count, specifically DP world size, between saving and loading checkpoints. + load_optimizer_states: Boolean indicating whether or not to load base optimizer states + load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32 + copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss). + """ + """ + Loads a state_dict created by an earlier call to state_dict(). 
+ If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, + whose parameters in turn came from ``model``, it is expected that the user + will call ``model.load_state_dict()`` before + ``fp16_optimizer_instance.load_state_dict()`` is called. + Example:: + model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() + optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) + optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) + ... + checkpoint = torch.load("saved.pth") + model.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + """ + + # I think it should actually be ok to reload the optimizer before the model. + dp_rank = dist.get_rank(group=self.dp_process_group) + current_rank_sd = state_dict_list[dp_rank] + self._load_global_state(current_rank_sd) + + ckpt_is_rigid = isinstance(current_rank_sd[BASE_OPTIMIZER_STATE], dict) + + # padding is always at the last rank/partition + # if DP=1024 and param-group elems=16 -> padding will be 1024-16 across all but one rank + # scenario-1 (shrink): saving w. 4 gpus -> loading w. 2 gpus + # scenario-2 (expand): saving w. 2 gpus -> loading w. 4 gpus + # if load_optimizer_states: + # if new_dp_size: + # self.strip_padding() + # self.add_padding_w_new_dp_size() + # self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE]) + + if load_optimizer_states: + if ckpt_is_rigid: + # loading rigid ckpt into either rigid or elastic exec + self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE]) + else: + if self.elastic_checkpoint: + # loading elastic into elastic exec + self._restore_elastic_base_optimizer_state(state_dict_list) + else: + # loading an elastic checkpoint into rigid exec + self._restore_base_optimizer_state(current_rank_sd[BASE_OPTIMIZER_STATE], + current_rank_sd[BASE_OPTIMIZER_STATE_STEP], + current_rank_sd[GROUP_PADDINGS]) + + # At this point, the optimizer's references to the model's fp32 parameters are up to date. + # The optimizer's hyperparameters and internal buffers are also up to date. + # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still + # out of date. There are two options. + # 1: Refresh the master params from the model's fp16 params. + # This requires less storage but incurs precision loss. + # 2: Save and restore the fp32 master copies separately. + # We choose option 1 if changing DP degree and option 2 otherwise. + # + # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device + # of their associated parameters, because it's possible those buffers might not exist yet in + # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been + # constructed in the same way as the one whose state_dict we are loading, the same master params + # are guaranteed to exist, so we can just copy_() from the saved master params. + + if load_from_fp32_weights: + # option 2 from above + if self.elastic_checkpoint and not ckpt_is_rigid: + self._restore_from_elastic_fp32_weights(state_dict_list) + else: + # For non-elastic checkpoint, simply copying from saved weights of current rank is sufficient. 
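+                # The partitions were saved without alignment padding, so each saved
+                # tensor is re-padded to the current partition's length before copying.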
+ for current, saved in zip(self.single_partition_of_fp32_groups, + current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]): + src_tensor = _get_padded_tensor(saved, current.numel()) + current.data.copy_(src_tensor.data) + else: + # option 1 from above + self._restore_from_bit16_weights() + + if load_optimizer_states: + self._link_all_hp_params() + + +def _handle_overflow(cpu_sum, x, i): + import math + rank = dist.get_rank() + if rank == 0: + t_i = -1 + for v_i, v in enumerate(x.data.contiguous().view(-1)): + if not math.isfinite(float(v)): + t_i = v_i + break + logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}") + + +def estimate_zero2_model_states_mem_needs(total_params, + num_gpus_per_node=1, + num_nodes=1, + cpu_offload=True, + additional_buffer_factor=1.5): + + total_gpus = num_nodes * num_gpus_per_node + + if cpu_offload: + gpu_mem = 2 * total_params + cpu_mem = total_params * max(4 * total_gpus, 16) * additional_buffer_factor + else: + gpu_mem = 4 * total_params + int(16 * total_params / total_gpus) + cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor + + return int(cpu_mem), int(gpu_mem) + + +def model_to_params(model): + # shared params calculated only once + total_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) + return total_params + + +def estimate_zero2_model_states_mem_needs_all_live(model, + num_gpus_per_node=1, + num_nodes=1, + additional_buffer_factor=1.5): + """ + Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients + for a given ``model`` and hardware setup. + + If you have an actual model object, use this function and everything will be derived + automatically. + + If it's a hypothetical model, use ``estimate_zero2_model_states_mem_needs_all_cold`` where you have to pass + the ``total_params`` explicitly. + + Args: + - ``model``: ``nn.Module`` object + - ``num_gpus_per_node``: how many gpus per node (defaults to 1) + - ``num_nodes``: how many nodes (defaults to 1), + - ``additional_buffer_factor``: estimation factor (defaults to 1.5): + + """ + + total_params = model_to_params(model) + + estimate_zero2_model_states_mem_needs_all_cold(total_params=total_params, + num_gpus_per_node=num_gpus_per_node, + num_nodes=num_nodes, + additional_buffer_factor=additional_buffer_factor) + + +def estimate_zero2_model_states_mem_needs_all_cold(total_params, + num_gpus_per_node=1, + num_nodes=1, + additional_buffer_factor=1.5): + """ + Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients + for a given ``model`` and hardware setup. + + If it's a hypothetical model, use this function where you have to pass + the ``total_params`` and ``largest_layer_params`` explicitly. + + If you have an actual model object, use ``estimate_zero2_model_states_mem_needs_all_live`` and everything + will be derived automatically. 
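+
+    A minimal usage sketch (illustrative parameter count only)::
+
+        # roughly a 1B-parameter model on one node with 8 GPUs
+        estimate_zero2_model_states_mem_needs_all_cold(total_params=1e9,
+                                                       num_gpus_per_node=8,
+                                                       num_nodes=1)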
+ + Args: + - ``total_params``: total model params + - ``num_gpus_per_node``: how many gpus per node (defaults to 1) + - ``num_nodes``: how many nodes (defaults to 1), + - ``additional_buffer_factor``: estimation factor (defaults to 1.5): + + """ + + def format_options(cpu_offload): + enabled = [] + device = f'{OffloadDeviceEnum.cpu:4}' if cpu_offload else "none" + enabled.append(f"offload_optimizer={device}") + return ", ".join(enabled) + + nodes_str = "nodes" if num_nodes > 1 else "node" + gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU" + print("Estimated memory needed for params, optim states and gradients for a:\n" + f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n" + f"SW: Model with {int(total_params/1e6)}M total params.") + print(" per CPU | per GPU | Options") + for cpu_offload in [True, False]: + cpu_mem, gpu_mem = estimate_zero2_model_states_mem_needs(total_params=total_params, + num_gpus_per_node=num_gpus_per_node, + num_nodes=num_nodes, + cpu_offload=cpu_offload, + additional_buffer_factor=additional_buffer_factor) + + options_str = format_options(cpu_offload=cpu_offload) + print(f" {cpu_mem/2**30:7.2f}GB | {gpu_mem/2**30:6.2f}GB | {options_str}") diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/test.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/test.py new file mode 100644 index 0000000000000000000000000000000000000000..1904a8d395be80a4322bc25bd1008a80bdd8f982 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/test.py @@ -0,0 +1,77 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed.runtime.zero.contiguous_memory_allocator import ContiguousMemoryAllocator + + +def test1(): + mem = ContiguousMemoryAllocator(1024, torch.half, 'cpu') + mem.print_allocation(resolution=100) + a1 = mem.allocate_tensor(64).mul_(0.0).add_(1.0) + mem.print_allocation(resolution=100) + mem.release_tensor(a1) + mem.print_allocation(resolution=100) + a2 = mem.allocate_tensor(64).mul_(0.0).add_(2.0) + a3 = mem.allocate_tensor(256).mul_(0.0).add_(3.0) + a4 = mem.allocate_tensor(128).mul_(0.0).add_(4.0) + mem.print_allocation(resolution=100) + mem.release_tensor(a3) + mem.print_allocation(resolution=100) + a5 = mem.allocate_tensor(64).mul_(0.0).add_(5.0) + a6 = mem.allocate_tensor(256).mul_(0.0).add_(6.0) + a7 = mem.allocate_tensor(128).mul_(0.0).add_(7.0) + mem.print_allocation(resolution=100) + a8 = mem.allocate_tensor(256).mul_(0.0).add_(8.0) + a9 = mem.allocate_tensor(128).mul_(0.0).add_(9.0) + mem.print_allocation(resolution=100) + mem.release_tensor(a9) + mem.release_tensor(a6) + mem.release_tensor(a2) + mem.release_tensor(a5) + + a10 = mem.allocate_tensor(512).mul_(0.0).add_(10.0) + mem.print_allocation(resolution=100) + #print(f"a4:{a4}") + #print(f"a7:{a7}") + #print(f"a8:{a8}") + #print(f"a10:{a10}") + assert (a4.norm() + a7.norm() + a8.norm() + a10.norm()).item() == 474.50, "Test failed" + + +def test2(): + mem = ContiguousMemoryAllocator(512, torch.half, 'cpu') + a1 = mem.allocate_tensor(64).mul_(0.0).add_(1.0) + a2 = mem.allocate_tensor(64).mul_(0.0).add_(2.0) + a3 = mem.allocate_tensor(64).mul_(0.0).add_(3.0) + a4 = mem.allocate_tensor(64).mul_(0.0).add_(4.0) + a5 = mem.allocate_tensor(64).mul_(0.0).add_(5.0) + a6 = mem.allocate_tensor(64).mul_(0.0).add_(6.0) + a7 = mem.allocate_tensor(64).mul_(0.0).add_(7.0) + a8 = mem.allocate_tensor(64).mul_(0.0).add_(8.0) + mem.release_tensor(a2) + mem.release_tensor(a4) + 
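+    # Releasing a2/a4 (and a6/a8 below) leaves four non-adjacent 64-element holes in
+    # the 512-element pool, so the 128-element allocations that follow presumably force
+    # the allocator to compact live tensors into contiguous space.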
mem.release_tensor(a6) + mem.release_tensor(a8) + mem.print_allocation(resolution=100) + + a9 = mem.allocate_tensor(128).mul_(0.0).add_(9.0) + a10 = mem.allocate_tensor(64).mul_(0.0).add_(10.0) + a11 = mem.allocate_tensor(64).mul_(0.0).add_(11.0) + mem.release_tensor(a1) + mem.release_tensor(a5) + mem.print_allocation(resolution=100) + a12 = mem.allocate_tensor(128).mul_(0.0).add_(12.0) + mem.print_allocation(resolution=100) + print(f"a7:{a7}") + print(f"a9:{a9}") + print(f"a10:{a10}") + print(f"a11:{a11}") + print(f"a12:{a12}") + assert (a7.norm() + a9.norm() + a10.norm() + a11.norm() + a12.norm()) == 460.75, "TestFailed" + + +test1() +test2() diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/tiling.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/tiling.py new file mode 100644 index 0000000000000000000000000000000000000000..af1c5e54737a75cb66b39736718bf62169952a80 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/tiling.py @@ -0,0 +1,296 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import deepspeed +from deepspeed.runtime.utils import partition_uniform as partition + + +def split_tensor_along_last_dim(tensor, partitions, contiguous_split_chunks=False): + """Split a tensor along its last dimension. Adapted from Megatron-LM. + + Arguments: + tensor: input tensor. + partitions: list of partition sizes to supply to torch.split + contiguous_split_chunks: If True, make each chunk contiguous + in memory. + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + + # Split. + tensor_list = torch.split(tensor, partitions, dim=last_dim) + # Note: torch.split does not create contiguous tensors by default. + if contiguous_split_chunks: + return tuple(chunk.contiguous() for chunk in tensor_list) + + return tensor_list + + +class TiledLinear(torch.nn.Module): + + def __init__(self, + in_features, + out_features, + bias=True, + in_splits=1, + out_splits=1, + input_is_already_split=False, + combine_out_splits=True, + linear_cls=torch.nn.Linear, + init_linear=None, + **kwargs): + """A replacement for ``torch.nn.Linear`` that works with ZeRO-3 to reduce + memory requirements via tiling. + + TiledLinear breaks the input and output dimensions of a linear layer + into tiles that are processed in sequence. This class enables huge + linear layers when combined with ZeRO-3 because inactive tiles can be + partitioned and offloaded. + + .. note:: + We recommend using as few tiles as necessary. Tiling + significantly reduces memory usage, but can reduce throughput + for inexpensive layers. This due to the smaller kernels having + less parallelism and lower arithmetic intensity, while + introducing more frequent synchronization and communication. + + Args: + in_features (int): See ``torch.nn.Linear`` + out_features (int): See ``torch.nn.Linear`` + bias (bool, optional): See ``torch.nn.Linear`` + in_splits (int, optional): The number of tiles along the input dimension. Defaults to 1. + out_splits (int, optional): The number of tiles along the output dimension. Defaults to 1. + input_is_already_split (bool, optional): If set to ``True``, assume that the ``input_`` in + to ``forward()`` is already split into ``in_splits`` chunks. Defaults to ``False``. + combine_out_splits (bool, optional): If set to ``False``, do not combine the ``out_splits`` outputs + into a single tensor. Defaults to ``True``. + linear_cls (class, optional): The underlying class to build individual tiles. 
+ Defaults to ``torch.nn.Linear``. + init_linear (``torch.nn.Linear``, optional): If set, copy the parameters of + ``init_linear``. Useful for debugging. Defaults to ``None``. + kwargs (dict, optional): additional keyword arguments to provide to ``linear_cls()``. + + Raises: + RuntimeError: ``in_splits`` must be within the range [1, in_features). + RuntimeError: ``out_splits`` must be within the range of [1, out_features). + """ + + super().__init__() + + if (in_splits < 1) or (in_splits > in_features): + raise RuntimeError('in splits must be in range [1, in_features].') + if (out_splits < 1) or (out_splits > out_features): + raise RuntimeError('out splits must be in range [1, out_features].') + + # global, not necessarily local + self.in_features = in_features + self.out_features = out_features + self.use_bias = bias + + self.out_splits = out_splits + self.in_splits = in_splits + self.input_is_already_split = input_is_already_split + self.combine_out_splits = combine_out_splits + + # Build partition-lists. These are CSR-style splits [0, part0, part1, ..., features] + # For example, row_parts[p] gives the start of partition p and row_parts[p+1] + # is the exclusive end. + self.in_parts = partition(num_items=in_features, num_parts=in_splits) + self.out_parts = partition(num_items=out_features, num_parts=out_splits) + + assert len(self.out_parts) == out_splits + 1 + assert len(self.in_parts) == in_splits + 1 + assert self.out_parts[0] == 0 + assert self.out_parts[out_splits] == out_features + assert self.in_parts[in_splits] == in_features + + self.linears = torch.nn.ModuleList() + for out_id in range(out_splits): + self.linears.append(torch.nn.ModuleList()) + + local_out_dim = self.out_parts[out_id + 1] - self.out_parts[out_id] + + for in_id in range(in_splits): + #if input_size is split, we only need one bias + local_bias = bias if in_id == (in_splits - 1) else False + + local_in_dim = self.in_parts[in_id + 1] - self.in_parts[in_id] + local = linear_cls(local_in_dim, local_out_dim, bias=local_bias, **kwargs) + self.linears[out_id].append(local) + + # Optionally initialize with a known tensor + if init_linear is not None: + self.copy_params_from(init_linear) + + def forward(self, input_): + if self.in_splits > 1 and not self.input_is_already_split: + input_parts = partition(input_.shape[-1], self.in_splits) + split_sizes = [input_parts[p + 1] - input_parts[p] for p in range(self.in_splits)] + inputs = self._split_global_input(input_, split_sizes) + elif self.in_splits > 1: + inputs = input_ + assert len( + inputs) == self.in_splits, f"Col splits {self.in_splits} does not match input splits {len(inputs)}" + else: + # no splits + inputs = [input_] + + outputs = [None] * self.out_splits + for out_id in range(self.out_splits): + for in_id in range(self.in_splits): + local_output = self.linears[out_id][in_id](inputs[in_id]) + + outputs[out_id] = self._reduce_local_output(in_id=in_id, + out_id=out_id, + current_out=outputs[out_id], + new_out=local_output) + + if self.combine_out_splits: + return self._combine_output_splits(outputs) + + return outputs + + def _split_global_input(self, input, split_sizes): + """Partition an input tensor along the last dimension, aligned with given splits. + + Subclasses should override this method to account for new input types. + + Args: + input (List[Tensor]): The tensor to partition along the last dimension. + split_sizes (List[int]): The size of each partition. + + Returns: + List[Any]: A list of the chunks of ``input``. 
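+
+        The default implementation simply delegates to ``split_tensor_along_last_dim``.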
+ """ + return split_tensor_along_last_dim(input, split_sizes) + + def _reduce_local_output(self, in_id, out_id, current_out, new_out): + """Reduce (sum) a new local result into the existing local results. + + Subclasses should override this method. + + For a given ``out_id``, this method is called ``in_id-1`` times. The first input + split is a simple assignment. + + Args: + in_id (int): The input split that produced ``new_out``. + out_id (int): The output split that produced ``new_out``. + current_out (Any): The reduced form of all previous ``out_id`` results. + new_out (Any): The local result from forward (``in_id``, ``out_id``)e + + Returns: + Any: The combined result of ``current_out`` and ``new_out``. + """ + + if current_out is None: + #this clone is necessary to preserve auto grad + #there is some issue with inplace update for outputs that are views + return new_out.clone() + else: + return current_out + new_out + + def _combine_output_splits(self, outputs): + """Join the splits of the output into a single result. + + Args: + outputs (List[Any]): The reduced outputs for each output split. + + Returns: + Any: The combined outputs. + """ + assert len(outputs) == self.out_splits + return torch.cat(outputs, dim=-1) + + @torch.no_grad() + def copy_params_from(self, other): + """Copy the weight and bias data from ``other``. + + This is especially useful for reproducible initialization and testing. + + Equivalent to: + + .. code-block:: python + + with torch.no_grad(): + self.weight.copy_(other.weight) + if self.bias is not None: + self.bias.copy_(other.bias) + + .. note:: + If ZeRO-3 is enabled, this is a collective operation and the + updated parameters of data-parallel rank 0 will be visible on all + ranks. See :class:`deepspeed.zero.GatheredParameters` for more + information. + + + Args: + other (``torch.nn.Linear``): the linear layer to copy from. + """ + assert hasattr(other, 'weight') + assert other.weight.size() == (self.out_features, self.in_features) + if self.use_bias: + assert hasattr(other, 'bias') + assert other.bias is not None + assert other.bias.size() == (self.out_features, ) + else: + assert other.bias is None + + for row in range(self.out_splits): + rstart = self.out_parts[row] + rstop = self.out_parts[row + 1] + + for col in range(self.in_splits): + cstart = self.in_parts[col] + cstop = self.in_parts[col + 1] + + local = self.linears[row][col] + global_weight = other.weight[rstart:rstop, cstart:cstop] + with deepspeed.zero.GatheredParameters(local.weight, modifier_rank=0): + local.weight.copy_(global_weight) + + if local.bias is not None: + with deepspeed.zero.GatheredParameters(local.bias, modifier_rank=0): + local.bias.data.copy_(other.bias[rstart:rstop].data) + + +class TiledLinearReturnBias(TiledLinear): + """Wrapper for a Linear class that returns its own bias parameter, such as + used by Megatron-LM. + """ + + def _reduce_local_output(self, in_id, out_id, current_out, new_out): + """Reduces output tensors, but not the returned bias. 
""" + if current_out is not None: + old_tensor, old_bias = current_out + else: + old_tensor, old_bias = None, None + + assert isinstance(new_out, tuple) + assert len(new_out) == 2 + + tensor, bias = new_out + assert tensor is not None + + tensor = super()._reduce_local_output(in_id=in_id, out_id=out_id, current_out=old_tensor, new_out=tensor) + + if bias is None: + bias = old_bias + + return tensor, bias + + def _combine_output_splits(self, outputs): + # stack output tensors + tensors = [o[0] for o in outputs] + tensor = super()._combine_output_splits(tensors) + + # stack biases if applicable + biases = [o[1] for o in outputs if o[1] is not None] + if len(biases) > 0: + bias = super()._combine_output_splits(biases) + else: + bias = None + + return tensor, bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/utils.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f61715bd43873e393771f85382fc09336b2ccece --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/utils.py @@ -0,0 +1,160 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +from typing import List + +import torch +from deepspeed import comm as dist +from deepspeed.utils import logger +from deepspeed.ops.adam import DeepSpeedCPUAdam +from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad +from deepspeed.ops.adam import FusedAdam +from deepspeed.ops.lion import DeepSpeedCPULion, FusedLion +from deepspeed.utils.nvtx import instrument_w_nvtx +from deepspeed.accelerator import get_accelerator + + +def _initialize_parameter_parallel_groups(parameter_parallel_size=None): + data_parallel_size = int(dist.get_world_size()) + parameter_parallel_size = parameter_parallel_size or data_parallel_size + logger.info("data_parallel_size: %s, parameter_parallel_size: %s", data_parallel_size, parameter_parallel_size) + assert data_parallel_size % parameter_parallel_size == 0, \ + 'world size should be divisible by parameter parallel size' + rank = dist.get_rank() + my_group = None + for i in range(data_parallel_size // parameter_parallel_size): + ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size) + group = dist.new_group(ranks) + if rank in ranks: + my_group = group + return my_group + + +class ZeRORuntimeException(Exception): + pass + + +ZERO_SUPPORTED_OPTIMIZERS = [ + torch.optim.Adam, torch.optim.AdamW, FusedAdam, DeepSpeedCPUAdam, torch.optim.Adagrad, DeepSpeedCPUAdagrad, + DeepSpeedCPULion, FusedLion +] + +# Add apex FusedAdam to supported list if apex is installed +try: + import apex + if hasattr(apex, 'optimizers') and hasattr(apex.optimizers, 'FusedAdam'): + ZERO_SUPPORTED_OPTIMIZERS.append(apex.optimizers.FusedAdam) +except ImportError: + pass + + +def is_zero_supported_optimizer(optimizer): + if dist.get_rank() == 0: + logger.info(f'Checking ZeRO support for optimizer={optimizer.__class__.__name__} type={type(optimizer)}') + return type(optimizer) in ZERO_SUPPORTED_OPTIMIZERS + + +def get_lst_from_rank0(lst: List[int]) -> None: + """ + NOTE: creates both communication and synchronization overhead so should be used + sparingly + """ + lst_tensor = torch.tensor( + lst if dist.get_rank() == 0 else [-1] * len(lst), + dtype=int, + # device=get_accelerator().current_device_name(), + device=torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])), + requires_grad=False, + ) + dist.broadcast(lst_tensor, src=0, 
async_op=False) + + return list(lst_tensor.cpu().numpy()) + + +@instrument_w_nvtx +def assert_ints_same_as_other_ranks(ints: List[int]) -> None: + """ + NOTE: creates both communication and synchronization overhead so should be + used sparingly + + takes a list of ints from each rank and ensures that they are the same + across ranks, throwing an exception if they are not. + """ + rank0_ints = get_lst_from_rank0(ints) + if ints != rank0_ints: + raise RuntimeError(f"disagreement between rank0 and rank{dist.get_rank()}: " + f"rank0: {rank0_ints}, rank{dist.get_rank()}: {ints}") + + +def is_builtin_type(obj): + # https://stackoverflow.com/a/17795199 + return obj.__class__.__module__ == '__builtin__' or obj.__class__.__module__ == "builtins" + + +def isinstance_namedtuple(obj: object) -> bool: + """ + Is this an instance of namedtuple/NamedTuple? + From: https://stackoverflow.com/a/62692640 + + Args: + obj (object): An object. + + Returns: + bool: True if namedtuple/NamedTuple else False. + """ + return isinstance(obj, tuple) and hasattr(obj, '_asdict') and hasattr(obj, '_fields') + + +def is_zero_param(parameter): + if not torch.is_tensor(parameter): + return False + return hasattr(parameter, 'ds_id') + + +def apply_to_tensors_only(function, value, warning_msg_fn=None): + """ + Apply `function` to every Tensor in `value`. + + Args: + functional: The function class to apply. + value (Any): Target object to apply `function` to. + + Returns: + Any: Output of `function`. + """ + if isinstance(value, (tuple, list)): + touched_outputs = [] + for elem in value: + touched_output = apply_to_tensors_only(function, elem) + touched_outputs.append(touched_output) + + if isinstance_namedtuple(value): + # namedtuples require a slightly different syntax. + return value.__class__(*touched_outputs) + + return value.__class__(touched_outputs) + elif isinstance(value, dict): + # apply inplace to avoid recreating dict inherited objects + for key in value.keys(): + value[key] = apply_to_tensors_only(function, value[key]) + return value + + elif isinstance(value, torch.Tensor): + # this also applies to torch.Tensor's subclasses like torch.nn.parameter.Parameter + touched_output = function(value) + + # restore zero param attributes if those get stripped by `backward_function` + if not is_zero_param(touched_output) and is_zero_param(value): + touched_output.ds_param_alias = value + + return touched_output + else: + if not is_builtin_type(value): + global warned + if warning_msg_fn and not warned and dist.get_rank() == 0: + logger.warning(warning_msg_fn(value)) + warned = True + return value diff --git a/venv/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so b/venv/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so new file mode 100644 index 0000000000000000000000000000000000000000..e2902360b254845622871621f99bd361100cb177 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9254d0854dd7615e11de28d771ae408878ca8123a7ac204f21e4cc7a376cc2e5 +size 35123345 diff --git a/venv/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so b/venv/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so new file mode 100644 index 0000000000000000000000000000000000000000..febf7d70cc32960a699501352364ea21c8269d37 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so @@ 
-0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09f0031d06ac6f2a67010411a6507849eaa826754ca553a95459ac18a4775e5f +size 34990041 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/images/flower.jpg b/venv/lib/python3.10/site-packages/sklearn/datasets/images/flower.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56350635174c5d062428d0128910faa0476b66ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/images/flower.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a77f6ec41e353afdf8bdff2ea981b2955535d8d83294f8cfa49cf4e423dd5638 +size 142987 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jdq-42585.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jdq-42585.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..a54754b666113a517f58ff509416f461e92636e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jdq-42585.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3736e7feb7ad30c68675c2c4e48a9fb262e80308c9083b100ddd0339da1fc282 +size 348 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/data-v1-dl-21854866.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/data-v1-dl-21854866.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..2ad2b8b4fd397ee8d61b44fb77b26076f643335d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/data-v1-dl-21854866.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8d00c6690576a9ec39e1cb77054e13296be0fdebab0fb35a64a0e8627b6e6f3 +size 4519 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/api-v1-jdf-62.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/api-v1-jdf-62.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..22da3f227189e339ed4d2b3861866ded65d999a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/api-v1-jdf-62.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:489b177126cb7f335cb220709233b946d3a0ad71d38bba6d48b79187146e585a +size 817 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/data-v1-dl-52352.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/data-v1-dl-52352.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..b3ce4b7991c2223af6097adb8d1f553088d1ece0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/data-v1-dl-52352.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb5830c82112f62a400c82ac1f1b5eb61c29c0a7cc72ba56d2aeff0fae8a60f9 +size 1625 diff --git a/venv/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..44045565e87ba72e92e03ecbbb72572d4746c019 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3e4732f10ce97c9f5e903774dcb953c0f1518f40b27e287992075311835cefce +size 11815960