diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..efed1ef84aca357f5a2f1a06d41c254c01f36a3e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .abstract_accelerator import DeepSpeedAccelerator +from .real_accelerator import get_accelerator, set_accelerator, is_current_accelerator_supported diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/cpu_accelerator.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/cpu_accelerator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e6430848b6a23cc6303dcf5dda412634becdb7a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/cpu_accelerator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/hpu_accelerator.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/hpu_accelerator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11b6c926debb36c88576e913ee775be4b8d24d2e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/hpu_accelerator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/mps_accelerator.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/mps_accelerator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a8c3a3f9abb1016080bed9da31c0eca018f1a12 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/mps_accelerator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/npu_accelerator.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/npu_accelerator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd658b3de42b305a271ee6ef39ee466c9a7b3260 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/npu_accelerator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/xpu_accelerator.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/xpu_accelerator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb6bed266fcbc9d35c30e7326ef7b636b3cae418 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/xpu_accelerator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/abstract_accelerator.py b/venv/lib/python3.10/site-packages/deepspeed/accelerator/abstract_accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..768d5ea34e5ea41b6d18f3e44ed996ded1d6340c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/accelerator/abstract_accelerator.py @@ -0,0 +1,297 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import abc +from abc import ABC + + +class DeepSpeedAccelerator(ABC): + + def __init__(self): + self._name = None + self._communication_backend_name = None + + @abc.abstractmethod + def is_synchronized_device(self): + ... + + @abc.abstractmethod + def use_host_timers(self): + ... + + @abc.abstractmethod + def resolves_data_dependency(self): + ... + + @abc.abstractmethod + def handles_memory_backpressure(self): + ... + + # Device APIs + @abc.abstractmethod + def device_name(self, device_index): + ... + + @abc.abstractmethod + def device(self, device_index): + ... + + @abc.abstractmethod + def set_device(self, device_index): + ... + + @abc.abstractmethod + def current_device(self): + ... + + @abc.abstractmethod + def current_device_name(self): + ... + + @abc.abstractmethod + def device_count(self): + ... + + @abc.abstractmethod + def synchronize(self, device_index=None): + ... + + # RNG APIs + @abc.abstractmethod + def random(self): + ... + + @abc.abstractmethod + def set_rng_state(self, new_state, device_index=None): + ... + + @abc.abstractmethod + def get_rng_state(self, device_index=None): + ... + + @abc.abstractmethod + def manual_seed(self, seed): + ... + + @abc.abstractmethod + def manual_seed_all(self, seed): + ... + + @abc.abstractmethod + def initial_seed(self, seed): + ... + + @abc.abstractmethod + def default_generator(self, device_index): + ... + + # Streams/Events + @property + @abc.abstractmethod + def Stream(self): + ... + + @abc.abstractmethod + def stream(self, stream): + ... + + @abc.abstractmethod + def current_stream(self, device_index=None): + ... + + @abc.abstractmethod + def default_stream(self, device_index=None): + ... + + @property + @abc.abstractmethod + def Event(self): + ... + + # Memory management + @abc.abstractmethod + def empty_cache(self): + ... + + @abc.abstractmethod + def memory_allocated(self, device_index=None): + ... + + @abc.abstractmethod + def max_memory_allocated(self, device_index=None): + ... + + @abc.abstractmethod + def reset_max_memory_allocated(self, device_index=None): + ... + + @abc.abstractmethod + def memory_cached(self, device_index=None): + ... + + @abc.abstractmethod + def max_memory_cached(self, device_index=None): + ... + + @abc.abstractmethod + def reset_max_memory_cached(self, device_index=None): + ... + + @abc.abstractmethod + def memory_stats(self, device_index=None): + ... + + @abc.abstractmethod + def reset_peak_memory_stats(self, device_index=None): + ... + + @abc.abstractmethod + def memory_reserved(self, device_index=None): + ... + + @abc.abstractmethod + def max_memory_reserved(self, device_index=None): + ... + + @abc.abstractmethod + def total_memory(self, device_index=None): + ... + + @abc.abstractmethod + def available_memory(self, device_index=None): + ... + + # Data types + @abc.abstractmethod + def is_bf16_supported(self): + ... + + @abc.abstractmethod + def is_fp16_supported(self): + ... + + @abc.abstractmethod + def supported_dtypes(self): + ... + + # Misc + @abc.abstractmethod + def amp(self): + ... + + @abc.abstractmethod + def is_available(self): + ... + + @abc.abstractmethod + def range_push(self, msg): + ... + + @abc.abstractmethod + def range_pop(self): + ... + + @abc.abstractmethod + def lazy_call(self, callback): + ... + + @abc.abstractmethod + def communication_backend_name(self): + ... + + @abc.abstractmethod + def is_triton_supported(self): + ... + + # Graph operations + @abc.abstractmethod + def create_graph(self): + ... 
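# Standalone usage sketch (not part of this module): consumer code is normally
# written against this interface through the get_accelerator() entry point
# defined below in real_accelerator.py, which keeps it device-agnostic. Only
# methods declared in this ABC are used; the tensor shape and the
# bfloat16/float fallback are arbitrary example choices, and a standard
# deepspeed + torch install is assumed.
from deepspeed.accelerator import get_accelerator
import torch

accel = get_accelerator()
device = accel.device_name()          # e.g. 'cuda', 'cpu', 'hpu', 'xpu', ...
dtype = torch.bfloat16 if accel.is_bf16_supported() else torch.float
x = torch.ones(4, 4, dtype=dtype, device=device)
accel.synchronize()                   # waits for pending device work where applicable
print(accel.device_count(), accel.communication_backend_name())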
+ + @abc.abstractmethod + def capture_to_graph(self, graph, pool=None, stream=None): + ... + + @abc.abstractmethod + def replay_graph(self, graph): + ... + + # Tensor operations + @property + @abc.abstractmethod + def BFloat16Tensor(self): + ... + + @property + @abc.abstractmethod + def ByteTensor(self): + ... + + @property + @abc.abstractmethod + def DoubleTensor(self): + ... + + @property + @abc.abstractmethod + def FloatTensor(self): + ... + + @property + @abc.abstractmethod + def HalfTensor(self): + ... + + @property + @abc.abstractmethod + def IntTensor(self): + ... + + @property + @abc.abstractmethod + def LongTensor(self): + ... + + @abc.abstractmethod + def pin_memory(self, tensor, align_bytes=1): + ... + + @abc.abstractmethod + def is_pinned(self, tensor): + ... + + @abc.abstractmethod + def on_accelerator(self, tensor): + ... + + @abc.abstractmethod + def op_builder_dir(self): + ... + + # create an instance of op builder, specified by class_name + @abc.abstractmethod + def create_op_builder(self, class_name): + ... + + # return an op builder class, specified by class_name + @abc.abstractmethod + def get_op_builder(self, class_name): + ... + + @abc.abstractmethod + def build_extension(self): + ... + + @abc.abstractmethod + def export_envs(self): + ... + + @abc.abstractmethod + def visible_devices_envs(self): + ... + + @abc.abstractmethod + def set_visible_devices_envs(self, current_env, local_accelerator_ids): + ... diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/cpu_accelerator.py b/venv/lib/python3.10/site-packages/deepspeed/accelerator/cpu_accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..237e7f51dcb405595a09e2753cfabd7e137f6afa --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/accelerator/cpu_accelerator.py @@ -0,0 +1,332 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from .abstract_accelerator import DeepSpeedAccelerator + +try: + import oneccl_bindings_for_pytorch # noqa: F401 # type: ignore + oneccl_imported_p = True +except ImportError as e: + oneccl_imported_p = False + +import os + + +# accelerator for Intel CPU +class CPU_Accelerator(DeepSpeedAccelerator): + + def __init__(self): + self._name = 'cpu' + if oneccl_imported_p: + self._communication_backend_name = 'ccl' + else: + # fallback to gloo if oneccl_binding_for_pytorch is not installed + self._communication_backend_name = 'gloo' + try: + import psutil + mem = psutil.Process().memory_info().rss + self.max_mem = mem + except ImportError as e: + self.max_mem = 0 + + def is_synchronized_device(self): + return True + + def use_host_timers(self): + return self.is_synchronized_device() + + def resolves_data_dependency(self): + return self.is_synchronized_device() + + def handles_memory_backpressure(self): + return self.is_synchronized_device() + + # Device APIs + def device_name(self, device_index=None): + return 'cpu' + + def device(self, device_index=None): + return None + + def set_device(self, device_index): + return + + def current_device(self): + return os.environ.get('LOCAL_RANK', 0) + + def current_device_name(self): + return 'cpu' + + def device_count(self): + device_count = int(os.environ.get('LOCAL_SIZE', 0)) + if device_count > 0: + return device_count + else: + from deepspeed.utils.numa import get_numa_cores + # Count NUMA node for number of cpu accelerators. On machine with HBM + # In flat mode, HBM is in separate NUMA node with no cores on this node. 
+ # Ignore these NUMA nodes with no cores. + numa_core_lists = get_numa_cores() + numa_count = 0 + prev_core_list = [] + for core_list in numa_core_lists: + if len(core_list) > 0 and core_list != prev_core_list: + numa_count += 1 + prev_core_list = core_list + return numa_count + + def synchronize(self, device_index=None): + return + + # RNG APIs + def random(self): + return torch.random + + def set_rng_state(self, new_state, device_index=None): + if device_index is None: + return torch.set_rng_state(new_state) + return torch.set_rng_state(new_state, device_index) + + def get_rng_state(self, device_index=None): + return torch.get_rng_state() + + def manual_seed(self, seed): + return torch.manual_seed(seed) + + def manual_seed_all(self, seed): + return torch.manual_seed(seed) + + def initial_seed(self, seed): + return torch.initial_seed(seed) + + def default_generator(self, device_index): + return torch.default_generator + + # Streams/Events + @property + def Stream(self): + return None + + def stream(self, stream): + from deepspeed.runtime.utils import noop_context + return noop_context() + + def current_stream(self, device_index=None): + return None + + def default_stream(self, device_index=None): + return None + + @property + def Event(self): + return None + + # Memory management + def empty_cache(self): + return + + def get_rss(self): + import psutil + mem = psutil.Process().memory_info().rss + if mem > self.max_mem: + self.max_mem = mem + return mem + + def reset_rss(self): + import psutil + mem = psutil.Process().memory_info().rss + self.max_mem = mem + return mem + + def memory_allocated(self, device_index=None): + return self.get_rss() + + def max_memory_allocated(self, device_index=None): + self.get_rss() + return self.max_mem + + def reset_max_memory_allocated(self, device_index=None): + self.reset_rss() + return + + def memory_cached(self, device_index=None): + return self.get_rss() + + def max_memory_cached(self, device_index=None): + self.get_rss() + return self.max_mem + + def reset_max_memory_cached(self, device_index=None): + self.reset_rss() + return + + def memory_stats(self, device_index=None): + mem = self.get_rss() + mem_stat = {} + mem_stat['allocated_bytes.all.current'] = mem + mem_stat['allocated_bytes.all.peak'] = self.max_mem + return mem_stat + + def reset_peak_memory_stats(self, device_index=None): + self.reset_rss() + return + + def memory_reserved(self, device_index=None): + return self.get_rss() + + def max_memory_reserved(self, device_index=None): + self.get_rss() + return self.max_mem + + def total_memory(self, device_index=None): + import psutil + return psutil.virtual_memory().total + + def available_memory(self, device_index=None): + import psutil + return psutil.virtual_memory().available + + # Misc + def amp(self): + return torch.cpu.amp + + def is_available(self): + return True + + def range_push(self, msg): + # TODO itt is currently not supported yet + # return torch.profiler.itt.range_push(msg) + return + + def range_pop(self): + # TODO itt is currently not supported yet + # return torch.profiler.itt.range_pop() + return + + def lazy_call(self, callback): + return callback() + + def communication_backend_name(self): + return self._communication_backend_name + + def is_triton_supported(self): + return False + + # Data types + def is_bf16_supported(self): + return True + + def is_fp16_supported(self): + return False + + def supported_dtypes(self): + return [torch.float, torch.bfloat16] + + # Graph operations + def create_graph(self): + return None + + 
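# Standalone sketch of the memory-accounting approach used above (not part of
# this class): with no device memory on CPU, memory_allocated()/memory_stats()
# report the host process RSS via psutil and track its peak, as get_rss() does.
# The rss_stats helper and the _peak_rss module-level variable are illustrative
# names only; psutil is assumed to be installed.
import psutil

_peak_rss = psutil.Process().memory_info().rss

def rss_stats():
    global _peak_rss
    rss = psutil.Process().memory_info().rss
    _peak_rss = max(_peak_rss, rss)   # mirrors max_memory_allocated() behavior
    return {
        'allocated_bytes.all.current': rss,
        'allocated_bytes.all.peak': _peak_rss,
    }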
def capture_to_graph(self, graph, pool=None, stream=None): + from deepspeed.runtime.utils import noop_context + return noop_context() + + def replay_graph(self, graph): + return + + # Tensor operations + @property + def BFloat16Tensor(self): + return torch.BFloat16Tensor + + @property + def ByteTensor(self): + return torch.ByteTensor + + @property + def DoubleTensor(self): + return torch.DoubleTensor + + @property + def FloatTensor(self): + return torch.FloatTensor + + @property + def HalfTensor(self): + return torch.HalfTensor + + @property + def IntTensor(self): + return torch.IntTensor + + @property + def LongTensor(self): + return torch.LongTensor + + def pin_memory(self, tensor, align_bytes=1): + return tensor + + def is_pinned(self, tensor): + return tensor.is_pinned() + + def op_builder_dir(self): + try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + return "op_builder.cpu" + except ImportError: + return "deepspeed.ops.op_builder.cpu" + + def on_accelerator(self, tensor): + device_str = str(tensor.device) + if device_str.startswith('cpu'): + return True + else: + return False + + # create an instance of op builder and return, name specified by class_name + def create_op_builder(self, op_name): + builder_class = self.get_op_builder(op_name) + if builder_class is not None: + return builder_class() + return None + + # return an op builder class, name specified by class_name + def get_op_builder(self, class_name): + try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + from op_builder.cpu import CCLCommBuilder, ShareMemCommBuilder, FusedAdamBuilder, CPUAdamBuilder, NotImplementedBuilder + except ImportError: + from deepspeed.ops.op_builder.cpu import CCLCommBuilder, ShareMemCommBuilder, FusedAdamBuilder, CPUAdamBuilder, NotImplementedBuilder + + if class_name == "CCLCommBuilder": + return CCLCommBuilder + elif class_name == "ShareMemCommBuilder": + return ShareMemCommBuilder + elif class_name == "FusedAdamBuilder": + return FusedAdamBuilder + elif class_name == "CPUAdamBuilder": + return CPUAdamBuilder + else: + # return a NotImplementedBuilder to avoid get NoneType[Name] in unit tests + return NotImplementedBuilder + + def build_extension(self): + from torch.utils.cpp_extension import BuildExtension + return BuildExtension + + def export_envs(self): + return [] + + # TODO: cpu's visible envs is confirmed, keep as CUDA_VISIBLE_DEVICES + def visible_devices_envs(self): + return ['CUDA_VISIBLE_DEVICES'] + + def set_visible_devices_envs(self, current_env, local_accelerator_ids): + for env in self.visible_devices_envs(): + current_env[env] = ",".join(map(str, local_accelerator_ids)) diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/cuda_accelerator.py b/venv/lib/python3.10/site-packages/deepspeed/accelerator/cuda_accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..2fc0cfd94125547f7508c544748266e0da7ce103 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/accelerator/cuda_accelerator.py @@ -0,0 +1,369 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import functools +import os +import pkgutil +import importlib + +from .abstract_accelerator import DeepSpeedAccelerator +# During setup stage torch may not be installed, pass on no torch will +# allow op builder related API to be executed. +try: + import torch.cuda +except ImportError: + pass + +# Delay import pynvml to avoid import error when CUDA is not available +pynvml = None + + +class CUDA_Accelerator(DeepSpeedAccelerator): + + def __init__(self): + self._name = 'cuda' + self._communication_backend_name = 'nccl' + if pynvml is None: + self._init_pynvml() + + def _init_pynvml(self): + global pynvml + try: + import pynvml + except ImportError: + return + try: + pynvml.nvmlInit() + except pynvml.NVMLError: + pynvml = None + return + + def is_synchronized_device(self): + return False + + def use_host_timers(self): + return self.is_synchronized_device() + + def resolves_data_dependency(self): + return self.is_synchronized_device() + + def handles_memory_backpressure(self): + return self.is_synchronized_device() + + # Device APIs + def device_name(self, device_index=None): + if device_index is None: + return 'cuda' + return 'cuda:{}'.format(device_index) + + def device(self, device_index=None): + return torch.cuda.device(device_index) + + def set_device(self, device_index): + torch.cuda.set_device(device_index) + + def current_device(self): + return torch.cuda.current_device() + + def current_device_name(self): + return 'cuda:{}'.format(torch.cuda.current_device()) + + def device_count(self): + return torch.cuda.device_count() + + def synchronize(self, device_index=None): + return torch.cuda.synchronize(device_index) + + # RNG APIs + def random(self): + return torch.random + + def set_rng_state(self, new_state, device_index=None): + if device_index is None: + return torch.cuda.set_rng_state(new_state) + + return torch.cuda.set_rng_state(new_state, device_index) + + def get_rng_state(self, device_index=None): + if device_index is None: + return torch.cuda.get_rng_state() + + return torch.cuda.get_rng_state(device_index) + + def manual_seed(self, seed): + return torch.cuda.manual_seed(seed) + + def manual_seed_all(self, seed): + return torch.cuda.manual_seed_all(seed) + + def initial_seed(self, seed): + return torch.cuda.initial_seed(seed) + + def default_generator(self, device_index): + return torch.cuda.default_generators[device_index] + + # Streams/Events + @property + def Stream(self): + return torch.cuda.Stream + + def stream(self, stream): + return torch.cuda.stream(stream) + + def current_stream(self, device_index=None): + return torch.cuda.current_stream(device_index) + + def default_stream(self, device_index=None): + return torch.cuda.default_stream(device_index) + + @property + def Event(self): + return torch.cuda.Event + + # Memory management + def empty_cache(self): + return torch.cuda.empty_cache() + + def memory_allocated(self, device_index=None): + return torch.cuda.memory_allocated(device_index) + + def max_memory_allocated(self, device_index=None): + return torch.cuda.max_memory_allocated(device_index) + + def reset_max_memory_allocated(self, device_index=None): + return torch.cuda.reset_max_memory_allocated(device_index) + + def memory_cached(self, device_index=None): + return torch.cuda.memory_cached(device_index) + + def max_memory_cached(self, device_index=None): + return torch.cuda.max_memory_cached(device_index) + + def reset_max_memory_cached(self, device_index=None): + return 
torch.cuda.reset_max_memory_cached(device_index) + + def memory_stats(self, device_index=None): + if hasattr(torch.cuda, 'memory_stats'): + return torch.cuda.memory_stats(device_index) + + def reset_peak_memory_stats(self, device_index=None): + if hasattr(torch.cuda, 'reset_peak_memory_stats'): + return torch.cuda.reset_peak_memory_stats(device_index) + + def memory_reserved(self, device_index=None): + if hasattr(torch.cuda, 'memory_reserved'): + return torch.cuda.memory_reserved(device_index) + + def max_memory_reserved(self, device_index=None): + if hasattr(torch.cuda, 'max_memory_reserved'): + return torch.cuda.max_memory_reserved(device_index) + + def total_memory(self, device_index=None): + return torch.cuda.get_device_properties(device_index).total_memory + + def _get_nvml_gpu_id(self, torch_gpu_id): + """ + credit: https://discuss.pytorch.org/t/making-pynvml-match-torch-device-ids-cuda-visible-devices/103020 + + Remap torch device id to nvml device id, respecting CUDA_VISIBLE_DEVICES. + + If the latter isn't set return the same id + """ + # if CUDA_VISIBLE_DEVICES is used automagically remap the id since pynvml ignores this env var + if "CUDA_VISIBLE_DEVICES" in os.environ: + ids = list(map(int, os.environ.get("CUDA_VISIBLE_DEVICES", "").split(","))) + return ids[torch_gpu_id] # remap + else: + return torch_gpu_id + + def available_memory(self, device_index=None): + if pynvml: + if device_index is None: + device_index = self.current_device() + handle = pynvml.nvmlDeviceGetHandleByIndex(self._get_nvml_gpu_id(device_index)) + info = pynvml.nvmlDeviceGetMemoryInfo(handle) + return info.free + else: + return self.total_memory(device_index) - self.memory_allocated(device_index) + + # Data types + def is_bf16_supported(self): + if not torch.cuda.is_available(): + return True + return torch.cuda.is_bf16_supported() + + def is_fp16_supported(self): + if not torch.cuda.is_available(): + return True + # See https://docs.nvidia.com/deeplearning/tensorrt/support-matrix/index.html#hardware-precision-matrix + # FP16 on compute capability 6.x is deprecated + allow_deprecated_fp16 = os.environ.get('DS_ALLOW_DEPRECATED_FP16', '0') == '1' + major, _ = torch.cuda.get_device_capability() + if major >= 7: + return True + elif major == 6 and allow_deprecated_fp16: + return True + else: + return False + + def supported_dtypes(self): + supported_dtypes = [torch.float] + if self.is_fp16_supported(): + supported_dtypes.append(torch.half) + if self.is_bf16_supported(): + supported_dtypes.append(torch.bfloat16) + return supported_dtypes + + # Misc + def amp(self): + if hasattr(torch.cuda, 'amp'): + return torch.cuda.amp + return None + + def is_available(self): + return torch.cuda.is_available() + + def range_push(self, msg): + if hasattr(torch.cuda.nvtx, 'range_push'): + return torch.cuda.nvtx.range_push(msg) + + def range_pop(self): + if hasattr(torch.cuda.nvtx, 'range_pop'): + return torch.cuda.nvtx.range_pop() + + def lazy_call(self, callback): + return torch.cuda._lazy_call(callback) + + def communication_backend_name(self): + return self._communication_backend_name + + def is_triton_supported(self): + major, _ = torch.cuda.get_device_capability() + if major >= 8: + return True + else: + return False + + # Graph operations + def create_graph(self): + return torch.cuda.CUDAGraph() + + def capture_to_graph(self, graph, pool=None, stream=None): + return torch.cuda.graph(graph, pool, stream) + + def replay_graph(self, graph): + graph.replay() + return + + # Tensor operations + + @property + def 
BFloat16Tensor(self): + return functools.partial(torch.tensor, dtype=torch.bfloat16, device='cuda') + + @property + def ByteTensor(self): + return functools.partial(torch.tensor, dtype=torch.uint8, device='cuda') + + @property + def DoubleTensor(self): + return functools.partial(torch.tensor, dtype=torch.double, device='cuda') + + @property + def FloatTensor(self): + return functools.partial(torch.tensor, dtype=torch.float, device='cuda') + + @property + def HalfTensor(self): + return functools.partial(torch.tensor, dtype=torch.half, device='cuda') + + @property + def IntTensor(self): + return functools.partial(torch.tensor, dtype=torch.int, device='cuda') + + @property + def LongTensor(self): + return functools.partial(torch.tensor, dtype=torch.long, device='cuda') + + def pin_memory(self, tensor, align_bytes=1): + return tensor.pin_memory() + + def is_pinned(self, tensor): + return tensor.is_pinned() + + def on_accelerator(self, tensor): + device_str = str(tensor.device) + if device_str.startswith('cuda:'): + return True + else: + return False + + def op_builder_dir(self): + try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + return "op_builder" + except ImportError: + return "deepspeed.ops.op_builder" + + # dict that holds class name <--> class type mapping i.e. + # 'AsyncIOBuilder': + # this dict will be filled at init stage + class_dict = None + + def _lazy_init_class_dict(self): + if self.class_dict is not None: + return + else: + self.class_dict = {} + # begin initialize for create_op_builder() + # put all valid class name <--> class type mapping into class_dict + op_builder_dir = self.op_builder_dir() + op_builder_module = importlib.import_module(op_builder_dir) + op_builder_absolute_path = os.path.dirname(op_builder_module.__file__) + for _, module_name, _ in pkgutil.iter_modules([op_builder_absolute_path]): + # avoid self references, + # skip sub_directories which contains ops for other backend(cpu, npu, etc.). 
+ if module_name != 'all_ops' and module_name != 'builder' and not os.path.isdir( + os.path.join(op_builder_absolute_path, module_name)): + module = importlib.import_module("{}.{}".format(op_builder_dir, module_name)) + for member_name in module.__dir__(): + if member_name.endswith( + 'Builder' + ) and member_name != "OpBuilder" and member_name != "CUDAOpBuilder" and member_name != "TorchCPUOpBuilder": # avoid abstract classes + if not member_name in self.class_dict: + self.class_dict[member_name] = getattr(module, member_name) + # end initialize for create_op_builder() + + # create an instance of op builder and return, name specified by class_name + def create_op_builder(self, class_name): + self._lazy_init_class_dict() + if class_name in self.class_dict: + return self.class_dict[class_name]() + else: + return None + + # return an op builder class, name specified by class_name + def get_op_builder(self, class_name): + self._lazy_init_class_dict() + if class_name in self.class_dict: + return self.class_dict[class_name] + else: + return None + + def build_extension(self): + from torch.utils.cpp_extension import BuildExtension + return BuildExtension + + def export_envs(self): + return ['NCCL'] + + def visible_devices_envs(self): + return ['CUDA_VISIBLE_DEVICES'] + + def set_visible_devices_envs(self, current_env, local_accelerator_ids): + for env in self.visible_devices_envs(): + current_env[env] = ",".join(map(str, local_accelerator_ids)) diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/hpu_accelerator.py b/venv/lib/python3.10/site-packages/deepspeed/accelerator/hpu_accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..326efc8fa01b4958b172e58d350df0ec813c30aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/accelerator/hpu_accelerator.py @@ -0,0 +1,303 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import pkgutil +import importlib +import torch + +from .abstract_accelerator import DeepSpeedAccelerator + + +class HPU_Accelerator(DeepSpeedAccelerator): + + def __init__(self): + self._name = 'hpu' + self._communication_backend_name = 'hccl' + try: + import habana_frameworks.torch.hpu as hpu + hpu.setDeterministic(True) + self.hpu = hpu + except ImportError as e: + raise ValueError( + f"HPU_Accelerator requires habana_frameworks.torch.hpu, which is not installed on this system.") + + self.fp16_supported = None + + # Device APIs + def is_synchronized_device(self): + return False + + def use_host_timers(self): + return False + + def resolves_data_dependency(self): + return True + + def handles_memory_backpressure(self): + return True + + def device_name(self, device_index=None): + if device_index is None: + return 'hpu' + return 'hpu:{}'.format(device_index) + + def device(self, device_index=None): + return torch.device(self.device_name(device_index)) + + def set_device(self, device_index): + self.hpu.set_device(device_index) + + def current_device(self): + return (self.hpu.current_device()) + + def current_device_name(self): + return 'hpu:{}'.format(self.current_device()) + + def device_count(self): + return self.hpu.device_count() + + def synchronize(self, device_index=None): + return self.hpu.synchronize() + + # RNG APIs + def random(self): + return torch.random + + def set_rng_state(self, new_state, device_index=None): + self.hpu.random.set_rng_state(new_state) + + def get_rng_state(self, device_index=None): + return self.hpu.random.get_rng_state() + + def manual_seed(self, seed): + self.hpu.random.manual_seed(seed) + + def manual_seed_all(self, seed): + self.hpu.random.manual_seed_all(seed) + + def initial_seed(self, seed): + self.hpu.random.initial_seed(seed) + + def default_generator(self, device_index): + return self.hpu.random.default_generators[device_index] + + # Streams/Events + @property + def Stream(self): + return self.hpu.Stream + + def stream(self, stream): + return self.hpu.stream(stream) + + def current_stream(self, device_index=None): + return self.hpu.current_stream() + + def default_stream(self, device_index=None): + return self.hpu.default_stream() + + @property + def Event(self): + import habana_frameworks.torch.core as htcore + return htcore.hpu.Event + + # Memory management + def empty_cache(self): + return + + def memory_allocated(self, device_index=None): + return self.hpu.memory_allocated() + + def max_memory_allocated(self, device_index=None): + return self.hpu.max_memory_allocated() + + def reset_max_memory_allocated(self, device_index=None): + return self.hpu.reset_max_memory_allocated() + + def memory_cached(self, device_index=None): + return self.hpu.memory_cached(device_index) + + def max_memory_cached(self, device_index=None): + return self.hpu.max_memory_cached(device_index) + + def reset_max_memory_cached(self, device_index=None): + return None + + def memory_stats(self, device_index=None): + return self.hpu.memory_stats(device_index) + + def reset_peak_memory_stats(self, device_index=None): + self.hpu.reset_peak_memory_stats(device_index) + + def memory_reserved(self, device_index=None): + return self.hpu.memory_reserved(device_index) + + def max_memory_reserved(self, device_index=None): + return self.hpu.max_memory_reserved(device_index) + + def total_memory(self, device_index=None): + return self.memory_stats(device_index)['Limit'] + + def available_memory(self, 
device_index=None): + return self.total_memory(device_index) - self.memory_allocated(device_index) + + # Data types + def is_bf16_supported(self): + return True + + def is_fp16_supported(self): + if self.fp16_supported is None: + import habana_frameworks.torch.utils.experimental as htexp + self.fp16_supported = htexp._is_fp16_supported() + return self.fp16_supported + + def supported_dtypes(self): + supported_dtypes = [torch.float, torch.bfloat16] + if self.is_fp16_supported(): + supported_dtypes.append(torch.half) + return supported_dtypes + + # Misc + def amp(self): + return None + + def is_available(self): + return self.hpu.is_available() + + def range_push(self, msg): + return + + def range_pop(self): + return + + def lazy_call(self, callback): + callback() + + def communication_backend_name(self): + return self._communication_backend_name + + def is_triton_supported(self): + return False + + # Graph operations + def create_graph(self): + return self.hpu.HPUGraph() + + def capture_to_graph(self, graph, pool=None, stream=None): + return self.hpu.graph(graph, stream=stream) + + def replay_graph(self, graph): + graph.replay() + return + + # Tensor operations + @property + def BFloat16Tensor(self): + return self.hpu.BFloat16Tensor + + @property + def ByteTensor(self): + return self.hpu.ByteTensor + + @property + def DoubleTensor(self): + return self.hpu.DoubleTensor + + @property + def FloatTensor(self): + return self.hpu.FloatTensor + + @property + def HalfTensor(self): + return self.hpu.HalfTensor + + @property + def IntTensor(self): + return self.hpu.IntTensor + + @property + def LongTensor(self): + return self.hpu.LongTensor + + def pin_memory(self, tensor, align_bytes=1): + return tensor.pin_memory(self.device()) + + def is_pinned(self, tensor): + return tensor.is_pinned() + + def on_accelerator(self, tensor): + device_str = str(tensor.device) + if device_str.startswith('hpu:'): + return True + else: + return False + + def op_builder_dir(self): + try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + return "op_builder.hpu" + except ImportError: + return "deepspeed.ops.op_builder.hpu" + + # dict that holds class name <--> class type mapping i.e. + # 'AsyncIOBuilder': + # this dict will be filled at init stage + class_dict = None + + def _lazy_init_class_dict(self): + if self.class_dict is not None: + return + else: + self.class_dict = {} + # begin initialize for create_op_builder() + # put all valid class name <--> class type mapping into class_dict + op_builder_dir = self.op_builder_dir() + op_builder_module = importlib.import_module(op_builder_dir) + op_builder_absolute_path = os.path.dirname(op_builder_module.__file__) + for _, module_name, _ in pkgutil.iter_modules([op_builder_absolute_path]): + # avoid self references, + # skip sub_directories which contains ops for other backend(cpu, npu, etc.). 
+ if module_name != 'all_ops' and module_name != 'builder' and not os.path.isdir( + os.path.join(op_builder_absolute_path, module_name)): + module = importlib.import_module("{}.{}".format(op_builder_dir, module_name)) + for member_name in module.__dir__(): + if member_name.endswith( + 'Builder' + ) and member_name != "OpBuilder" and member_name != "CPUOpBuilder" and member_name != "TorchCPUOpBuilder": # avoid abstract classes + if not member_name in self.class_dict: + self.class_dict[member_name] = getattr(module, member_name) + # end initialize for create_op_builder() + + # create an instance of op builder and return, name specified by class_name + def create_op_builder(self, class_name): + self._lazy_init_class_dict() + if class_name in self.class_dict: + return self.class_dict[class_name]() + else: + return None + + # return an op builder class, name specified by class_name + def get_op_builder(self, class_name): + self._lazy_init_class_dict() + if class_name in self.class_dict: + return self.class_dict[class_name] + else: + return self.class_dict['NotImplementedBuilder'] if 'NotImplementedBuilder' in self.class_dict else None + + def build_extension(self): + from torch.utils.cpp_extension import BuildExtension + return BuildExtension + + def export_envs(self): + return [] + + def visible_devices_envs(self): + return ['HABANA_VISIBLE_MODULES'] + + def set_visible_devices_envs(self, current_env, local_accelerator_ids): + for env in self.visible_devices_envs(): + current_env[env] = ",".join(map(str, local_accelerator_ids)) diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/mps_accelerator.py b/venv/lib/python3.10/site-packages/deepspeed/accelerator/mps_accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..ff70b860d7c7c127f77dcda5d221eed6483cb45d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/accelerator/mps_accelerator.py @@ -0,0 +1,269 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from .abstract_accelerator import DeepSpeedAccelerator + +# During setup stage torch may not be installed, pass on no torch will +# allow op builder related API to be executed. 
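# Standalone sketch (not part of this module) of the same guarded-import
# pattern combined with the availability probe that is_available() uses below.
# It assumes only a regular PyTorch install and simply reports False where MPS
# is absent; the mps_ok name is illustrative.
try:
    import torch
    import torch.mps  # absent in older torch builds
    mps_ok = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
except ImportError:
    mps_ok = False
print(f"MPS available: {mps_ok}")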
+try: + import torch.mps +except ImportError: + pass + + +class MPS_Accelerator(DeepSpeedAccelerator): + + def __init__(self): + self._name = "mps" + self._communication_backend_name = None + + def is_synchronized_device(self): + return False + + def use_host_timers(self): + return self.is_synchronized_device() + + def resolves_data_dependency(self): + return self.is_synchronized_device() + + def handles_memory_backpressure(self): + return self.is_synchronized_device() + + # Device APIs + def device_name(self, device_index=None): + if device_index is None: + return "mps" + return "mps:{}".format(device_index) + + def device(self, device_index): + return torch.device("mps", index=0) + + def set_device(self, device_index): + return + + def current_device(self): + return torch.device("mps", index=0) + + def current_device_name(self): + return "mps:0" + + def device_count(self): + return 1 + + def synchronize(self, device_index=None): + return torch.mps.synchronize() + + # RNG APIs + def random(self): + return torch.random + + def set_rng_state(self, new_state, device_index=None): + return torch.mps.set_rng_state(new_state) + + def get_rng_state(self, device_index=None): + return torch.mps.get_rng_state() + + def manual_seed(self, seed): + return torch.mps.manual_seed(seed) + + def manual_seed_all(self, seed): + return torch.mps.manual_seed(seed) + + def seed(self): + return torch.mps.seed() + + def initial_seed(self, seed): + return + + def default_generator(self, device_index): + return + + # Streams/Events + @property + def Stream(self): + return None + + def stream(self, stream): + return None + + def current_stream(self, device_index=None): + return None + + def default_stream(self, device_index=None): + return None + + @property + def Event(self): + return None + + # Memory management + def empty_cache(self): + return torch.mps.empty_cache() + + def memory_allocated(self, device_index=None): + return torch.mps.current_allocated_memory() + + def max_memory_allocated(self, device_index=None): + return torch.mps.driver_allocated_memory() + + def set_per_process_memory_fraction(self, fraction): + return torch.mps.set_per_process_memory_fraction(fraction) + + def reset_max_memory_allocated(self, device_index=None): + return + + def memory_cached(self, device_index=None): + return + + def max_memory_cached(self, device_index=None): + return + + def reset_max_memory_cached(self, device_index=None): + return + + def memory_stats(self, device_index=None): + return + + def reset_peak_memory_stats(self, device_index=None): + return + + def memory_reserved(self, device_index=None): + return + + def max_memory_reserved(self, device_index=None): + return + + def total_memory(self, device_index=None): + return + + def available_memory(self, device_index=None): + return + + # Data types + def is_bf16_supported(self): + return False + + def is_fp16_supported(self): + return False + + def supported_dtypes(self): + return [torch.float] + + # Misc + def amp(self): + return + + def is_available(self): + return hasattr(torch.backends, "mps") and torch.backends.mps.is_available() + + def range_push(self, msg): + return + + def range_pop(self): + return + + def lazy_call(self, callback): + return + + def communication_backend_name(self): + return self._communication_backend_name + + def is_triton_supported(self): + return False + + # Graph operations + def create_graph(self): + return None + + def capture_to_graph(self, graph, pool=None, stream=None): + from deepspeed.runtime.utils import noop_context + return 
noop_context() + + def replay_graph(self, graph): + return + + # Tensor operations + @property + def BFloat16Tensor(self): + return + + @property + def ByteTensor(self): + return + + @property + def DoubleTensor(self): + return + + @property + def FloatTensor(self): + return + + @property + def HalfTensor(self): + return + + @property + def IntTensor(self): + return + + @property + def LongTensor(self): + return + + def pin_memory(self, tensor, align_bytes=1): + return tensor.pin_memory() + + def is_pinned(self, tensor): + return tensor.is_pinned() + + def on_accelerator(self, tensor): + device_str = str(tensor.device) + if device_str.startswith("mps"): + return True + else: + return False + + def op_builder_dir(self): + try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + + return "op_builder" + except ImportError: + return "deepspeed.ops.op_builder" + + # create an instance of op builder, specified by class_name + def create_op_builder(self, op_name): + builder_class = self.get_op_builder(op_name) + if builder_class is not None: + return builder_class() + return None + + # return an op builder class, specified by class_name + def get_op_builder(self, class_name): + from deepspeed.ops.op_builder.cpu import NotImplementedBuilder + + return NotImplementedBuilder + + def build_extension(self): + from torch.utils.cpp_extension import BuildExtension + + return BuildExtension + + def export_envs(self): + return [] + + # TODO: mpu's visible envs is confirmed, keep as CUDA_VISIBLE_DEVICES + def visible_devices_envs(self): + # TODO: could not find visible devices env for mps + return ['CUDA_VISIBLE_DEVICES'] + + def set_visible_devices_envs(self, current_env, local_accelerator_ids): + for env in self.visible_devices_envs(): + current_env[env] = ",".join(map(str, local_accelerator_ids)) diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/npu_accelerator.py b/venv/lib/python3.10/site-packages/deepspeed/accelerator/npu_accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..5d891ecb707dd7ab016e61e5abcd3bf0ac0a22c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/accelerator/npu_accelerator.py @@ -0,0 +1,287 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +import importlib +import inspect + +from .abstract_accelerator import DeepSpeedAccelerator +# During setup stage torch may not be installed, pass on no torch will +# allow op builder related API to be executed. +try: + import torch.npu +except ImportError: + pass + + +class NPU_Accelerator(DeepSpeedAccelerator): + + def __init__(self): + super().__init__() + self._name = 'npu' + self._communication_backend_name = 'hccl' + # dict that holds class name <--> class type mapping i.e. 
+ # 'AsyncIOBuilder': + # this dict will be filled at init stage + self.class_dict = None + + def is_synchronized_device(self): + return False + + def use_host_timers(self): + return self.is_synchronized_device() + + def resolves_data_dependency(self): + return self.is_synchronized_device() + + def handles_memory_backpressure(self): + return self.is_synchronized_device() + + # Device APIs + def device_name(self, device_index=None): + if device_index is None: + return 'npu' + return 'npu:{}'.format(device_index) + + def device(self, device_index=None): + return torch.npu.device(device_index) + + def set_device(self, device_index): + torch.npu.set_device(device_index) + + def current_device(self): + return torch.npu.current_device() + + def current_device_name(self): + return 'npu:{}'.format(torch.npu.current_device()) + + def device_count(self): + return torch.npu.device_count() + + def synchronize(self, device_index=None): + return torch.npu.synchronize(device_index) + + # RNG APIs + def random(self): + return torch.random + + def set_rng_state(self, new_state, device_index=None): + if device_index is None: + return torch.npu.set_rng_state(new_state) + + return torch.npu.set_rng_state(new_state, device_index) + + def get_rng_state(self, device_index=None): + if device_index is None: + return torch.npu.get_rng_state() + + return torch.npu.get_rng_state(device_index) + + def manual_seed(self, seed): + return torch.npu.manual_seed(seed) + + def manual_seed_all(self, seed): + return torch.npu.manual_seed_all(seed) + + def initial_seed(self, seed): + return torch.npu.initial_seed(seed) + + def default_generator(self, device_index): + return torch.npu.default_generators[device_index] + + # Streams/Events + @property + def Stream(self): + return torch.npu.Stream + + def stream(self, stream): + return torch.npu.stream(stream) + + def current_stream(self, device_index=None): + return torch.npu.current_stream(device_index) + + def default_stream(self, device_index=None): + return torch.npu.default_stream(device_index) + + @property + def Event(self): + return torch.npu.Event + + # Memory management + def empty_cache(self): + return torch.npu.empty_cache() + + def memory_allocated(self, device_index=None): + return torch.npu.memory_allocated(device_index) + + def max_memory_allocated(self, device_index=None): + return torch.npu.max_memory_allocated(device_index) + + def reset_max_memory_allocated(self, device_index=None): + return torch.npu.reset_max_memory_allocated(device_index) + + def memory_cached(self, device_index=None): + return torch.npu.memory_cached(device_index) + + def max_memory_cached(self, device_index=None): + return torch.npu.max_memory_cached(device_index) + + def reset_max_memory_cached(self, device_index=None): + return torch.npu.reset_max_memory_cached(device_index) + + def memory_stats(self, device_index=None): + if hasattr(torch.npu, 'memory_stats'): + return torch.npu.memory_stats(device_index) + + def reset_peak_memory_stats(self, device_index=None): + if hasattr(torch.npu, 'reset_peak_memory_stats'): + return torch.npu.reset_peak_memory_stats(device_index) + + def memory_reserved(self, device_index=None): + if hasattr(torch.npu, 'memory_reserved'): + return torch.npu.memory_reserved(device_index) + + def max_memory_reserved(self, device_index=None): + if hasattr(torch.npu, 'max_memory_reserved'): + return torch.npu.max_memory_reserved(device_index) + + def total_memory(self, device_index=None): + return torch.npu.get_device_properties(device_index).total_memory + + def 
available_memory(self, device_index=None): + return self.total_memory(device_index) - self.memory_allocated(device_index) + + # Data types + def is_bf16_supported(self): + return torch.npu.is_bf16_supported() + + def is_fp16_supported(self): + return True + + def supported_dtypes(self): + return [torch.float, torch.half, torch.bfloat16] + + # Misc + def amp(self): + if hasattr(torch.npu, 'amp'): + return torch.npu.amp + return None + + def is_available(self): + return torch.npu.is_available() + + def range_push(self, msg): + return + + def range_pop(self): + return + + def lazy_call(self, callback): + return torch.npu._lazy_call(callback) + + def communication_backend_name(self): + return self._communication_backend_name + + def is_triton_supported(self): + return False + + # Graph operations + def create_graph(self): + return None + + def capture_to_graph(self, graph, pool=None, stream=None): + from deepspeed.runtime.utils import noop_context + return noop_context() + + def replay_graph(self, graph): + return + + # Tensor operations + + @property + def BFloat16Tensor(self): + return torch.npu.BFloat16Tensor + + @property + def ByteTensor(self): + return torch.npu.ByteTensor + + @property + def DoubleTensor(self): + return torch.npu.DoubleTensor + + @property + def FloatTensor(self): + return torch.npu.FloatTensor + + @property + def HalfTensor(self): + return torch.npu.HalfTensor + + @property + def IntTensor(self): + return torch.npu.IntTensor + + @property + def LongTensor(self): + return torch.npu.LongTensor + + def pin_memory(self, tensor, align_bytes=1): + return tensor.pin_memory() + + def is_pinned(self, tensor): + return tensor.is_pinned() + + def on_accelerator(self, tensor): + device_str = str(tensor.device) + if device_str.startswith('npu:'): + return True + else: + return False + + def op_builder_dir(self): + try: + # is op_builder from deepspeed or a 3p version? 
this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + return "op_builder.npu" + except ImportError: + return "deepspeed.ops.op_builder.npu" + + def _lazy_init_class_dict(self): + if self.class_dict: + return + + op_builder_module = importlib.import_module(self.op_builder_dir()) + + # get op builder class from op_builder/npu/__init__.py + self.class_dict = {} + for class_name, class_obj in inspect.getmembers(op_builder_module, inspect.isclass): + self.class_dict[class_name] = class_obj + + # create an instance of op builder and return, name specified by class_name + def create_op_builder(self, class_name): + builder_class = self.get_op_builder(class_name) + return None if builder_class is None else builder_class() + + # return an op builder class, name specified by class_name + def get_op_builder(self, class_name): + self._lazy_init_class_dict() + if class_name in self.class_dict: + return self.class_dict[class_name] + else: + return self.class_dict['NotImplementedBuilder'] if 'NotImplementedBuilder' in self.class_dict else None + + def build_extension(self): + from torch.utils.cpp_extension import BuildExtension + return BuildExtension + + def export_envs(self): + return ['ASCEND', 'HCCL', 'LD_LIBRARY', 'PATH'] + + def visible_devices_envs(self): + return ['ASCEND_RT_VISIBLE_DEVICES'] + + def set_visible_devices_envs(self, current_env, local_accelerator_ids): + for env in self.visible_devices_envs(): + current_env[env] = ",".join(map(str, local_accelerator_ids)) diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/real_accelerator.py b/venv/lib/python3.10/site-packages/deepspeed/accelerator/real_accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..037162e867ecc30897210ecab8b339e5fd4e0fe7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/accelerator/real_accelerator.py @@ -0,0 +1,257 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +import os + +try: + # Importing logger currently requires that torch is installed, hence the try...except + # TODO: Remove logger dependency on torch. + from deepspeed.utils import logger as accel_logger +except ImportError as e: + accel_logger = None + +try: + from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1 +except ImportError as e: + dsa1 = None +try: + from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2 +except ImportError as e: + dsa2 = None + +SUPPORTED_ACCELERATOR_LIST = ['cuda', 'cpu', 'xpu', 'xpu.external', 'npu', 'mps', 'hpu'] + +ds_accelerator = None + + +def _validate_accelerator(accel_obj): + # because abstract_accelerator has different path during + # build time (accelerator.abstract_accelerator) + # and run time (deepspeed.accelerator.abstract_accelerator) + # and extension would import the + # run time abstract_accelerator/DeepSpeedAccelerator as its base + # class, so we need to compare accel_obj with both base class. 
+ # if accel_obj is instance of DeepSpeedAccelerator in one of + # accelerator.abstractor_accelerator + # or deepspeed.accelerator.abstract_accelerator, consider accel_obj + # is a conforming object + if not ((dsa1 is not None and isinstance(accel_obj, dsa1)) or (dsa2 is not None and isinstance(accel_obj, dsa2))): + raise AssertionError(f"{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator") + + # TODO: turn off is_available test since this breaks tests + # assert accel_obj.is_available(), \ + # f'{accel_obj.__class__.__name__} accelerator fails is_available() test' + + +def is_current_accelerator_supported(): + return get_accelerator().device_name() in SUPPORTED_ACCELERATOR_LIST + + +def get_accelerator(): + global ds_accelerator + if ds_accelerator is not None: + return ds_accelerator + + accelerator_name = None + ds_set_method = None + # 1. Detect whether there is override of DeepSpeed accelerators from environment variable. + if "DS_ACCELERATOR" in os.environ.keys(): + accelerator_name = os.environ["DS_ACCELERATOR"] + if accelerator_name == "xpu": + try: + import intel_extension_for_pytorch as ipex + assert ipex._C._has_xpu(), "XPU_Accelerator requires an intel_extension_for_pytorch that supports XPU." + except ImportError as e: + raise ValueError( + f"XPU_Accelerator requires intel_extension_for_pytorch, which is not installed on this system.") + elif accelerator_name == "xpu.external": + try: + import intel_extension_for_deepspeed # noqa: F401 # type: ignore + except ImportError as e: + raise ValueError( + f"XPU_Accelerator external requires intel_extension_for_deepspeed, which is not installed on this system." + ) + elif accelerator_name == "cpu": + pass + elif accelerator_name == "npu": + try: + import torch_npu # noqa: F401 # type: ignore + except ImportError as e: + raise ValueError(f"NPU_Accelerator requires torch_npu, which is not installed on this system.") + pass + elif accelerator_name == "mps": + try: + import torch.mps + + # should use torch.mps.is_available() if it exists someday but this is used as proxy + torch.mps.current_allocated_memory() + except (RuntimeError, ImportError) as e: + raise ValueError(f"MPS_Accelerator requires torch.mps, which is not installed on this system.") + elif accelerator_name == "hpu": + try: + import habana_frameworks.torch.hpu # noqa: F401 + except ImportError as e: + raise ValueError( + f"HPU_Accelerator requires habana_frameworks.torch.hpu, which is not installed on this system.") + elif accelerator_name not in SUPPORTED_ACCELERATOR_LIST: + raise ValueError(f'DS_ACCELERATOR must be one of {SUPPORTED_ACCELERATOR_LIST}. ' + f'Value "{accelerator_name}" is not supported') + ds_set_method = "override" + + # 2. If no override, detect which accelerator to use automatically + if accelerator_name is None: + # We need a way to choose among different accelerator types. + # Currently we detect which accelerator extension is installed + # in the environment and use it if the installing answer is True. + # An alternative might be detect whether CUDA device is installed on + # the system but this comes with two pitfalls: + # 1. the system may not have torch pre-installed, so + # get_accelerator().is_available() may not work. + # 2. Some scenario like install on login node (without CUDA device) + # and run on compute node (with CUDA device) may cause mismatch + # between installation time and runtime. 
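# Standalone sketch of the override path handled above (not part of this
# function): setting DS_ACCELERATOR before the first get_accelerator() call
# bypasses the auto-detection cascade that follows. Any value from
# SUPPORTED_ACCELERATOR_LIST is accepted; 'cpu' is only an example choice.
import os
os.environ["DS_ACCELERATOR"] = "cpu"

from deepspeed.accelerator import get_accelerator
accel = get_accelerator()   # -> CPU_Accelerator, ds_set_method == "override"
print(accel.device_name(), accel.communication_backend_name())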
+ + try: + from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401,F811 # type: ignore + accelerator_name = "xpu.external" + except ImportError as e: + pass + if accelerator_name is None: + try: + import intel_extension_for_pytorch as ipex + if ipex._C._has_xpu(): + accelerator_name = "xpu" + else: + accelerator_name = "cpu" + except ImportError as e: + pass + if accelerator_name is None: + try: + import torch_npu # noqa: F401,F811 # type: ignore + + accelerator_name = "npu" + except ImportError as e: + pass + if accelerator_name is None: + try: + import torch.mps + + # should use torch.mps.is_available() if it exists someday but this is used as proxy + torch.mps.current_allocated_memory() + accelerator_name = "mps" + except (RuntimeError, ImportError) as e: + pass + if accelerator_name is None: + try: + import habana_frameworks.torch.hpu # noqa: F401,F811 + + accelerator_name = "hpu" + except ImportError as e: + pass + if accelerator_name is None: + # borrow this log from PR#5084 + try: + import torch + + # Determine if we are on a GPU or x86 CPU with torch. + if torch.cuda.is_available(): #ignore-cuda + accelerator_name = "cuda" + else: + if accel_logger is not None: + accel_logger.warn( + "Setting accelerator to CPU. If you have GPU or other accelerator, we were unable to detect it." + ) + accelerator_name = "cpu" + except (RuntimeError, ImportError) as e: + # TODO need a more decent way to detect which accelerator to use, consider using nvidia-smi command for detection + accelerator_name = "cuda" + pass + + ds_set_method = "auto detect" + + # 3. Set ds_accelerator accordingly + if accelerator_name == "cuda": + from .cuda_accelerator import CUDA_Accelerator + + ds_accelerator = CUDA_Accelerator() + elif accelerator_name == "cpu": + from .cpu_accelerator import CPU_Accelerator + + ds_accelerator = CPU_Accelerator() + elif accelerator_name == "xpu.external": + # XPU_Accelerator is already imported in detection stage + ds_accelerator = XPU_Accelerator() + elif accelerator_name == "xpu": + from .xpu_accelerator import XPU_Accelerator + + ds_accelerator = XPU_Accelerator() + elif accelerator_name == "npu": + from .npu_accelerator import NPU_Accelerator + + ds_accelerator = NPU_Accelerator() + elif accelerator_name == "mps": + from .mps_accelerator import MPS_Accelerator + + ds_accelerator = MPS_Accelerator() + elif accelerator_name == 'hpu': + from .hpu_accelerator import HPU_Accelerator + + ds_accelerator = HPU_Accelerator() + _validate_accelerator(ds_accelerator) + if accel_logger is not None: + accel_logger.info(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})") + return ds_accelerator + + +def set_accelerator(accel_obj): + global ds_accelerator + _validate_accelerator(accel_obj) + if accel_logger is not None: + accel_logger.info(f"Setting ds_accelerator to {accel_obj._name} (model specified)") + ds_accelerator = accel_obj + + +""" +-----------[code] test_get.py ----------- +from deepspeed.accelerator import get_accelerator +my_accelerator = get_accelerator() +logger.info(f'{my_accelerator._name=}') +logger.info(f'{my_accelerator._communication_backend=}') +logger.info(f'{my_accelerator.HalfTensor().device=}') +logger.info(f'{my_accelerator.total_memory()=}') +-----------[code] test_get.py ----------- + +---[output] python test_get.py--------- +my_accelerator.name()='cuda' +my_accelerator.communication_backend='nccl' +my_accelerator.HalfTensor().device=device(type='cuda', index=0) +my_accelerator.total_memory()=34089730048 +---[output] python 
test_get.py--------- + +************************************************************************** +-----------[code] test_set.py ----------- +from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator +cu_accel = CUDA_Accelerator() +logger.info(f'{id(cu_accel)=}') +from deepspeed.accelerator import set_accelerator, get_accelerator +set_accelerator(cu_accel) + +my_accelerator = get_accelerator() +logger.info(f'{id(my_accelerator)=}') +logger.info(f'{my_accelerator._name=}') +logger.info(f'{my_accelerator._communication_backend=}') +logger.info(f'{my_accelerator.HalfTensor().device=}') +logger.info(f'{my_accelerator.total_memory()=}') +-----------[code] test_set.py ----------- + + +---[output] python test_set.py--------- +id(cu_accel)=139648165478304 +my_accelerator= +my_accelerator.name='cuda' +my_accelerator.communication_backend='nccl' +my_accelerator.HalfTensor().device=device(type='cuda', index=0) +my_accelerator.total_memory()=34089730048 +---[output] python test_set.py--------- +""" diff --git a/venv/lib/python3.10/site-packages/deepspeed/accelerator/xpu_accelerator.py b/venv/lib/python3.10/site-packages/deepspeed/accelerator/xpu_accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..c59f60077d2f20daee22ab0cf3450efe1d84c1c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/accelerator/xpu_accelerator.py @@ -0,0 +1,298 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator +import intel_extension_for_pytorch as ipex # noqa: F401 # type: ignore +import oneccl_bindings_for_pytorch # noqa: F401 # type: ignore + + +class XPU_Accelerator(DeepSpeedAccelerator): + + def __init__(self): + self._name = 'xpu' + self._communication_backend_name = 'ccl' + self.aligned_tensors = [] + + def is_synchronized_device(self): + return False + + def use_host_timers(self): + return self.is_synchronized_device() + + def resolves_data_dependency(self): + return self.is_synchronized_device() + + def handles_memory_backpressure(self): + return self.is_synchronized_device() + + # Device APIs + def device_name(self, device_index=None): + if device_index == None: + return 'xpu' + return 'xpu:{}'.format(device_index) + + def device(self, device_index=None): + return torch.xpu.device(device_index) + + def set_device(self, device_index): + torch.xpu.set_device(device_index) + + def current_device(self): + return torch.xpu.current_device() + + def current_device_name(self): + return 'xpu:{}'.format(torch.xpu.current_device()) + + def device_count(self): + return torch.xpu.device_count() + + def synchronize(self, device_index=None): + return torch.xpu.synchronize(device_index) + + # RNG APIs + def random(self): + return torch.xpu.random + + def set_rng_state(self, new_state, device_index=None): + if device_index == None: + return torch.xpu.set_rng_state(new_state) + return torch.xpu.set_rng_state(new_state, device_index) + + def get_rng_state(self, device_index=None): + if device_index == None: + return torch.xpu.get_rng_state() + return torch.xpu.get_rng_state(device_index) + + def manual_seed(self, seed): + return torch.xpu.manual_seed(seed) + + def manual_seed_all(self, seed): + return torch.xpu.manual_seed_all(seed) + + def initial_seed(self, seed): + return torch.xpu.initial_seed(seed) + + def default_generator(self, device_index): + return torch.xpu.default_generators[device_index] + + # Streams/Events + @property + def 
Stream(self): + return torch.xpu.Stream + + def stream(self, stream): + return torch.xpu.stream(stream) + + def current_stream(self, device_index=None): + return torch.xpu.current_stream(device_index) + + def default_stream(self, device_index=None): + # torch.xpu does not support the sync behavior of default stream as cuda + # use current_stream as workaround + # see https://pytorch.org/docs/stable/notes/cuda.html#cuda-streams + return torch.xpu.current_stream(device_index) + + @property + def Event(self): + return torch.xpu.Event + + # Memory management + def empty_cache(self): + return torch.xpu.empty_cache() + + def memory_allocated(self, device_index=None): + return torch.xpu.memory_allocated(device_index) + + def max_memory_allocated(self, device_index=None): + return torch.xpu.max_memory_allocated(device_index) + + def reset_max_memory_allocated(self, device_index=None): + return torch.xpu.reset_max_memory_allocated(device_index) + + def memory_cached(self, device_index=None): + return torch.xpu.memory_reserved(device_index) + + def max_memory_cached(self, device_index=None): + return torch.xpu.max_memory_reserved(device_index) + + def reset_max_memory_cached(self, device_index=None): + return torch.xpu.reset_max_memory_reserved(device_index) + + def memory_stats(self, device_index=None): + return torch.xpu.memory_stats(device_index) + + def reset_peak_memory_stats(self, device_index=None): + return torch.xpu.reset_peak_memory_stats(device_index) + + def memory_reserved(self, device_index=None): + return torch.xpu.memory_reserved(device_index) + + def max_memory_reserved(self, device_index=None): + return torch.xpu.max_memory_reserved(device_index) + + def total_memory(self, device_index=None): + return torch.xpu.get_device_properties(device_index).total_memory + + def available_memory(self, device_index=None): + return self.total_memory(device_index) - self.memory_allocated(device_index) + + # Misc + def amp(self): + return torch.xpu.amp + + def is_available(self): + return torch.xpu.is_available() + + def range_push(self, msg): + # TODO itt is currently not supported yet + # return torch.profiler.itt.range_push(msg) + return + + def range_pop(self): + # TODO itt is currently not supported yet + # return torch.profiler.itt.range_pop() + return + + def lazy_call(self, callback): + return torch.xpu.lazy_init._lazy_call(callback) + + def communication_backend_name(self): + return self._communication_backend_name + + def is_triton_supported(self): + return False + + # Graph operations + def create_graph(self): + return None + + def capture_to_graph(self, graph, pool=None, stream=None): + from deepspeed.runtime.utils import noop_context + return noop_context() + + def replay_graph(self, graph): + return + + # Data types + def is_bf16_supported(self): + return True + + def is_fp16_supported(self): + return True + + def supported_dtypes(self): + return [torch.float, torch.half, torch.bfloat16] + + # Tensor operations + + @property + def BFloat16Tensor(self): + return torch.xpu.BFloat16Tensor + + @property + def ByteTensor(self): + return torch.xpu.ByteTensor + + @property + def DoubleTensor(self): + return torch.xpu.DoubleTensor + + @property + def FloatTensor(self): + return torch.xpu.FloatTensor + + @property + def HalfTensor(self): + return torch.xpu.HalfTensor + + @property + def IntTensor(self): + return torch.xpu.IntTensor + + @property + def LongTensor(self): + return torch.xpu.LongTensor + + def pin_memory(self, tensor, align_bytes=1): + if align_bytes == 1: + return 
tensor.pin_memory(device=self.current_device_name()) + elif align_bytes == 0: + from intel_extension_for_deepspeed.op_builder.async_io import AsyncIOBuilder + self.aio_handle = AsyncIOBuilder().load().aio_handle(128 * 1024, 8, False, False, False) + aligned_t = self.aio_handle.new_cpu_locked_tensor(tensor.numel(), tensor) + aligned_t = aligned_t[:tensor.numel()].copy_(tensor) + self.aligned_tensors.append([aligned_t.data_ptr(), aligned_t[-1].data_ptr()]) + return aligned_t + + def is_pinned(self, tensor): + if tensor.is_pinned(device=self.current_device_name()): + return True + else: + for begin, end in self.aligned_tensors: + if begin <= tensor.data_ptr() and tensor.data_ptr() <= end: + return True + return False + + def op_builder_dir(self): + try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + return "op_builder.xpu" + except ImportError: + return "deepspeed.ops.op_builder.xpu" + + def on_accelerator(self, tensor): + device_str = str(tensor.device) + if device_str.startswith('xpu:'): + return True + else: + return False + + # create an instance of op builder and return, name specified by class_name + def create_op_builder(self, op_name): + builder_class = self.get_op_builder(op_name) + if builder_class != None: + return builder_class() + return None + + # return an op builder class, name specified by class_name + def get_op_builder(self, class_name): + try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + from op_builder.xpu import CPUAdagradBuilder, CPUAdamBuilder, FusedAdamBuilder, AsyncIOBuilder + except ImportError: + from deepspeed.ops.op_builder.xpu import CPUAdagradBuilder, CPUAdamBuilder, FusedAdamBuilder, AsyncIOBuilder + + if class_name == "AsyncIOBuilder": + return AsyncIOBuilder + elif class_name == "CPUAdagradBuilder": + return CPUAdagradBuilder + elif class_name == "CPUAdamBuilder": + return CPUAdamBuilder + elif class_name == "FusedAdamBuilder": + return FusedAdamBuilder + else: + return None + + def build_extension(self): + try: + from intel_extension_for_pytorch.xpu.cpp_extension import DpcppBuildExtension + except ImportError: + from intel_extension_for_pytorch.xpu.utils import DpcppBuildExtension + return DpcppBuildExtension + + def export_envs(self): + return [] + + def visible_devices_envs(self): + return ['ZE_AFFINITY_MASK'] + + def set_visible_devices_envs(self, current_env, local_accelerator_ids): + for env in self.visible_devices_envs(): + current_env[env] = ",".join(map(str, local_accelerator_ids)) diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..73d8153bab920a8e6865a7fd4e59a068e9a2993b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .autotuner import Autotuner diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..362cbea3ebf6413f8c1b1368f003d4f847226c12 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/autotuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/autotuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae2bd7cf770ade65beeb3d9627674a20f3ed04fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/autotuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..260e5b142fae532c4e4b685d540a039b4d8ea8a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0d8b89b92d1bf94943a08210763c60641935ca1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/scheduler.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1590552a209ab1b84c005cfd5e2943541f49485 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/scheduler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8651b14fbc8c448e598c4ca7fb4c02485be7a86 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/autotuner.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/autotuner.py new file mode 100644 index 0000000000000000000000000000000000000000..dfd195bc37ebd9454a9232ae5d9f78fc899a91b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/autotuner.py @@ -0,0 +1,1113 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import shutil +import subprocess +import time +import datetime +import math +import hjson + +from ..runtime.config_utils import dict_raise_error_on_duplicate_keys +from ..runtime.constants import * + +from ..runtime.zero.config import ZERO_OPTIMIZATION, ZeroStageEnum +from ..utils import logger +from .config import DeepSpeedAutotuningConfig +from .constants import * +from .scheduler import ResourceManager +from .tuner import GridSearchTuner, RandomTuner, ModelBasedTuner +from .utils import * +from deepspeed.accelerator import get_accelerator + +try: + from tabulate import tabulate +except ImportError: + tabulate = None + +try: + import mlflow + has_mlflow = True +except Exception as e: + has_mlflow = False + +ZERO_OPTIMIZATION_STAGE = "stage" +OFFLOAD_OPTIMIZER = "offload_optimizer" +OFFLOAD_PARAM = "offload_param" +ZERO_OPTIMIZATION_STAGE_DEFAULT = ZeroStageEnum.disabled + + +class Autotuner: + """The DeepSpeed Autotuner automatically discovers the optimal DeepSpeed configuration that delivers good training speed. The Autotuner uses model information, system information, and heuristics to efficiently tune system knobs that affect compute and memory efficiencies, such as ZeRO optimization stages, micro-batch sizes, and many other ZeRO optimization configurations. It not only reduces the time and resources user spend on tuning, but also can discover configurations better than hand-tuned methods. + Autotuning with DeepSpeed requires no code change from DeepSpeed users. Please refer to the README for usage details. + """ + + def __init__(self, args, active_resources): + self.args = args + self.selected_exp_dir = None + + assert tabulate is not None, "Missing required package `tabulate`, please install with `pip install deepspeed[autotuning]`." + + logger.debug(f"autotuning args={args}") + + self.user_config = self._get_user_config(args.user_args) + assert self.user_config is not None, "DeepSpeed configuration is not provided" + + self.autotuning_config = DeepSpeedAutotuningConfig(self.user_config) + if self.user_config[AUTOTUNING]: + if AUTOTUNING_EXPS_DIR in self.user_config[AUTOTUNING].keys(): + del self.user_config[AUTOTUNING][AUTOTUNING_EXPS_DIR] + if AUTOTUNING_RESULTS_DIR in self.user_config[AUTOTUNING].keys(): + del self.user_config[AUTOTUNING][AUTOTUNING_RESULTS_DIR] + + self.exps_dir = self.autotuning_config.exps_dir + if self.autotuning_config.overwrite and os.path.exists(self.exps_dir): + shutil.rmtree(self.exps_dir, ignore_errors=True) + if not os.path.exists(self.exps_dir): + try: + os.makedirs(self.exps_dir, exist_ok=True) + logger.info(f"Created autotuning experiments directory: {self.exps_dir}") + except: + logger.error( + f"Failed to create {self.exps_dir}, please check `exps_dir` in the autotuning config file is accessible by all the nodes in the job." + ) + exit(-1) + + self.results_dir = self.autotuning_config.results_dir + if self.autotuning_config.overwrite and os.path.exists(self.results_dir): + shutil.rmtree(self.results_dir, ignore_errors=True) + if not os.path.exists(self.results_dir): + try: + os.makedirs(self.results_dir, exist_ok=True) + logger.info(f"Created autotuning results directory: {self.exps_dir}") + except: + logger.error( + f"Failed to create {self.results_dir}, please check `results_dir` in the autotuning config file is accessible by all the nodes in the job." 
+ ) + exit(-1) + + # set the active resource for the autotuner resource manager + self.rm = self._get_resource_manager(active_resources) + + # get resource requirement for each autotuning experiment + self.exp_num_nodes, self.exp_num_gpus = self._get_exp_resources(args) + + assert self.exp_num_gpus <= self.rm.num_gpus_per_node, "num_gpus in the autotuning configuration must not be less than the --num_gpus value in the train script if any" + assert self.exp_num_nodes <= len( + self.rm.nodes + ), "num_nodes in the autotuning configuration must not be less than the --num_nodes value in the train script if any" + + self.records = {} + self.optimal_cmd = None + self.optimal_ds_config = None + + self.mlflow_parent_id = None + + def print_tuning_results(self): + """Print the autotuning results in tabular format. + """ + best_space_records = self.get_best_space_records() + tab = [] + if best_space_records: + for key, val in best_space_records.items(): + if not val: + continue + row = [] + row.append(key) + num_exps = 0 + if key == GLOBAL_TUNING_SPACE: + cnt = 0 + for k, v in best_space_records.items(): + if k != GLOBAL_TUNING_SPACE: + cnt += v[2] + num_exps = cnt + else: + num_exps = val[2] + row.append(num_exps) + row.append(val[1]) + row.append(val[0]['name']) + tab.append(row) + summary = tabulate(tab, + headers=["tuning_space", "num_experiments", "best_metric_val", "best_exp_name"], + tablefmt="pipe") + print(summary) + with open(os.path.join(self.results_dir, 'summary.txt'), 'w', buffering=BUFSIZE) as fd: + fd.write(summary) + fd.flush() + os.fsync(fd) + + if GLOBAL_TUNING_SPACE in best_space_records: + best_exp, best_metric_val, total_num_exps = best_space_records[GLOBAL_TUNING_SPACE] + if best_exp: + logger.info( + f"{best_exp['name']} is the optimal setup after tuning. The exp result is at {best_exp['result_dir']}." + ) + else: + logger.info(f"No optimal setup is found. Please check that experiments were run successfully.") + tuning_duration = datetime.timedelta(seconds=(time.time() - self.start_time)) + + logger.info(f"Tuning completed in {tuning_duration}") + with open(os.path.join(self.results_dir, 'summary.txt'), 'a') as f: + f.write( + f"\n\nTuning completed in {tuning_duration}. Total number of experiments: {self.rm.experiment_count - 1}." + ) + f.flush() + + def _get_user_config(self, user_args): + """Get DeepSpeed configuration from the user arguments passed to the launcher. 
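+        For example, if the user arguments contain --deepspeed_config ds_config.json
+        (or --deepspeed ds_config.json), that JSON file is loaded as the DeepSpeed
+        configuration, mirroring the parsing logic below.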
+ + Args: + user_args ([list]): user arguments passed to the DeepSpeed launcher + + Returns: + [dict]: DeepSpeed configuration dictionary + """ + user_config_file = None + if "--deepspeed_config" in user_args: + idx = user_args.index("--deepspeed_config") + assert ".json" in user_args[ + idx + 1], "DeepSpeed --deepspeed_config requires a json file to specify the configuration" + + user_config_file = user_args[idx + 1] + elif "--deepspeed" in user_args: + idx = user_args.index("--deepspeed") + if ".json" in user_args[idx + 1]: + user_config_file = user_args[idx + 1] + + logger.debug(f"user_config_file = {user_config_file}") + if user_config_file is not None: + assert os.path.isfile(user_config_file), "DeepSpeed configuration file: {} is not an existing file".format( + user_config_file) + if os.path.exists(user_config_file): + return json.load(open(user_config_file, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys) + + return None + + def _get_resource_manager(self, active_resources): + """Initialize and return a resource manager + + Args: + active_resources ([dict]): A dictionary of hostname and its slots (GPUs), e.g. {"worker-0": "0,1,2,3,4,5,6,7,8"} + + Raises: + RuntimeError: raises the error if no GPU is available + + Returns: + [ResourceManager]: A resource manager that schedules and runs autotuning experiments. + """ + logger.info(f"active_resources = {active_resources}") + + hosts = [] + ngpus_per_node = 100 + for hostname, slots in active_resources.items(): + hosts.append(hostname) + ngpus_per_node = min(len(slots), ngpus_per_node) + + assert ngpus_per_node > 0, "no gpu is available" + + return ResourceManager(args=self.args, + hosts=hosts, + num_gpus_per_node=ngpus_per_node, + results_dir=self.results_dir, + exps_dir=self.exps_dir, + arg_mappings=self.autotuning_config.arg_mappings) + + def _get_exp_resources(self, args): + """Get resource requirement for each autotuning experiment + + Args: + args (dict): user args + + Returns: + num_nodes, num_gpus: the number of gpus and number of nodes used in the autotuning experiments + """ + if args.num_nodes > 0: + num_nodes = args.num_nodes + else: + num_nodes = len(self.rm.nodes) + + if args.num_gpus > 0: + num_gpus = args.num_gpus + else: + num_gpus = self.rm.num_gpus_per_node + + return num_nodes, num_gpus + + def metric(self): + return self.autotuning_config.metric + + def fast_enabled(self): + return self.autotuning_config.fast + + def max_train_batch_size(self): + return self.autotuning_config.max_train_batch_size + + def mp_size(self): + return self.autotuning_config.mp_size + + def max_train_micro_batch_size_per_gpu(self): + if self.max_train_batch_size( + ) and self.max_train_batch_size() > 0: # if the user specifies a max_train_batch_size + max_train_micro_batch_size = self.max_train_batch_size() * self.mp_size() // ( + self.exp_num_gpus * self.exp_num_nodes) # gradient accumulation steps >=1 + return min(self.autotuning_config.max_train_micro_batch_size_per_gpu, max_train_micro_batch_size) + else: + return self.autotuning_config.max_train_micro_batch_size_per_gpu + + def min_train_micro_batch_size_per_gpu(self): + return self.autotuning_config.min_train_micro_batch_size_per_gpu + + def num_tuning_micro_batch_sizes(self): + return self.autotuning_config.num_tuning_micro_batch_sizes + + def fp16_enabled(self): + if FP16 in self.user_config.keys(): + return self.user_config[FP16].get(FP16_ENABLED, FP16_ENABLED_DEFAULT) + else: + return False + + def get_gpu_memory_info(self): + return get_accelerator().total_memory() + + 
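+    # Worked example (illustrative) of the per-GPU instantiation-memory estimate that
+    # get_instantiation_memory_required_per_gpu() below computes, assuming the Adam
+    # optimizer, fp16, 1e9 parameters, 8 total GPUs and mp_size() == 1:
+    #   ZeRO-0: 2 GB params + 2 GB grads     + 16 GB optimizer states = 20   GB per GPU
+    #   ZeRO-1: 2 GB        + 2 GB           + 16 GB / 8              =  6   GB per GPU
+    #   ZeRO-2: 2 GB        + 2 GB / 8       + 16 GB / 8              =  4.25 GB per GPU
+    #   ZeRO-3: 2 GB / 8    + 2 GB / 8       + 16 GB / 8              =  2.5  GB per GPU
+    # Activation memory (profiled separately) is added on top of these estimates when
+    # deciding which ZeRO stages are worth exploring.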
def get_activation_memory_per_gpu(self): + if self.model_info and "activation_mem_per_gpu" in self.model_info: + return self.model_info["activation_mem_per_gpu"] + + def get_instantiation_memory_required_per_gpu(self, zero_stage): + num_params = self.get_model_num_params() + total_gpus = self.exp_num_nodes * self.exp_num_gpus + fp16_enabled = self.fp16_enabled() + + if not num_params: + return 0 + # assume the model uses Adam optimizer + # ZeroStageEnum.disabled: + params_mem = num_params * (2 if fp16_enabled else 4) + gradients_mem = num_params * (2 if fp16_enabled else 4) + optimizer_mem = num_params * (16 if fp16_enabled else 8) + + if zero_stage >= ZeroStageEnum.optimizer_states: + optimizer_mem = optimizer_mem / total_gpus + + if zero_stage >= ZeroStageEnum.gradients: + gradients_mem = gradients_mem / total_gpus + + if zero_stage >= ZeroStageEnum.weights: + params_mem = params_mem / total_gpus + + mem_per_gpu = (params_mem + gradients_mem + optimizer_mem) / self.mp_size() + + return mem_per_gpu + + def _generate_experiments(self, tuning_space, max_train_batch_size_per_gpu): + """Generates a list of autotuning experiments given a tuning_space. + The corresponding parameter values are replaced by user-defined values in the DeepSpeed configuration file. + Args: + tuning_space ([dict]): A DeepSpeed configuration dictionary where a value can be a list (called a tuning parameter). For example, + { + "zero_optimization": { + "stage": 1, + "reduce_bucket_size": [5e7, + 5e8, + 1e9], + "allgather_bucket_size": [5e7, + 5e8, + 1e9], + } + } + reduce_bucket_size and allgather_bucket_size are the tuning parameters in this tuning space. + Returns: + [list]: a list of experiments generated by taking combinations of values of the tuning space. The above tuning space generates 3*3 = 9 experiments if the user DeepSpeed configuration file does not overwrite the two tuning parameters or define more tuning parameters. 
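+        Each generated experiment is a dictionary carrying the experiment name, the
+        filled-in DeepSpeed config, and the number of GPUs and nodes to use, e.g.
+        (illustrative) {"name": "z1_...", "ds_config": {...}, "num_gpus": 8, "num_nodes": 1};
+        see the loop below for the exact keys used.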
+ """ + exps = [] + + # each zero stage uses a different template configuration file + config_zero = tuning_space.get(ZERO_OPTIMIZATION, {}) + stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, ZERO_OPTIMIZATION_STAGE_DEFAULT) + template_config = {} + if stage == 0: + template_path = DEFAULT_TEMPLATE_PATH_ZERO_0 + template_config = hjson.load(open(template_path, 'r')) + prefix = "z0_" + + elif stage == 1: + template_path = DEFAULT_TEMPLATE_PATH_ZERO_1 + template_config = hjson.load(open(template_path, 'r')) + prefix = "z1_" + + elif stage == 2: + template_path = DEFAULT_TEMPLATE_PATH_ZERO_2 + template_config = hjson.load(open(template_path, 'r')) + prefix = "z2_" + + elif stage == 3: + template_path = DEFAULT_TEMPLATE_PATH_ZERO_3 + template_config = hjson.load(open(template_path, 'r')) + model_info = self.model_info + if model_info and "hidden_size" in model_info: + hs = model_info["hidden_size"] + template_config[ZERO_OPTIMIZATION]['reduce_bucket_size'] = hs * hs + template_config[ZERO_OPTIMIZATION]['stage3_prefetch_bucket_size'] = 0.9 * hs * hs + template_config[ZERO_OPTIMIZATION]['stage3_param_persistence_threshold'] = 10 * hs + prefix = "z3_" + else: + return exps + + # replace the corresponding parameter values if the user specifies them in the DeepSpeed configuration file + replace_dict(tuning_space, self.user_config, [ZERO_OPTIMIZATION, TRAIN_MICRO_BATCH_SIZE_PER_GPU]) + + logger.debug(f"tuning_space = {json.dumps(tuning_space)}") + + all_configs = get_all_configs(tuning_space, ignore_keys=["optimizer"]) + + tuning_keys = get_tuning_keys(tuning_space) + + logger.debug(f"tuning_keys = {tuning_keys}") + + logger.debug(f"before pruning total configs = {len(all_configs)}") + + pruned_list = prune_configs(all_configs) + + logger.debug(f"after pruning total configs = {len(pruned_list)}") + + for config in pruned_list: + exp_config = copy.deepcopy(template_config) + # fill the template with the expr config + replace_dict(exp_config, config) + + # if the config does not use offloading, remove the offloading section + config_zero = config.get(ZERO_OPTIMIZATION, None) + if config_zero: + if OFFLOAD_OPTIMIZER not in config_zero and OFFLOAD_OPTIMIZER in exp_config[ZERO_OPTIMIZATION]: + del exp_config[ZERO_OPTIMIZATION][OFFLOAD_OPTIMIZER] + if OFFLOAD_PARAM not in config_zero and OFFLOAD_PARAM in exp_config[ZERO_OPTIMIZATION]: + del exp_config[ZERO_OPTIMIZATION][OFFLOAD_PARAM] + # set gradient accumulation steps according to max_train_batch_size_per_gpu + mbs = exp_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] + gas = max_train_batch_size_per_gpu // mbs + exp_config[GRADIENT_ACCUMULATION_STEPS] = gas + exp_config[TRAIN_BATCH_SIZE] = mbs * gas * \ + self.exp_num_gpus * self.exp_num_nodes // self.mp_size() + exp = {} + # generate the expr name + exp_name = canonical_name(exp_config, tuning_keys, prefix) + exp['name'] = exp_name + exp[DS_CONFIG] = exp_config + exp['num_gpus'] = self.exp_num_gpus + exp['num_nodes'] = self.exp_num_nodes + exps.append(exp) + + return exps + + def tune(self): + """ Tunes Zero stages, micro batch size per GPU, and other Zero configurations. Performance metrics of different tuning spaces are recorded in self.records. + """ + if has_mlflow: + self.mlflow_parent_id = os.environ['MLFLOW_RUN_ID'] + mlflow.start_run(run_id=self.mlflow_parent_id) + + self.start_time = time.time() + if self.fast_enabled(): + logger.info(f"Fast mode is enabled. 
Tuning micro batch size only.") + + # model info profile run with DEFAULT_MIN_MEM_CONFIG + model_info = self.model_info_profile_run() + if model_info: + self.model_info = model_info + else: + return + + logger.info(f"The model has {number_to_string(self.get_model_num_params())} parameters.") + + self.gpu_mem = self.get_gpu_memory_info() + logger.info(f"Memory per GPU in the system is {memory_to_string(self.gpu_mem, postfix='B')}.") + + self.activation_mem = self.get_activation_memory_per_gpu() + logger.info( + f"The model requires at least {memory_to_string(self.activation_mem, postfix='B')} activation memory for micro batch size 1." + ) + + stage = self.user_config.get(ZERO_OPTIMIZATION, {}).get(ZERO_OPTIMIZATION_STAGE, 0) + + user_zero_stages = [stage] if not isinstance(stage, list) else stage + logger.info(f"User-defined zero stages are {stage}.") + + mbs = 0 + max_mbs = 0 + metric_val = 0 + + required_gpu_mem = self.get_instantiation_memory_required_per_gpu(ZeroStageEnum.disabled) + self.activation_mem + if self.gpu_mem > required_gpu_mem: + if "all" in user_zero_stages or ZeroStageEnum.disabled in user_zero_stages: + logger.info( + f"The model might be runable with ZERO 0 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1), adding DEFAULT_TUNING_SPACE_ZERO_0 to the global tuning space" + ) + next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_0) + if next_mbs > mbs: + mbs = next_mbs + max_mbs = next_max_mbs + metric_val = next_metric_val + if has_mlflow: + mlflow.log_metric(f"z0{self.metric()}", next_metric_val) + else: + logger.info( + f"The model is not runable with ZERO stage {ZeroStageEnum.disabled} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)" + ) + + required_gpu_mem = self.get_instantiation_memory_required_per_gpu( + ZeroStageEnum.optimizer_states) + self.activation_mem + if self.gpu_mem > required_gpu_mem: + if "all" in user_zero_stages or ZeroStageEnum.optimizer_states in user_zero_stages: + logger.info( + f"The model might be runable with ZERO 1 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_1 to the global tuning space" + ) + next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_1, + prev_max_mbs=max_mbs, + prev_best_mbs=mbs, + prev_best_metric_val=metric_val) + if next_mbs > mbs: + mbs = next_mbs + max_mbs = next_max_mbs + metric_val = next_metric_val + if has_mlflow: + mlflow.log_metric(f"z1{self.metric()}", next_metric_val) + else: + logger.info( + f"The model is not runable with ZERO stage {ZeroStageEnum.optimizer_states} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)" + ) + + required_gpu_mem = self.get_instantiation_memory_required_per_gpu( + ZeroStageEnum.gradients) + self.activation_mem + if self.gpu_mem > required_gpu_mem: + if "all" in user_zero_stages or ZeroStageEnum.gradients in user_zero_stages: + logger.info( + f"The model might be runable with ZERO 2 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_2 to the global tuning space" + ) + next_max_mbs, next_mbs, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_2, + prev_max_mbs=max_mbs, + prev_best_mbs=mbs, + prev_best_metric_val=metric_val) + if next_mbs > mbs: + mbs = next_mbs + max_mbs = next_max_mbs + metric_val = next_metric_val + if has_mlflow: + 
mlflow.log_metric(f"z2{self.metric()}", next_metric_val) + else: + logger.info( + f"The model is not runable with ZERO stage {ZeroStageEnum.gradients} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)" + ) + + required_gpu_mem = self.get_instantiation_memory_required_per_gpu(ZeroStageEnum.weights) + self.activation_mem + if self.gpu_mem > required_gpu_mem: + if "all" in user_zero_stages or ZeroStageEnum.weights in user_zero_stages: + logger.info( + f"The model might be runable with ZERO 3 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_3 to the global tuning space" + ) + _, _, next_metric_val = self.tune_space(DEFAULT_TUNING_SPACE_ZERO_3, + prev_max_mbs=max_mbs, + prev_best_mbs=mbs, + prev_best_metric_val=metric_val) + if has_mlflow: + mlflow.log_metric(f"z3{self.metric()}", next_metric_val) + else: + logger.info( + f"The model has {self.get_model_num_params()} parameters and requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory per GPU with DeepSpeed Zero stage {ZeroStageEnum.weights} optimization. Memory per GPU in system is {memory_to_string(self.gpu_mem)}. No tuning is performed." + ) + return + if has_mlflow: + mlflow.end_run() + + def tune_space(self, tuning_space, prev_max_mbs=0, prev_best_mbs=0, prev_best_metric_val=0): + config_zero = tuning_space.get(ZERO_OPTIMIZATION, {}) + stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, None) + tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage) + tuning_micro_batch_sizes = [] + max_train_batch_size_per_gpu = 0 + tuning_micro_batch_sizes_overwritten = False + + # calculate max micro batch size using gpu memory, model instantiation memory and activation memory + # calculated_max_micro_batch_size = (memory_per_gpu - instantiation_memory) // activation_memory_micro_batch_size_1 + calculated_max_micro_batch_size = int( + self.gpu_mem - self.get_instantiation_memory_required_per_gpu(stage)) // self.activation_mem + logger.info( + f"Start tuning for space {tuning_space_name}, calculated_max_micro_batch_size = {calculated_max_micro_batch_size}" + ) + + if calculated_max_micro_batch_size < prev_max_mbs: + logger.info(f"No need to tune Zero stage {stage}. End tuning for space {tuning_space_name}") + return 0, 0, 0 + + if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self.user_config and isinstance( + self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU], list): + # user-specified micro batch size per gpu is a list which overwrites the default tuning behavior + tuning_micro_batch_sizes = [ + s for s in self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] if isinstance(s, int) + ] + gas = self.get_gas_from_user_config() + min_micro_batch_size = min(tuning_micro_batch_sizes) + max_micro_batch_size = max(tuning_micro_batch_sizes) + max_train_batch_size_per_gpu = max_micro_batch_size * gas + tuning_micro_batch_sizes_overwritten = True + else: + # auto-detects the list of micro batch sizes to tune + min_micro_batch_size, max_micro_batch_size = self.get_min_max_micro_batch_size( + stage, prev_max_mbs, calculated_max_micro_batch_size) + + if max_micro_batch_size < prev_max_mbs: + logger.info(f"No need to tune Zero stage {stage}. 
End tuning for space {tuning_space_name}") + return 0, 0, 0 + + tuning_micro_batch_sizes, max_train_batch_size_per_gpu = self.get_tuning_micro_batch_size_list( + min_micro_batch_size, + max_micro_batch_size, + num_tuning_micro_batch_sizes=self.num_tuning_micro_batch_sizes()) + + logger.info( + f"tuning_micro_batch_sizes = {tuning_micro_batch_sizes}, max_train_batch_size_per_gpu = {max_train_batch_size_per_gpu}" + ) + + # return if the tuning_micro_batch_sizes list is empty + if not tuning_micro_batch_sizes: + logger.info(f"End tuning for space {tuning_space_name}") + return 0, 0, 0 + + # tune micro batch sizes and gradient accumulation steps given max_train_batch_size_per_gpu + tuning_micro_batch_sizes = self.run_tuning_micro_batch_sizes(tuning_micro_batch_sizes, + max_train_batch_size_per_gpu, + min_micro_batch_size, stage, + tuning_micro_batch_sizes_overwritten) + + fast_best_record = self.get_best_space_record(tuning_space_name) + fast_best_metric_val = fast_best_record[1] if fast_best_record else 0 + fast_best_mbs = fast_best_record[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] if fast_best_record else 0 + logger.info(f"fast_best_mbs = {fast_best_mbs}, name = {fast_best_record[0]['name']}") + + if self.fast_enabled() or stage == 0: + logger.info(f"End tuning for space: {tuning_space_name}") + return max_micro_batch_size, fast_best_mbs, fast_best_metric_val + + # if the best metric or the micro batch size for that best metric in the current Zero stage after tuning micro batch size is less than the corresponding value in the previous Zero stage, return, do not tune other Zero configuration parameters + if stage > 0: + if fast_best_mbs <= prev_best_mbs or fast_best_metric_val < prev_best_metric_val: + logger.info( + f"End tuning for space: {tuning_space_name}. 
No need to tune other Zero configuration parameters.") + return max_micro_batch_size, fast_best_mbs, fast_best_metric_val + + tuning_space[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = tuning_micro_batch_sizes + tuning_space_name = canonical_name(tuning_space, + tuning_keys=get_tuning_keys(tuning_space), + prefix="z" + str(stage) + "_", + omit_val=True) + + logger.info(f'Tuning space is {tuning_space}') + logger.info(f'Tuning space name is {tuning_space_name}') + + exps = self._generate_experiments(tuning_space, max_train_batch_size_per_gpu) + + logger.info(f'Tuner type is {self.autotuning_config.tuner_type}') + if self.autotuning_config.tuner_type == AUTOTUNING_TUNER_MODELBASED: + t = ModelBasedTuner(exps, self.rm, self.metric(), tuning_space) + elif self.autotuning_config.tuner_type == AUTOTUNING_TUNER_RANDOM: + t = RandomTuner(exps, self.rm, self.metric()) + else: + t = GridSearchTuner(exps, self.rm, self.metric()) + + sample_size = len(self.rm.nodes) * self.rm.num_gpus_per_node // (self.exp_num_gpus * self.exp_num_nodes) + num_exps = t.tune(sample_size=sample_size, + n_trials=self.autotuning_config.tuner_num_trials, + early_stopping=self.autotuning_config.tuner_early_stopping) + exp = t.best_exp + metric_val = t.best_metric_val + if exp: + self.update_records(tuning_space_name, exp, metric_val, num_exps) + + full_best_record = self.get_best_space_record(tuning_space_name) + full_best_metric_val = full_best_record[1] if full_best_record else -1 + + if full_best_metric_val > fast_best_metric_val: + best_metric_val = full_best_metric_val + best_mbs = full_best_record[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] if full_best_record else -1 + else: + best_metric_val = fast_best_metric_val + best_mbs = fast_best_mbs + + logger.info(f"End tuning for space: {tuning_space_name}") + return max_micro_batch_size, best_mbs, best_metric_val + + def get_plateau_mbs(self, tuning_space_name): + if tuning_space_name not in self.records: + return 0 + space_records = self.records[tuning_space_name] + sorted_space_records = sorted(space_records, key=lambda x: x[0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU]) + prev_metric_val = None + prev_micro_batch_size = 0 + for (exp, metric_val, _) in sorted_space_records: + if prev_metric_val: + if metric_val < prev_metric_val: + break + if (metric_val >= prev_metric_val + and (metric_val - prev_metric_val) / prev_metric_val < METRIC_PERCENT_DIFF_CONST): + break + prev_metric_val = metric_val + prev_micro_batch_size = exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] + plateau_mbs = prev_micro_batch_size + return plateau_mbs + + def get_model_num_params(self): + if self.model_info and "num_params" in self.model_info: + return self.model_info["num_params"] + + def model_info_profile_run(self): + """Does a model information profiling experiment that collects the number of model parameters and activation memory.\ + The experiment produces a "profile_model_info" folder under self.results_dir. 
+ Returns: + [dict]: a model information dictionary, e.g., {"num_params": 335144976, "trainable_num_params": 335144976, "activation_mem_per_gpu": 324358144, "rank": 0} + """ + logger.info("Starting model info profile run.") + model_info = self.autotuning_config.model_info + if model_info and MODEL_INFO_NUM_PARAMS in model_info: + return model_info + + ds_config = copy.deepcopy(self.user_config) + replace_dict(ds_config, DEFAULT_MIN_MEM_CONFIG) + + model_info_path = os.path.join(self.results_dir, "profile_model_info", "model_info.json") + ds_config[AUTOTUNING] = {"enabled": True, "model_info_path": model_info_path, "model_info": {"profile": True}} + + exp_config = {} + exp_name = "profile_model_info" + exp_config['name'] = exp_name + exp_config[DS_CONFIG] = ds_config + exp_config['num_gpus'] = self.exp_num_gpus + exp_config['num_nodes'] = self.exp_num_nodes + exp_config['hostfile'] = self.args.hostfile + exp_path = os.path.join(self.exps_dir, f'{exp_name}.json') + + with open(exp_path, 'w', buffering=BUFSIZE) as fd: + json.dump(exp_config, fd) + fd.flush() + os.fsync(fd) + + self.rm.schedule_experiments([exp_path]) + self.rm.run() + + for exp_id, (exp_json, err) in self.rm.finished_experiments.items(): + self.rm.clear() + if err: + logger.error(f"The model is not runnable with DeepSpeed with error = {err}") + return None + + if os.path.exists(model_info_path): + with open(model_info_path, 'r') as f: + model_info = hjson.load(f) + return model_info + + def update_records(self, space_name, exp, metric_val, num_exps): + if space_name not in self.records: + self.records[space_name] = [(exp, metric_val, num_exps)] + else: + self.records[space_name].append((exp, metric_val, num_exps)) + + def get_best_space_record(self, space_name): + if space_name not in self.records: + return None + space_records = self.records[space_name] + best_space_record = None + space_num_exps = 0 + for (exp, metric_val, num_exps) in space_records: + space_num_exps += num_exps + if best_space_record is None or metric_val > best_space_record[1]: + best_space_record = (exp, metric_val) + if best_space_record: + best_space_record = best_space_record + (space_num_exps, ) + return best_space_record + + def get_best_space_records(self): + best_space_records = {} + global_best_record = None + for space_name, space_records in self.records.items(): + best_space_record = self.get_best_space_record(space_name) + if best_space_record: + best_space_records[space_name] = best_space_record + if not global_best_record or best_space_record[1] > global_best_record[1]: + global_best_record = best_space_record + if global_best_record: + best_space_records[GLOBAL_TUNING_SPACE] = global_best_record + return best_space_records + + def run_tuning_micro_batch_sizes(self, tuning_micro_batch_sizes, max_train_batch_size_per_gpu, + min_micro_batch_size, stage, tuning_micro_batch_sizes_overwritten): + assert tuning_micro_batch_sizes, "the tuning micro batch size list is empty" + tuning_micro_batch_sizes.sort() + max_micro_batch_size = tuning_micro_batch_sizes[-1] + max_micro_batch_size_metric_val = 0 + + ds_config = get_first_config(self.user_config) + ds_config[ZERO_OPTIMIZATION] = {ZERO_OPTIMIZATION_STAGE: stage} + tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage) + + exp_paths = [] + for mbs in tuning_micro_batch_sizes: + ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs + gas = max_train_batch_size_per_gpu // mbs + ds_config[GRADIENT_ACCUMULATION_STEPS] = gas + ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \ + self.exp_num_gpus * 
self.exp_num_nodes // self.mp_size() + exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs) + exp_config = {} + exp_config['name'] = exp_name + exp_config[DS_CONFIG] = ds_config + exp_config['num_gpus'] = self.exp_num_gpus + exp_config['num_nodes'] = self.exp_num_nodes + exp_config['hostfile'] = self.args.hostfile + exp_path = os.path.join(self.exps_dir, f'{exp_name}.json') + + with open(exp_path, 'w', buffering=BUFSIZE) as fd: + json.dump(exp_config, fd) + fd.flush() + os.fsync(fd) + exp_paths.append(exp_path) + + self.rm.schedule_experiments(exp_paths) + self.rm.run() + + for exp_id, (exp, err) in self.rm.finished_experiments.items(): + if exp: + metric_file = exp[DS_CONFIG][AUTOTUNING][AUTOTUNING_METRIC_PATH] + if os.path.exists(metric_file): + + with open(metric_file, 'r') as f: + results = hjson.load(f) + metric_val = results[self.metric()] + self.update_records(tuning_space_name, exp, metric_val, 1) + if max_micro_batch_size == exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU]: + max_micro_batch_size_metric_val = metric_val + if has_mlflow: + os.environ.pop('MLFLOW_RUN_ID') + mlflow.start_run(nested=True, run_name=exp['name']) + for metric in results: + mlflow.log_metric(metric, results[metric]) + mlflow.end_run() + os.environ['MLFLOW_RUN_ID'] = self.mlflow_parent_id + else: + self.update_records(tuning_space_name, exp, 0, 1) + else: + mbs = exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] + logger.info(f"micro batch size = {mbs} was not run successfully") + + self.rm.clear() + + if tuning_micro_batch_sizes_overwritten: + return tuning_micro_batch_sizes + + # in a auto-detected tuning_micro_batch_sizes list, max_micro_batch_size might not be performant as the memory consumption is close to max + # try smaller values while gas stays the same + # if finding a more performant mbs value, use it to replace max_micro_batch_size in the list + min_micro_batch_size_with_same_gas = (tuning_micro_batch_sizes[-2] + + 1) if len(tuning_micro_batch_sizes) > 1 else min_micro_batch_size + + prev_best_metric_val = max_micro_batch_size_metric_val + prev_best_mbs = max_micro_batch_size + + stride = (max_micro_batch_size - min_micro_batch_size_with_same_gas) // 3 + if stride == 0: + stride = 1 + for mbs in reversed(range(min_micro_batch_size_with_same_gas, max_micro_batch_size, stride)): + ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs + gas = max_train_batch_size_per_gpu // mbs + ds_config[GRADIENT_ACCUMULATION_STEPS] = gas + ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \ + self.exp_num_gpus * self.exp_num_nodes // self.mp_size() + exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs) + exp, metric_val = self.run_ds_config(ds_config, exp_name) + + if metric_val: + with open(metric_file, 'r') as f: + results = hjson.load(f) + metric_val = results[self.metric()] + if has_mlflow: + os.environ.pop('MLFLOW_RUN_ID') + mlflow.start_run(nested=True, run_name=exp_name) + for metric in results: + mlflow.log_metric(metric, results[metric]) + mlflow.end_run() + os.environ['MLFLOW_RUN_ID'] = self.mlflow_parent_id + self.update_records(tuning_space_name, exp, metric_val, 1) + if metric_val > prev_best_metric_val * (1 + METRIC_PERCENT_DIFF_CONST): + prev_best_metric_val = metric_val + prev_best_mbs = mbs + else: + break + else: + self.update_records(tuning_space_name, exp, 0, 1) + break + if prev_best_mbs != max_micro_batch_size: + tuning_micro_batch_sizes[-1] = prev_best_mbs + return tuning_micro_batch_sizes + + def get_min_max_micro_batch_size(self, stage, min_micro_batch_size, 
calculated_max_micro_batch_size): + # get min and max micro batch size with gradient accumulation steps = 1 + if min_micro_batch_size > calculated_max_micro_batch_size: + return -1, -1 + + used_micro_batch_sizes = [] + tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage) + + ds_config = get_first_config(self.user_config) + ds_config[ZERO_OPTIMIZATION] = {ZERO_OPTIMIZATION_STAGE: stage} + gas = self.get_gas_from_user_config() + ds_config[GRADIENT_ACCUMULATION_STEPS] = gas + + # search for the min micro batch size + if min_micro_batch_size < 1: + if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self.user_config and isinstance( + self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU], int): + # user specifies train_micro_batch_size_per_gpu as an int + mbs = int(self.user_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU]) + else: + # user does not specify train_micro_batch_size_per_gpu or sets it to "auto" when using Hugging Face + val = self.get_val_from_user_args(TRAIN_MICRO_BATCH_SIZE_PER_GPU) + if val: + mbs = int(val) + else: + mbs = 1 + assert mbs > 0, "The micro batch size per GPU must be greater than 0." + ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs + ds_config[GRADIENT_ACCUMULATION_STEPS] = gas + ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \ + self.exp_num_gpus * self.exp_num_nodes // self.mp_size() + exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs) + exp, metric_val = self.run_ds_config(ds_config, exp_name) + if metric_val: + self.update_records(tuning_space_name, exp, metric_val, 1) + used_micro_batch_sizes.append(mbs) + min_micro_batch_size = mbs + else: + self.update_records(tuning_space_name, exp, 0, 1) + logger.info(f"User-specified micro batch size per GPU {mbs} does not run") + if self.min_train_micro_batch_size_per_gpu() == mbs: + return -1, -1 + mbs = self.min_train_micro_batch_size_per_gpu() + ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs + ds_config[GRADIENT_ACCUMULATION_STEPS] = gas + ds_config[TRAIN_BATCH_SIZE] = mbs * gas * \ + self.exp_num_gpus * self.exp_num_nodes // self.mp_size() + exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs) + exp, metric_val = self.run_ds_config(ds_config, exp_name) + if not metric_val: + self.update_records(tuning_space_name, exp, 0, 1) + logger.info(f"min_train_micro_batch_size_per_gpu {mbs} is not runnable.") + return -1, -1 + self.update_records(tuning_space_name, exp, metric_val, 1) + min_micro_batch_size = mbs + used_micro_batch_sizes.append(mbs) + else: + ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = min_micro_batch_size + ds_config[GRADIENT_ACCUMULATION_STEPS] = gas + ds_config[TRAIN_BATCH_SIZE] = min_micro_batch_size * gas * \ + self.exp_num_gpus * self.exp_num_nodes // self.mp_size() + exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(min_micro_batch_size) + exp, metric_val = self.run_ds_config(ds_config, exp_name) + if metric_val: + self.update_records(tuning_space_name, exp, metric_val, 1) + used_micro_batch_sizes.append(min_micro_batch_size) + else: + self.update_records(tuning_space_name, exp, 0, 1) + return -1, -1 + + # search for the max micro batch size + max_micro_batch_size = min(calculated_max_micro_batch_size, self.max_train_micro_batch_size_per_gpu()) + for mbs in [math.ceil(1.05 * max_micro_batch_size), max_micro_batch_size, int(0.95 * max_micro_batch_size)]: + if mbs > self.max_train_micro_batch_size_per_gpu(): + continue + if mbs in used_micro_batch_sizes: + return min_micro_batch_size, mbs + ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mbs + ds_config[TRAIN_BATCH_SIZE] = 
mbs * gas * \ + self.exp_num_gpus * self.exp_num_nodes // self.mp_size() + exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs) + exp, metric_val = self.run_ds_config(ds_config, exp_name) + + if metric_val: + logger.info(f"mbs = {mbs} is found as max mbs") + self.update_records(tuning_space_name, exp, metric_val, 1) + used_micro_batch_sizes.append(mbs) + return min_micro_batch_size, mbs + else: + self.update_records(tuning_space_name, exp, 0, 1) + + space_records = self.records[tuning_space_name] if tuning_space_name in self.records else [] + if space_records: + prev_idx = min(range(len(space_records)), + key=lambda i: abs(space_records[i][0][DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] - + min_micro_batch_size)) + prev_metric_val = space_records[prev_idx][1] + else: + prev_metric_val = None + + low = min_micro_batch_size + high = max_micro_batch_size + # binary search until low is the smallest micro batch size that OOMs. + while low <= high: + mid = int((low + high) // 2) + logger.debug(f"trying mbs = {mid}, low = {low}, high = {high}") + if mid not in used_micro_batch_sizes: + ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mid + ds_config[TRAIN_BATCH_SIZE] = mid * gas * \ + self.exp_num_gpus * self.exp_num_nodes // self.mp_size() + exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mid) + exp, metric_val = self.run_ds_config(ds_config, exp_name) + if metric_val: + low = mid + 1 + self.update_records(tuning_space_name, exp, metric_val, 1) + used_micro_batch_sizes.append(mid) + if prev_metric_val and ( + (metric_val - prev_metric_val) / prev_metric_val) < METRIC_PERCENT_DIFF_CONST: + logger.info(f"performance plateaus at mbs = {low}") + break + prev_metric_val = metric_val + else: + self.update_records(tuning_space_name, exp, 0, 1) + high = mid - 1 + else: + low = mid + 1 + max_micro_batch_size = low - 1 + + logger.info(f"min_micro_batch_size = {min_micro_batch_size}, max_micro_batch_size = {max_micro_batch_size}.") + + return min_micro_batch_size, max_micro_batch_size + + def get_gas_from_user_config(self): + gas = 1 + if GRADIENT_ACCUMULATION_STEPS in self.user_config: + gas_in_config = self.user_config[GRADIENT_ACCUMULATION_STEPS] + if isinstance(gas_in_config, int): + gas = gas_in_config + elif gas_in_config == "auto": # GRADIENT_ACCUMULATION_STEPS: "auto" + val = self.get_val_from_user_args(GRADIENT_ACCUMULATION_STEPS) + if val: + gas = int(val) + elif isinstance(gas_in_config, list): + logger.info( + f"Specifying a list of {GRADIENT_ACCUMULATION_STEPS} to tune is not supported. 1 would be used.") + assert gas > 0, "Gradient accumulation steps must be positive." + return gas + + def get_val_from_user_args(self, ds_name): + arg_mappings = self.autotuning_config.arg_mappings + user_args = self.args.user_args + if arg_mappings and ds_name in arg_mappings: + arg_name = arg_mappings[ds_name] + if arg_name in user_args: + idx = user_args.index(arg_name) + if user_args[idx + 1].isnumeric(): + return (user_args[idx + 1]) + return None + + def get_tuning_micro_batch_size_list(self, min_micro_batch_size, max_micro_batch_size, + num_tuning_micro_batch_sizes): + """Get a list of micro batch sizes to tune based on min and max values, as well as the size of the list. + Args: + min_micro_batch_size ([int]): min micro batch size per GPU + max_micro_batch_size ([int]): max micro batch size per GPU + num_tuning_micro_batch_sizes (int): the number of items in the returned list + + Returns: + [list]: a list of micro batch sizes to tune. 
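+        Example (illustrative, following the constant-stride logic below): with
+        min_micro_batch_size=8, max_micro_batch_size=16 and
+        num_tuning_micro_batch_sizes=3, the stride is (16 - 8) // 3 = 2, so candidates
+        are drawn from [8, 10, 12, 14] (each kept only if it implies a gradient
+        accumulation step count different from that of the maximum micro batch size),
+        and 16 itself is always appended to the list.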
+ """ + if min_micro_batch_size <= 0 or max_micro_batch_size <= 0: + logger.info( + f"min_micro_batch_size = {min_micro_batch_size}, max_micro_batch_size = {max_micro_batch_size}") + return [], 0 + + # NUM_GPUS=$(( ${NUM_WORKERS} * ${NUM_GPUS_PER_WORKER} )) + # DP_SIZE=$(( ${NUM_GPUS} / (${PP_SIZE} * ${MP_SIZE}) )) + # GRAD_ACC_STEPS=$(( ${TARGET_GLOBAL_BATCH_SIZE} / (${BATCH_SIZE} * ${DP_SIZE}) )) + if self.max_train_batch_size( + ) and self.max_train_batch_size() > 0: # if the user specifies a max_train_batch_size + max_train_batch_size_per_gpu = self.max_train_batch_size() * self.mp_size() // (self.exp_num_gpus * + self.exp_num_nodes) + else: + gas = self.get_gas_from_user_config() + max_train_batch_size_per_gpu = max_micro_batch_size * gas // self.mp_size() + logger.info(f"max_train_batch_size_per_gpu = {max_train_batch_size_per_gpu}") + if min_micro_batch_size < max_micro_batch_size // 2: + min_micro_batch_size = max_micro_batch_size // 2 + + # constant stride + stride = (max_micro_batch_size - min_micro_batch_size) // num_tuning_micro_batch_sizes + if stride == 0: + stride = 1 + ls = [] + min_gas = max_train_batch_size_per_gpu // max_micro_batch_size + # if gas is the same as min_gas, do not add mbs to the tuning list + for mbs in range(min_micro_batch_size, max_micro_batch_size, stride): + if max_train_batch_size_per_gpu // mbs != min_gas: + ls.append(mbs) + ls.append(max_micro_batch_size) + + return ls, max_train_batch_size_per_gpu + + def run_ds_config(self, ds_config, exp_name): + exp_config = {} + exp_config['name'] = exp_name + exp_config[DS_CONFIG] = ds_config + exp_config['num_gpus'] = self.exp_num_gpus + exp_config['num_nodes'] = self.exp_num_nodes + exp_config['hostfile'] = self.args.hostfile + exp_path = os.path.join(self.exps_dir, f'{exp_name}.json') + + logger.debug(f'run_ds_config exp_name = {exp_name}') + + with open(exp_path, 'w', buffering=BUFSIZE) as fd: + json.dump(exp_config, fd) + fd.flush() + os.fsync(fd) + self.rm.schedule_experiments([exp_path]) + self.rm.run() + exp, metric_val = self.rm.parse_results(self.metric()) + self.rm.clear() + return exp, metric_val + + def write_optimal_config(self): + best_space_records = self.get_best_space_records() + if GLOBAL_TUNING_SPACE not in best_space_records: + return + best_exp, best_metric_val, _ = best_space_records[GLOBAL_TUNING_SPACE] + if best_exp: + exp_dir = best_exp["result_dir"] + cmd = None + with open(os.path.join(exp_dir, "cmd.txt"), "r") as f: + cmd = [str(i) for i in f.read().split()] + + ds_config = hjson.load(open(os.path.join(exp_dir, "ds_config.json"), "r")) + ds_config.pop(AUTOTUNING) + + ds_config_path = os.path.join(self.results_dir, "ds_config_optimal.json") + json.dump(ds_config, open(ds_config_path, "w")) + + cmd_path = os.path.join(self.results_dir, "cmd_optimal.txt") + with open(cmd_path, "w") as fd: + fd.write(" ".join(cmd)) + fd.write("\n") + fd.flush() + self.optimal_cmd = cmd + self.optimal_ds_config = ds_config + logger.info( + f"Wrote the optimal DeepSpeed configuration found by autotuning to {ds_config_path}, and the corresponding DeepSpeed command to {cmd_path}" + ) + + def run_after_tuning(self): + """ Launches the training with the optimal DeepSpeed configuration found through the autotuning process. + "ds_config_optimal.json" describing the optimal DeepSpeed configuration as well the command used to launch training "cmd_optimal.txt" are saved to self.results_dir. 
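+        A minimal sketch of an equivalent manual replay (paths are hypothetical and
+        assume write_optimal_config() has already been called):
+
+            import subprocess
+            with open("autotuning_results/cmd_optimal.txt") as f:
+                subprocess.run(f.read().split(), check=True)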
+ """ + if self.optimal_cmd: + result = subprocess.Popen(self.optimal_cmd) + result.wait() + + logger.info(f"Done running with the optimal DeepSpeed configuration using {self.optimal_cmd}") + else: + logger.info(f"No optimal DeepSpeed configuration found by autotuning.") diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/config.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/config.py new file mode 100644 index 0000000000000000000000000000000000000000..6f58fb4e42965be30820809a1c8d6f424b209bd8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/config.py @@ -0,0 +1,98 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.runtime.config_utils import get_scalar_param, get_dict_param, DeepSpeedConfigObject +from deepspeed.autotuning.constants import * + + +class DeepSpeedAutotuningConfig(DeepSpeedConfigObject): + + def __init__(self, param_dict): + super(DeepSpeedAutotuningConfig, self).__init__() + + self.enabled = None + self.start_step = None + self.end_step = None + self.metric_path = None + self.arg_mappings = None + self.metric = None + self.model_info = None + self.results_dir = None + self.exps_dir = None + self.overwrite = None + + if param_dict and AUTOTUNING in param_dict.keys(): + autotuning_dict = param_dict[AUTOTUNING] + else: + autotuning_dict = {} + + self._initialize(autotuning_dict) + + def _initialize(self, autotuning_dict): + self.enabled = get_scalar_param(autotuning_dict, AUTOTUNING_ENABLED, AUTOTUNING_ENABLED_DEFAULT) + + self.fast = get_scalar_param(autotuning_dict, AUTOTUNING_FAST, AUTOTUNING_FAST_DEFAULT) + + self.results_dir = get_scalar_param(autotuning_dict, AUTOTUNING_RESULTS_DIR, AUTOTUNING_RESULTS_DIR_DEFAULT) + assert self.results_dir, "results_dir cannot be empty" + self.exps_dir = get_scalar_param(autotuning_dict, AUTOTUNING_EXPS_DIR, AUTOTUNING_EXPS_DIR_DEFAULT) + assert self.exps_dir, "exps_dir cannot be empty" + self.overwrite = get_scalar_param(autotuning_dict, AUTOTUNING_OVERWRITE, AUTOTUNING_OVERWRITE_DEFAULT) + + self.start_profile_step = get_scalar_param(autotuning_dict, AUTOTUNING_START_PROFILE_STEP, + AUTOTUNING_START_PROFILE_STEP_DEFAULT) + + self.end_profile_step = get_scalar_param(autotuning_dict, AUTOTUNING_END_PROFILE_STEP, + AUTOTUNING_END_PROFILE_STEP_DEFAULT) + + self.metric = get_scalar_param(autotuning_dict, AUTOTUNING_METRIC, AUTOTUNING_METRIC_DEFAULT) + + self.metric_path = get_scalar_param(autotuning_dict, AUTOTUNING_METRIC_PATH, AUTOTUNING_METRIC_PATH_DEFAULT) + + self.tuner_type = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_TYPE, AUTOTUNING_TUNER_TYPE_DEFAULT) + + self.tuner_early_stopping = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_EARLY_STOPPING, + AUTOTUNING_TUNER_EARLY_STOPPING_DEFAULT) + + self.tuner_num_trials = get_scalar_param(autotuning_dict, AUTOTUNING_TUNER_NUM_TRIALS, + AUTOTUNING_TUNER_NUM_TRIALS_DEFAULT) + + self.arg_mappings = get_dict_param(autotuning_dict, AUTOTUNING_ARG_MAPPINGS, AUTOTUNING_ARG_MAPPINGS_DEFAULT) + + self.model_info = get_model_info_config(autotuning_dict) + + self.model_info_path = get_scalar_param(autotuning_dict, AUTOTUNING_MODEL_INFO_PATH, + AUTOTUNING_MODEL_INFO_PATH_DEFAULT) + self.mp_size = get_scalar_param(autotuning_dict, AUTOTUNING_MP_SIZE, AUTOTUNING_MP_SIZE_DEFAULT) + + self.max_train_batch_size = get_dict_param(autotuning_dict, AUTOTUNING_MAX_TRAIN_BATCH_SIZE, + AUTOTUNING_MAX_TRAIN_BATCH_SIZE_DEFAULT) + + self.min_train_batch_size = 
get_dict_param(autotuning_dict, AUTOTUNING_MIN_TRAIN_BATCH_SIZE, + AUTOTUNING_MIN_TRAIN_BATCH_SIZE_DEFAULT) + + self.max_train_micro_batch_size_per_gpu = get_dict_param( + autotuning_dict, AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU, + AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT) + + self.min_train_micro_batch_size_per_gpu = get_dict_param( + autotuning_dict, AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU, + AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT) + + self.num_tuning_micro_batch_sizes = get_dict_param(autotuning_dict, AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES, + AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES_DEFAULT) + + +def get_model_info_config(param_dict): + if MODEL_INFO in param_dict and param_dict[MODEL_INFO] is not None: + model_info_config = {} + for key, default_value in MODEL_INFO_KEY_DEFAULT_DICT.items(): + model_info_config[key] = get_scalar_param(param_dict[MODEL_INFO], key, default_value) + return model_info_config + return None + + +def get_default_model_info_config(): + return MODEL_INFO_KEY_DEFAULT_DICT diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero0.json b/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero0.json new file mode 100644 index 0000000000000000000000000000000000000000..b95c7da0948ef76ae2e593ffacc7b1c153c6671d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero0.json @@ -0,0 +1,5 @@ +{ + "zero_optimization": { + "stage": 0 + } +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero1.json b/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero1.json new file mode 100644 index 0000000000000000000000000000000000000000..dc90f946f57436a74b540e60907b21283825fe31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero1.json @@ -0,0 +1,7 @@ +{ + "zero_optimization": { + "stage": 1, + "reduce_bucket_size": 5e8, + "allgather_bucket_size": 5e8 + } +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero2.json b/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero2.json new file mode 100644 index 0000000000000000000000000000000000000000..46f1817af7eead82a702822c5f2feacdf1a173e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero2.json @@ -0,0 +1,11 @@ +{ + "zero_optimization": { + "stage": 2, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "overlap_comm": false, + "reduce_scatter": true, + "reduce_bucket_size": 5e8, + "contiguous_gradients": false + } +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero3.json b/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero3.json new file mode 100644 index 0000000000000000000000000000000000000000..620d7eb10e81102a9ec77cfaf3e9c1ab1ccb9a80 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/config_templates/template_zero3.json @@ -0,0 +1,17 @@ +{ + "zero_optimization": { + "stage": 3, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "overlap_comm": false, + "reduce_scatter": true, + "reduce_bucket_size": 5e8, + "contiguous_gradients": false, + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_prefetch_bucket_size": 5e8, + 
"stage3_param_persistence_threshold": 1e6, + "stage3_gather_16bit_weights_on_model_save": false, + "sub_group_size": 1e12 + } +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/constants.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..908868a417bb0ab6fc00e9b277adce7201d21ec4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/constants.py @@ -0,0 +1,185 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +######################################### +# autotuner implementation constants +######################################### + +import os + +DEFAULT_TEMPLATE_PATH_ZERO_0 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates", + "template_zero0.json") +DEFAULT_TEMPLATE_PATH_ZERO_1 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates", + "template_zero1.json") +DEFAULT_TEMPLATE_PATH_ZERO_2 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates", + "template_zero2.json") +DEFAULT_TEMPLATE_PATH_ZERO_3 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates", + "template_zero3.json") + +METRIC_PERCENT_DIFF_CONST = 0.05 +DS_CONFIG = "ds_config" +BUFSIZE = 1 # line buffer size for writing files + +######################################### +# autotuner configuration constants +######################################### +# Autotuner. By default, this feature is not enabled. +# Users can configure in ds_config.json as below example: +AUTOTUNING_FORMAT = """ +autotuner should be enabled as: +"session_params": { + "autotuning": { + "enabled": true, + "start_step": 5, + "end_step": 15 + } +} +""" + +AUTOTUNING = "autotuning" + +AUTOTUNING_ENABLED = "enabled" +AUTOTUNING_ENABLED_DEFAULT = False + +AUTOTUNING_FAST = "fast" +AUTOTUNING_FAST_DEFAULT = True + +AUTOTUNING_RESULTS_DIR = "results_dir" +AUTOTUNING_RESULTS_DIR_DEFAULT = "autotuning_results" + +AUTOTUNING_EXPS_DIR = "exps_dir" +AUTOTUNING_EXPS_DIR_DEFAULT = "autotuning_exps" + +AUTOTUNING_OVERWRITE = "overwrite" +AUTOTUNING_OVERWRITE_DEFAULT = True + +AUTOTUNING_START_PROFILE_STEP = "start_profile_step" +AUTOTUNING_START_PROFILE_STEP_DEFAULT = 3 + +AUTOTUNING_END_PROFILE_STEP = "end_profile_step" +AUTOTUNING_END_PROFILE_STEP_DEFAULT = 5 +AUTOTUNING_METRIC_PATH = "metric_path" +AUTOTUNING_METRIC_PATH_DEFAULT = None + +AUTOTUNING_TUNER_TYPE = "tuner_type" +AUTOTUNING_TUNER_GRIDSEARCH = "gridsearch" +AUTOTUNING_TUNER_RANDOM = "random" +AUTOTUNING_TUNER_MODELBASED = "model_based" +AUTOTUNING_TUNER_TYPE_DEFAULT = AUTOTUNING_TUNER_GRIDSEARCH +AUTOTUNING_TUNER_EARLY_STOPPING = "tuner_early_stopping" +AUTOTUNING_TUNER_EARLY_STOPPING_DEFAULT = 5 +AUTOTUNING_TUNER_NUM_TRIALS = "tuner_num_trials" +AUTOTUNING_TUNER_NUM_TRIALS_DEFAULT = 50 + +AUTOTUNING_ARG_MAPPINGS = "arg_mappings" +AUTOTUNING_ARG_MAPPINGS_DEFAULT = None + +AUTOTUNING_MAX_TRAIN_BATCH_SIZE = "max_train_batch_size" +AUTOTUNING_MAX_TRAIN_BATCH_SIZE_DEFAULT = None +AUTOTUNING_MIN_TRAIN_BATCH_SIZE = "min_train_batch_size" +AUTOTUNING_MIN_TRAIN_BATCH_SIZE_DEFAULT = 1 +AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU = "max_train_micro_batch_size_per_gpu" +AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = 1024 +AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU = "min_train_micro_batch_size_per_gpu" +AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = 1 +AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES = 
"num_tuning_micro_batch_sizes" +AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES_DEFAULT = 3 + +AUTOTUNING_MP_SIZE = "mp_size" +AUTOTUNING_MP_SIZE_DEFAULT = 1 + +AUTOTUNING_METRIC = "metric" +AUTOTUNING_METRIC_LATENCY = "latency" +AUTOTUNING_METRIC_THROUGHPUT = "throughput" +AUTOTUNING_METRIC_FLOPS = "flops" +AUTOTUNING_METRIC_FORWARD = "forward" +AUTOTUNING_METRIC_BACKWRAD = "flops" +AUTOTUNING_METRIC_STEPS = "step" +AUTOTUNING_METRIC_DEFAULT = AUTOTUNING_METRIC_THROUGHPUT + +######################################### +# MODEL INFO +######################################### +AUTOTUNING_MODEL_INFO_PATH = "model_info_path" +AUTOTUNING_MODEL_INFO_PATH_DEFAULT = None + +MODEL_INFO_FORMAT = ''' +"model_info": { + "num_params": 1000000000, + "hidden_size": 10, + "num_layers": 12, +} +''' +MODEL_INFO = "model_info" +MODEL_INFO_PROFILE = "profile" +MODEL_INFO_PROFILE_DEFAULT = False +MODEL_INFO_NUM_PARAMS = "num_params" +MODEL_INFO_NUM_PARAMS_DEFAULT = None +MODEL_INFO_HIDDEN_SIZE = "hidden_size" +MODEL_INFO_HIDDEN_SIZE_DEFAULT = None +MODEL_INFO_NUM_LAYERS = "num_layers" +MODEL_INFO_NUM_LAYERS_DEFAULT = None + +MODEL_INFO_KEY_DEFAULT_DICT = { + MODEL_INFO_PROFILE: MODEL_INFO_PROFILE_DEFAULT, + MODEL_INFO_NUM_PARAMS: MODEL_INFO_NUM_PARAMS_DEFAULT, + MODEL_INFO_HIDDEN_SIZE: MODEL_INFO_HIDDEN_SIZE_DEFAULT, + MODEL_INFO_NUM_LAYERS: MODEL_INFO_NUM_LAYERS_DEFAULT +} + +######################################### +# autotuner search space constants +######################################### + +DEFAULT_HF_CONFIG = { + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "gradient_accumulation_steps": "auto", +} + +DEFAULT_MIN_MEM_CONFIG = { + "train_micro_batch_size_per_gpu": 1, + "zero_optimization": { + "stage": 3 + }, + "memory_break_down": False +} + +DEFAULT_TUNING_SPACE_ZERO_0 = {"zero_optimization": {"stage": 0}} + +DEFAULT_TUNING_SPACE_ZERO_1 = { + "zero_optimization": { + "stage": 1, + "reduce_bucket_size": [5e7, 5e8, 1e9], + "allgather_bucket_size": [5e7, 5e8, 1e9], + } +} + +DEFAULT_TUNING_SPACE_ZERO_2 = { + "zero_optimization": { + "stage": 2, + "overlap_comm": [True, False], + "reduce_scatter": [False, True], + "reduce_bucket_size": [5e7, 5e8, 1e9], + "allgather_bucket_size": [5e7, 5e8, 1e9], + "contiguous_gradients": [False, True] + }, +} + +DEFAULT_TUNING_SPACE_ZERO_3 = { + "zero_optimization": { + "stage": 3, + "overlap_comm": [True, False], + "reduce_scatter": [False, True], + "reduce_bucket_size": [5e7, 5e8, 1e9], + "allgather_partitions": [True, False], + "allgather_bucket_size": [5e7, 5e8, 1e9], + "contiguous_gradients": [False, True] + }, +} + +GLOBAL_TUNING_SPACE = 'global' +# TUNING_MICRO_BATCH_SIZE_PREFIX="tune_micro_batch_size_z" +TUNING_MICRO_BATCH_SIZE_PREFIX = "z" diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/scheduler.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..7d2a1c081da9045e957569af06a3df4bb168a7da --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/scheduler.py @@ -0,0 +1,433 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import copy + +from numpy import BUFSIZE +import json +import subprocess +import sys +import threading +import time +import base64 + +import os +import hjson +from tqdm import tqdm + +from ..utils import logger +from .constants import AUTOTUNING, AUTOTUNING_METRIC_PATH +from .utils import get_val_by_key, search_error, was_interruptted +""" +thread-0: loop over experiment queue dispatching experiments if they become available +thread-N: start each experiment in its own thread +""" + +from deepspeed import comm as dist + +TIMEOUT = 5 + + +class ResourceManager: + + def __init__(self, args, hosts, num_gpus_per_node, results_dir, exps_dir, arg_mappings): + self.results_dir = results_dir + self.exps_dir = exps_dir + + self.nodes = [] + self.num_gpus_per_node = num_gpus_per_node + for host in hosts: + self.nodes.append(Node(host, num_gpus_per_node)) + + self.experiment_queue = [] + self.running_experiments = {} + self.finished_experiments = {} + self.experiment_count = 0 + self.exp_paths = set() + self.args = args + + self.arg_mappings = {} + if arg_mappings is not None: + for k, v in arg_mappings.items(): + k = k.strip() + v = v.strip() + if k not in self.arg_mappings: + self.arg_mappings[k] = v + + def schedule_experiments(self, exp_paths): + for exp_path in exp_paths: + if exp_path in self.exp_paths: + continue + else: + self.exp_paths.add(exp_path) + with open(exp_path, "r") as fd: + exp = hjson.load(fd) + exp["exp_id"] = self.experiment_count + self.experiment_count += 1 + + result_dir = exp["result_dir"] = os.path.join(self.results_dir, exp['name']) + if AUTOTUNING in exp["ds_config"]: + metric_file = os.path.join(result_dir, "metrics.json") + exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH] = metric_file + stderr_file = os.path.join(result_dir, "stderr.log") + model_info_file = os.path.join(result_dir, "model_info.json") + metric_file = os.path.join(result_dir, "metrics.json") + + # skip existing experiments (except for the ones that were interrupted) + if os.path.exists(result_dir) and os.path.exists(stderr_file): + if not was_interruptted(stderr_file): + err = search_error(stderr_file) + exp_id = exp["exp_id"] + self.finished_experiments[exp_id] = (exp, err) + if err or os.path.exists(metric_file) or os.path.exists(model_info_file): + logger.info(f"Skipping exp {exp['name']} whose result already exists") + continue + + self.experiment_queue.append(exp) + + def run_job(self, exp: dict, reservations): + exp_id = exp["exp_id"] + exp["master_port"] = self.args.master_port + exp_id + exp["result_dir"] = os.path.join(self.results_dir, exp['name']) + user_script = self.args.user_script + user_args = self.args.user_args + + # overwrite the user arg in the arg_mappings + for key, val in self.arg_mappings.items(): + nval = get_val_by_key(exp, key) + if nval and str(nval) != "auto": + if val in user_args: + idx = user_args.index(val) + user_args[idx + 1] = str(nval) + else: + user_args.append(val) + user_args.append(str(nval)) + + t = threading.Thread(target=run_experiment, args=(exp, reservations, user_script, user_args)) + t.start() + self.running_experiments[exp_id] = (t, exp, reservations, time.time()) + + def experiment_check(self, pbar): + finished_exps = [] + for exp_id, exp_data in self.running_experiments.items(): + thread, exp_json, reservations, start_time = exp_data + logger.debug(f"Checking exp_id = {exp_id}, alive = {thread.is_alive()}") + thread.join(timeout=TIMEOUT) + if not thread.is_alive(): + exp_dir = 
exp_json["result_dir"] + stderr_file = os.path.join(exp_dir, "stderr.log") + err = search_error(stderr_file) + finished_exps.append((exp_id, reservations)) + self.finished_experiments[exp_id] = (exp_json, err) + duration = time.time() - start_time + logger.debug(f"Finished exp_id = {exp_id}, duration={duration:.2f} sec") + pbar.update(len(finished_exps)) + for exp_id, reservations in finished_exps: + for reservation in reservations: + reservation.restore_slots() + self.running_experiments.pop(exp_id) + time.sleep(TIMEOUT) + + def resource_request(self, exp): + num_gpus, num_nodes = exp['num_gpus'], exp['num_nodes'] + slot_request = num_gpus + reservations = [] + for node in self.nodes: + if num_nodes == 0: + break + slots = node.reserve_slots(slot_request=slot_request) + if slots: + reservations.append(Reservation(node=node, slots=slots)) + num_nodes -= 1 + + if num_nodes == 0: + # request satisfied + return reservations + else: + # request not satisfied + for reservation in reservations: + reservation.restore_slots() + + def status(self): + status = "" + for node in self.nodes: + status += f"{node.host} ({len(node.idle_slots)} idle gpus), " + return status[:-1] + + def run(self): + pbar = tqdm(total=len(self.experiment_queue)) + + while len(self.experiment_queue) > 0: + exp = self.experiment_queue.pop(0) + logger.debug(f'Popped exp_id = {exp["exp_id"]} from the queue') + logger.debug(f'Resource status: {self.status()}') + reservations = self.resource_request(exp) + + if not reservations: + logger.debug(f'Unable to schedule exp_id = {exp["exp_id"]}') + self.experiment_queue.insert(0, exp) + logger.debug(f'Put exp_id = {exp["exp_id"]} back into the queue') + self.experiment_check(pbar) + else: + desc = "" + for reservation in reservations: + reservation.slots.sort() + slots = ",".join(map(str, reservation.slots)) + desc += f"{reservation.node.host}:{slots}@" + desc = desc[:-1] + logger.debug(f'Running exp_id = {exp["exp_id"]} on {desc}') + self.run_job(exp, reservations) + + # All pending experiments are scheduled, waiting for them to complete + while len(self.running_experiments) > 0: + self.experiment_check(pbar) + + def save_exp_results_to_database(self, message, ranks=None, path=None): + """Print message when one of following condition meets + + + not dist.is_initialized() + + dist.get_rank() in ranks if ranks is not None or ranks = [-1] + + Args: + message (str) + ranks (list) + path (str) + + """ + should_log = not dist.is_initialized() + ranks = ranks or [] + my_rank = dist.get_rank() if dist.is_initialized() else -1 + if ranks and not should_log: + should_log = ranks[0] == -1 + should_log = should_log or (my_rank in set(ranks)) + logger.debug(f"*** Should log: {should_log}") + if should_log: + message['rank'] = my_rank + with open(path, 'a') as outfile: + json.dump(message, outfile) + outfile.write('\n') + + def parse_results(self, metric): + """ Parses the metric file of the finished experiments to select the optimal DeepSpeed configuration. + + Args: + finished_experiments (dcit): a dictionary of experiment id and experiment description. + + Returns: + The path to the result folder of the experiment with the optimal configuration. + """ + max_throughput = sys.float_info.min + best_exp_id = -1 + for exp_id, (exp, err) in self.finished_experiments.items(): + if err: + logger.info( + f"The experiment exp_id = {exp_id}, exp_name = {exp['name']}, did not run successfully with error = {err}, thus a metrics.txt does not exist for it. 
Check the stderr.log in {exp['result_dir']}" + ) + continue + + metric_file = exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH] + + if os.path.exists(metric_file): + with open(metric_file, 'r') as f: + results = hjson.load(f) + curr_throughput = results[metric] + if curr_throughput > max_throughput: + max_throughput = curr_throughput + best_exp_id = exp_id + exp['results'] = results + + if best_exp_id != -1: + best_exp, _ = self.finished_experiments[best_exp_id] + return best_exp, max_throughput + + return exp, None + + def clear(self): + """Clear experiment queues, does not reset self.experiment_count + """ + self.experiment_queue = [] + # clean up the running experiments + for exp_id, exp_data in self.running_experiments.items(): + thread, exp_json, reservations, start_time = exp_data + clean_up(exp_json, reservations) + self.running_experiments = {} + self.finished_experiments = {} + self.exp_paths = set() + + +class Node: + + def __init__(self, host, max_slots): + self.host = host + self.max_slots = max_slots + self.idle_slots = list(range(max_slots)) + + def reserve_slots(self, slot_request: int) -> list: + if len(self.idle_slots) >= slot_request: + return [self.idle_slots.pop(0) for _ in range(slot_request)] + + def restore_slots(self, slots: list): + self.idle_slots += slots + + +class Reservation: + + def __init__(self, node, slots): + self.node = node + self.slots = slots + + def restore_slots(self): + self.node.restore_slots(self.slots) + + def desc(self): + slots = ",".join(map(str, self.slots)) + return f"{self.node.host}:{slots}@" + + +def get_job_id(): + # Infrastructure-specific job-id + infra_job_id = None + if "DLWS_JOB_ID" in os.environ: + infra_job_id = os.environ["DLWS_JOB_ID"] + elif "DLTS_JOB_ID" in os.environ: + infra_job_id = os.environ["DLTS_JOB_ID"] + else: + infra_job_id = "unknown-job-id" + + return infra_job_id + + +def get_user(): + user = None + if "USER" in os.environ: + user = os.environ["USER"] + else: + user = "unknown-user" + return user + + +def run_experiment(exp: dict, reservations, user_script, user_args): + include_str = "" + for reservation in reservations: + reservation.slots.sort() + slots = ",".join(map(str, reservation.slots)) + include_str += f"{reservation.node.host}:{slots}@" + include_str = include_str[:-1] + master_port = exp["master_port"] + hostfile = exp["hostfile"] + exp["launcher_args"] = [ + "--hostfile", + f"{hostfile}", + "--include", + f"{include_str}", + "--master_port", + str(master_port), + ] + logger.debug(f'launcher args={exp["launcher_args"]}') + + exp["user"] = get_user() + exp["job_id"] = get_job_id() + exp_dir = exp["result_dir"] + os.makedirs(exp_dir, exist_ok=True) + ds_config_path = os.path.join(exp_dir, "ds_config.json") + exp["ds_config_path"] = ds_config_path + + ds_config = copy.deepcopy(exp["ds_config"]) + ds_config_json = json.dumps(ds_config).encode('utf-8') + + exp["ds_config_base64"] = base64.urlsafe_b64encode(ds_config_json).decode('utf-8') + + with open(exp["ds_config_path"], "w", buffering=BUFSIZE) as fd: + json.dump(ds_config, fd) + fd.flush() + os.fsync(fd) + path = exp["ds_config_path"] + logger.info(f"Scheduler wrote ds_config to {path}, {os.path.abspath(path)}") + + with open(os.path.join(exp_dir, "exp.json"), "w", buffering=BUFSIZE) as fd: + json.dump(exp, fd) + fd.flush() + os.fsync(fd) + path = os.path.join(exp_dir, "exp.json") + logger.info(f"Scheduler wrote exp to {path}, {os.path.abspath(path)}") + + # remove "--deepspeed_config ds_config.json" from user_args + if user_args: + if 
"--deepspeed_config" in user_args: + idx = user_args.index("--deepspeed_config") + # "--deepspeed_config" is omitted in HF + elif "--deepspeed" in user_args: + idx = user_args.index("--deepspeed") + assert idx < len(user_args), "there is no ds_config file specified after --deepspeed_config or --deepspeed" + # user_args[idx + 1] = exp["ds_config_path"] + # pass base64 serialized ds_config to launcher + user_args[idx + 1] = exp["ds_config_base64"] + + exp["user_script"] = user_script + exp["user_args"] = user_args + + cmd = ["deepspeed"] + exp["launcher_args"] + [user_script] + user_args + + assert len(exp["launcher_args"]) > 0, "must provide launcher args" + + with open(os.path.join(exp_dir, "cmd.txt"), "w", buffering=BUFSIZE) as fd: + fd.write(" ".join(cmd)) + fd.write("\n") + fd.flush() + os.fsync(fd) + + logger.info( + f"Launching exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}, and ds_config = {os.path.abspath(ds_config_path)}" + ) + + with open(os.path.join(exp_dir, "stdout.log"), "wb") as out, open(os.path.join(exp_dir, "stderr.log"), + "wb") as err: + result = subprocess.Popen(cmd, stdout=out, stderr=err) + result.wait() + out.flush() + err.flush() + os.fsync(out) + os.fsync(err) + + clean_up(exp, reservations) + + logger.info(f"Done running exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}") + + +PDSH_MAX_FAN_OUT = 1024 + + +def clean_up(exp: dict, reservations): + env = os.environ.copy() + env['PDSH_RCMD_TYPE'] = 'ssh' + + nodes_str = "" + for reservation in reservations: + nodes_str += f"{reservation.node.host}," + nodes_str = nodes_str[:-1] + logger.debug(f"Cleaning up exp_id = {exp['exp_id']} on the following workers: {nodes_str}") + + # PDSH flags for max node fan out and specific hosts to launch on + # See https://linux.die.net/man/1/pdsh for flag details + pdsh_cmd = ['pdsh', '-f', str(PDSH_MAX_FAN_OUT), '-w', nodes_str] + + kill_cmd = [ + 'pkill', + '-f', + exp['name'], + ] + cmd = pdsh_cmd + kill_cmd + logger.debug("cmd = {}".format(' '.join(cmd))) + + result = subprocess.Popen(cmd, env=env) + result.wait() + + # In case of failure must propagate the error-condition back to the caller (usually shell). The + # actual error and traceback should have been printed in the subprocess, so in order to avoid + # unnecessary noise we just quietly exit here with the same code as the subprocess + if result.returncode > 0: + sys.exit(result.returncode) + + logger.info(f"Done cleaning up exp_id = {exp['exp_id']} on the following workers: {nodes_str}") diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..676ae429e07745d3ad24051a2610e57ac42601f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .index_based_tuner import RandomTuner, GridSearchTuner +# from .ga_tuner import GATuner +from .model_based_tuner import ModelBasedTuner diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7957ce2aedc0a498ebb2a9bab8e6c45adb1ce4e3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/base_tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/base_tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f83961075816e034c4867592395c166071f01ce4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/base_tuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/cost_model.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/cost_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15f0bb1ae7c92615693cb113bccc4a57472c1726 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/cost_model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/index_based_tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/index_based_tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10d0a7b483e75f4aa24606f09e26c8f02101383a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/index_based_tuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/model_based_tuner.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/model_based_tuner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6afb76766d5ff3cd04843c758da3d5789f883058 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/model_based_tuner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce2c189ef66ace7fa04278c2a2a9d68c9b37e9fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/base_tuner.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/base_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..b2da065e44f4074163dc22d1e2ef29d9d46a59f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/base_tuner.py @@ -0,0 +1,72 @@ +# Copyright (c) Microsoft Corporation. 
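+# Editor's note -- a usage sketch only, not part of the upstream file; based on the class below:
+#
+#   tuner = GridSearchTuner(exps, resource_manager, metric)   # any BaseTuner subclass
+#   trials_run = tuner.tune(sample_size=1, n_trials=50, early_stopping=5)
+#   best_exp, best_metric = tuner.best_exp, tuner.best_metric_val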
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import sys
+
+from deepspeed.autotuning.constants import *
+from deepspeed.autotuning.utils import write_experiments
+from deepspeed.utils import logger
+
+
+class BaseTuner:
+
+    def __init__(self, exps, resource_manager, metric):
+        self.all_exps = exps
+        self.rm = resource_manager
+        self.best_iter = 0
+        self.best_exp = None
+        self.best_metric_val = None
+        self.metric = metric if metric else AUTOTUNING_METRIC_DEFAULT
+        logger.info(f"total number of exps = {len(self.all_exps)}")
+
+    def has_next(self):
+        """Whether there are more configurations to evaluate"""
+        if len(self.all_exps) > 0:
+            return True
+        else:
+            return False
+
+    def next_batch(self, sample_size):
+        """Select the next batch of configurations for evaluation"""
+        raise NotImplementedError
+
+    def update(self):
+        """Update the tuner with the configurations that have been evaluated and their performance results"""
+
+    def tune(self, sample_size=1, n_trials=1000, early_stopping=None):
+        i = 0
+        try:
+            while i < n_trials and self.has_next():
+                # Select the next batch of configurations for evaluation
+                sampled_exps = self.next_batch(sample_size)
+                # Generate experiments for measurement of performance
+                exp_paths = write_experiments(sampled_exps, self.rm.exps_dir)
+                self.rm.schedule_experiments(exp_paths)
+                self.rm.run()
+                exp, metric_val = self.rm.parse_results(self.metric)
+                if self.best_exp is None or self.best_metric_val is None or (metric_val
+                                                                             and metric_val > self.best_metric_val):
+                    # logger.info(f"tuner finds better = {exp}")
+                    self.best_exp = exp
+                    self.best_metric_val = metric_val
+                    self.best_iter = i
+
+                i += len(sampled_exps)
+
+                # Update the tuner with evaluated performance results
+                self.update()
+
+                self.rm.clear()
+
+                # Early stop if no more promising configurations are likely to be found
+                if early_stopping and i >= self.best_iter + early_stopping:
+                    logger.info(
+                        f"Tuner early stopped at iteration {i}. Best iteration is {self.best_iter}. Early stopping threshold is {early_stopping}"
+                    )
+                    break
+            return i
+        except Exception:
+            logger.info(f"Tuner Error: {sys.exc_info()[0]}")
+            return i
diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/cost_model.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/cost_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..c12b10f743632c36a61711a411e8bb706041b762
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/cost_model.py
@@ -0,0 +1,66 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .utils import *
+
+try:
+    import xgboost as xgb
+except ImportError:
+    xgb = None
+
+
+class XGBoostCostModel():
+
+    def __init__(self, loss_type, num_threads=None, log_interval=25, upper_model=None):
+
+        assert xgb is not None, "missing requirements, please install deepspeed with the 'autotuning_ml' extra."
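+
+        # Editor's note: two objectives are supported below -- "reg" fits a regression model
+        # ("reg:linear") on the measured metric values, while "rank" (the mode used by
+        # ModelBasedTuner) fits a pairwise ranking model ("rank:pairwise"); any other
+        # loss_type raises a RuntimeError.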
+ + self.loss_type = loss_type + + if loss_type == "reg": + self.xgb_params = { + "max_depth": 3, + "gamma": 0.0001, + "min_child_weight": 1, + "subsample": 1.0, + "eta": 0.3, + "lambda": 1.0, + "alpha": 0, + "objective": "reg:linear", + } + elif loss_type == "rank": + self.xgb_params = { + "max_depth": 3, + "gamma": 0.0001, + "min_child_weight": 1, + "subsample": 1.0, + "eta": 0.3, + "lambda": 1.0, + "alpha": 0, + "objective": "rank:pairwise", + } + else: + raise RuntimeError("Invalid loss type: " + loss_type) + + self.xgb_params["verbosity"] = 0 + if num_threads: + self.xgb_params["nthread"] = num_threads + + def fit(self, xs, ys): + x_train = np.array(xs, dtype=np.float32) + y_train = np.array(ys, dtype=np.float32) + y_max = np.max(y_train) + y_train = y_train / max(y_max, 1e-9) + + index = np.random.permutation(len(x_train)) + dtrain = xgb.DMatrix(x_train[index], y_train[index]) + + self.bst = xgb.train(self.xgb_params, dtrain) + + def predict(self, xs): + + features = xgb.DMatrix(xs) + + return self.bst.predict(features) diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/index_based_tuner.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/index_based_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..d3c822be0d35ff68a42355322797cc4b0c8c1429 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/index_based_tuner.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import random + +from .base_tuner import BaseTuner + + +class RandomTuner(BaseTuner): + """Explore the search space in random order""" + + def __init__(self, exps: list, resource_manager, metric): + super().__init__(exps, resource_manager, metric) + + def next_batch(self, sample_size=1): + if sample_size > len(self.all_exps): + sample_size = len(self.all_exps) + + sampled_batch = random.sample(self.all_exps, sample_size) + self.all_exps = [x for x in self.all_exps if x not in sampled_batch] + + return sampled_batch + + +class GridSearchTuner(BaseTuner): + """Explore the search space in sequential order""" + + def __init__(self, exps: list, resource_manager, metric): + super().__init__(exps, resource_manager, metric) + + def next_batch(self, sample_size=1): + if sample_size > len(self.all_exps): + sample_size = len(self.all_exps) + + sampled_batch = self.all_exps[0:sample_size] + self.all_exps = [x for x in self.all_exps if x not in sampled_batch] + + return sampled_batch diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/model_based_tuner.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/model_based_tuner.py new file mode 100644 index 0000000000000000000000000000000000000000..aec9264f9b7c8623923fd62cd0145871fbc74214 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/model_based_tuner.py @@ -0,0 +1,157 @@ +# Copyright (c) Microsoft Corporation. 
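+# Editor's note -- a high-level sketch, not part of the upstream file, of the tuner below:
+# each candidate ds_config is flattened into a vector of its numeric values; after every
+# measured batch the XGBoost cost model is re-fit on (feature vector, metric) pairs, the
+# remaining configs are ranked by predicted performance, and the next trials are drawn from
+# the top of that ranking, with a small random-exploration ratio (0.2) to avoid
+# over-exploitation.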
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import hjson + +from ..constants import AUTOTUNING, AUTOTUNING_METRIC_PATH +from .base_tuner import BaseTuner +from .cost_model import XGBoostCostModel +from .utils import * +from ..utils import * +import numbers +from ..constants import AUTOTUNING_METRIC_LATENCY + +INIT_NUM = 2 + + +class ModelBasedTuner(BaseTuner): + """Exploring the search space with a cost model""" + + def __init__(self, exps: list, resource_manager, metric, tuning_space): + super().__init__(exps, resource_manager, metric) + self.tuning_space = tuning_space + self.best_iter = 0 + + self.all_configs = [e['ds_config'] for e in exps] + self.num_all_configs = len(self.all_configs) + + self.dims = dict_to_dims(self.tuning_space) + + logger.info(f"Create config dim: {self.dims}, all configs: {self.num_all_configs}") + + self.visited = set([]) + + self.trials = [] + self.trial_pt = 0 + + init_num = min(INIT_NUM, self.num_all_configs) + + for _ in range(init_num): + exp_feature = np.random.randint(self.num_all_configs) + exp_feature = 0 + while exp_feature in self.visited: + exp_feature = np.random.randint(self.num_all_configs) + self.trials.append(exp_feature) + self.visited.add(exp_feature) + + self.cost_model = XGBoostCostModel("rank") + + self.evaluated_configs = [] + self.evaluated_perf = [] + + self.train_ct = 0 + + self.random_exploration_ratio = 0.2 # do random exploration + + def find_estimated_top_configs(self): + """Use the cost model to predict the estimated performance of configurations and find the top ones for the next round of evaluation""" + + configs = [] + + for c in self.all_configs: + flattened_ds_config = flatten(c) + feature_val = [] + for k, v in flattened_ds_config.items(): + if isinstance(v, numbers.Number): + feature_val.append(v) + configs.append(feature_val) + # print(configs) + # TODO the current implementation requires that all configs have the same shape. + configs = np.array(configs, dtype=np.float32) + estimates = self.cost_model.predict(configs) + + n = len(estimates) + top_idx = np.argsort(estimates) + top_idx_ret = top_idx if self.metric == AUTOTUNING_METRIC_LATENCY else top_idx[::-1][:n] + + # top_configs = [self.all_configs[i] for i in top_idx] + + return top_idx_ret + + def next_batch(self, sample_size): + sampled_batch = [] + + counter = 0 + while counter < sample_size: + + if len(self.visited) >= self.num_all_configs: + break + + while self.trial_pt < len(self.trials): + logger.debug(f"trials: {self.trials}") + # Select top promising trials + index = self.trials[self.trial_pt] + if index not in self.visited: + break + self.trial_pt += 1 + + # To avoid over-exploitation, randomly select one that has not been explored. + rand = np.random.rand() + if rand < self.random_exploration_ratio: + # Do normal selection + feature = np.random.choice(self.trials) + while index in self.visited: + index = np.random.randint(self.num_all_configs) + + # Need to track both the sampled configs and indices + + sampled_batch.append(self.all_exps[index]) + self.visited.add(index) + counter += 1 + + return sampled_batch + + def has_next(self): + return len(self.visited) < self.num_all_configs + + def update(self): + for exp_id, (exp, err) in self.rm.finished_experiments.items(): + feature_val = [] + if err: + logger.info( + f"Skipping exp_id = {exp_id}, exp_name = {exp['name']}, the experiment did not run successfully with error = {err}, thus a metrics.txt does not exist for it. 
Please check the stderr.log in {exp['result_dir']}" + ) + ds_config = exp["ds_config"] + flattened_ds_config = flatten(ds_config) + for k, v in flattened_ds_config.items(): + if isinstance(v, numbers.Number): + feature_val.append(v) + self.evaluated_configs.append(feature_val) + self.evaluated_perf.append(0.0) + continue + + p = exp["ds_config"][AUTOTUNING][AUTOTUNING_METRIC_PATH] + with open(p, 'r') as f: + results = hjson.load(f) + curr_iter = results[self.metric] + logger.debug(f"parsing the results for {exp_id}, Result is {curr_iter}") + + ds_config = exp["ds_config"] + flattened_ds_config = flatten(ds_config) + for k, v in flattened_ds_config.items(): + if isinstance(v, numbers.Number): + feature_val.append(v) + self.evaluated_configs.append(feature_val) + self.evaluated_perf.append(curr_iter) + + logger.debug(f"**Evaluated configs: {len(self.evaluated_configs)}, evaluated perf: {self.evaluated_perf}") + + self.cost_model.fit(self.evaluated_configs, self.evaluated_perf) + + estimated_top_configs = self.find_estimated_top_configs() + + self.trials = estimated_top_configs + self.trial_pt = 0 + self.train_ct += 1 diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/utils.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ada643f2c02ce9b0433c89295e932f9f49740eac --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/tuner/utils.py @@ -0,0 +1,86 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import numpy as np +import itertools +from ..utils import * +import collections.abc + + +def index_to_feature(p, dims): + """convert index form (single integer) to feature form (vector)""" + feature = [] + for dim in dims: + feature.append(p % dim) + p //= dim + return feature + + +def feature_to_index(feature, dims): + """convert feature form (vector) to index form (single integer)""" + p = 0 + for j, k in enumerate(feature): + print("j:", "k:", k, "dims", dims[:j]) + p += int(np.prod(dims[:j])) * k + return p + + +def dict_to_dims(tuning_space): + + dims = [] + + for key, val in tuning_space.items(): + if isinstance(val, dict): + dims.extend(dict_to_dims(val)) + elif isinstance(val, list): + dims.append(len(val)) + else: + dims.append(1) + + return dims + + +def gen_combinations(d: dict): + keys, values = d.keys(), d.values() + for v in values: + if not isinstance(v, list): + v = [v] + values_choices = (gen_combinations(v) if isinstance(v, dict) else get_list(v) for v in values) + for comb in itertools.product(*values_choices): + yield dict(zip(keys, comb)) + + +def flatten(d, parent_key='', sep='_'): + items = [] + for k, v in d.items(): + new_key = parent_key + sep + k if parent_key else k + if isinstance(v, collections.abc.MutableMapping): + items.extend(flatten(v, new_key, sep=sep).items()) + else: + items.append((new_key, v)) + return dict(items) + + +def dict_to_feature(feature_dict, keys, max_value=None): + """Extract values from dict""" + feature = [] + for key, val in feature_dict.items(): # First level + if key not in keys: + continue + if val is None or val == "auto" or key == "autotuning" or val == "": + continue + if isinstance(val, dict): + feature.append(dict_to_feature(val, max_value)) + else: + feature.append(float(val)) + + # normalization, should not matter in tree models + if max_value is not None: + norm_feature = [] + for f, mv in zip(feature, max_value): + norm_feature.append(f / mv) + 
feature = norm_feature + + return feature diff --git a/venv/lib/python3.10/site-packages/deepspeed/autotuning/utils.py b/venv/lib/python3.10/site-packages/deepspeed/autotuning/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b851353520fb87a5d9680644d0aa447cfa28d32f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/autotuning/utils.py @@ -0,0 +1,459 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import re +import collections.abc +import os +import json +from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU +import itertools +import copy + +from ..utils import logger + + +def search_error(filename): + if not os.path.exists(filename): + return "stderr.log does not exist" + with open(filename) as f: + for line in f: + for s in ["Error", "error", "ERROR"]: + idx = line.find(s) + if idx != -1: + return line[idx + len(s):].lstrip(": ") + return None + + +def was_interruptted(filename): + if not os.path.exists(filename): + return "stderr.log does not exist" + with open(filename) as f: + for line in f: + s = "KeyboardInterrupt" + idx = line.find(s) + if idx != -1: + return True + return False + + +def find_replace_str(value, replace_dict): + if not isinstance(value, str): + return str(value) + + matches = re.findall(r"\$[\w]+", value) + for var in matches: + var_key = var.replace("$", "").lower() + if var_key == "nvme_path": + continue + assert var_key in replace_dict, f"unknown var key: {var_key}, in {replace_dict}" + if isinstance(replace_dict[var_key], str): + value = value.replace(var, replace_dict[var_key]) + else: + assert len(matches) == 1, "unable to replace multiple non-string matches" + value = replace_dict[var_key] + return value + + +def find_replace(target, replace_dict): + if isinstance(target, dict): + for key, value in target.items(): + if isinstance(value, str): + target[key] = find_replace_str(value, replace_dict) + if isinstance(value, list): + for i in range(len(value)): + value[i] = find_replace_str(value[i], replace_dict) + if isinstance(value, dict): + find_replace(value, replace_dict) + elif isinstance(target, list): + for i in range(len(target)): + target[i] = str(find_replace_str(target[i], replace_dict)) + + +def get_list(val): + if not isinstance(val, list): + return [val] + else: + return val + + +def combine_dict(d, u): + for k, v in u.items(): + if isinstance(v, collections.abc.Mapping): + d[k] = combine_dict(d.get(k, {}), v) + else: + if k not in d: + d[k] = v + else: + if not isinstance(d[k], list): + d[k] = [d[k]] + d[k].extend(i for i in get_list(v) if i not in d[k]) + return d + + +def del_if_exists(t, d): + """Deletes a key from a dictionary if it exists. + + Args: + t (string): target key to delete + d (dict): dictionary to delete from + """ + if t in d: + del d[t] + return + for k, v in d.items(): + if isinstance(v, collections.abc.Mapping): + del_if_exists(t, v) + + +def replace_dict(d, u, ignored_keys=[]): + """Replaces values in dict d with values in dict u. + + Args: + d (dict): the target dict to overwrite + u (dict): the dict containing the values to overwrite the target dict + + Returns: + dict d with values overwritten by the corresponding ones in dict u. 
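+
+    Example (editor's illustration, not part of the original docstring):
+    replace_dict({"a": 1, "b": 2}, {"b": 3, "c": None}) returns {"a": 1, "b": 3};
+    a None value in u deletes the corresponding key from d (via del_if_exists).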
+ """ + if u is not None: + for k, v in u.items(): + if k not in ignored_keys: + if v is None: + del_if_exists(k, d) + continue + if isinstance(v, collections.abc.Mapping): + d[k] = replace_dict(d.get(k, {}), v, ignored_keys) + else: + d[k] = v + return d + + +def get_val_by_key(d: dict, k): + if k in d: + return d[k] + for v in d.values(): + if isinstance(v, dict): + return get_val_by_key(v, k) + return None + + +def set_val_by_key(d: dict, k, vv): + if k in d: + d[k] = vv + for v in d.values(): + if isinstance(v, dict): + set_val_by_key(v, k, vv) + + +def fetch_hostfile(hostfile_path): + if not os.path.isfile(hostfile_path): + logger.warning("Unable to find hostfile, will proceed with training " + "with local resources only.") + return None + + # e.g., worker-0 slots=16 + with open(hostfile_path, 'r') as fd: + resource_pool = collections.OrderedDict() + for line in fd.readlines(): + line = line.strip() + if line == '': + # skip empty lines + continue + try: + hostname, slots = line.split() + _, slot_count = slots.split("=") + slot_count = int(slot_count) + except ValueError as err: + logger.error("Hostfile is not formatted correctly, unable to " + "proceed with training.") + raise err + if hostname in resource_pool: + logger.error("Hostfile contains duplicate hosts, unable to " + "proceed with training.") + raise ValueError("host {} is already defined".format(hostname)) + resource_pool[hostname] = slot_count + + return resource_pool + + +def validate_ds_config(config: dict): + + def is_False(config: dict, key): + if config is None: + return False + return bool(config.get(key)) + + config_zero = config.get("zero_optimization", {}) + if not config_zero: + return True + stage = config_zero.get("stage") + offload = False + if stage == 1: + return True + elif stage == 2: + if is_False(config_zero, "cpu_offload") and is_False(config_zero, "cpu_offload_params"): + return False + elif stage == 3: + offload_devices = ["cpu", "nvme"] + if config_zero.get("offload_optimizer", {}).get("device") in offload_devices: + offload = True + if config_zero.get("offload_param", {}).get("device") in offload_devices: + offload = True + else: + return True + + # HF requires that "ZeRO Offload can only work with DeepSpeed optimizers" + if offload and not config.get("optimizer"): + return False + + return True + + +def remove_dupe_dicts(l): + """ Removes duplicate dictionaries from a list. Uses list comprehension and the json library to sort and stringify each dictionary and the set data type to ensure unique values. Works with nested data structures. + + Args: + l (list): a list of (nested) data structures. + + Returns: + A list of unique values. + """ + list_of_strings = [json.dumps(d, sort_keys=True) for d in l] + list_of_strings = set(list_of_strings) + return [json.loads(s) for s in list_of_strings] + + +def prune_config(config, ignored_keys=[]): + """ Prunes the input configurations + + Args: + configs (dict): A configuration dictionary. + ignored_keys (list, optional): the keys of the sections to delete. Defaults to []. + + Returns: + A configuration dictionary. + """ + if ignored_keys: + for k in ignored_keys: + + def find_del_key(d: dict, k: str): + if k in d: + del d[k] + else: + for dd in d.values(): + if isinstance(dd, dict): + find_del_key(dd, k) + + find_del_key(config, k) + + +def prune_configs(configs, ignored_keys=[]): + """ Prunes the input list of configurations + + Args: + configs (list): A list of configuration dictionaries. + ignored_keys (list, optional): the keys of the sections to delete. 
Defaults to []. + + Returns: + A list of valid and unique configuration dictionaries. + """ + pruned_list = [] + for config in configs: + prune_config(config, ignored_keys) + pruned_list.append(config) + + return remove_dupe_dicts(pruned_list) + + +def get_tuning_keys(tuning_space: dict): + """Outputs the list of tunable parameters in the tuning space dict. + + Args: + tuning_space (dict): a configuration dictionary containing tunable parameters as lists of values. + + Returns: + A list of strings + """ + tuning_keys = [] + for key, val in tuning_space.items(): + if isinstance(val, dict): + tuning_keys.extend(get_tuning_keys(val)) + if isinstance(val, list) and len(val) > 1: + tuning_keys.append(key) + return tuning_keys + + +def get_all_configs(tuning_space: dict, ignore_keys=None): + """ Splits the tuning space dictionary to result in all combinations of values. + + Args: + tuning_space (dict): the tuning space where tunable parameters are lists of values. + """ + + def gen_combinations(d: dict): + keys, values = d.keys(), d.values() + for v in values: + if not isinstance(v, list): + v = [v] + values_choices = (gen_combinations(v) if isinstance(v, dict) else get_list(v) for v in values) + for comb in itertools.product(*values_choices): + yield dict(zip(keys, comb)) + + all_configs = [] + ignored_key_vals = {} + for ik in ignore_keys: + ignored_key_vals[ik] = tuning_space.get(ik, {}) + del_if_exists(ik, tuning_space) + for c in gen_combinations(tuning_space): + replace_dict(c, ignored_key_vals) + all_configs.append(c) + return all_configs + + +def canonical_name(config: dict, tuning_keys=None, prefix="", omit_val=False): + """ Generates a name from the acronyms of the tuning keys in the config dict. TRAIN_MICRO_BATCH_SIZE_PER_GPU is always included in the tuning keys. + Args: + config (dict): the config dict used to generate the name + tuning_keys (list, optional): the tuning keys used to generate the name. Defaults to None. + prefix (str, optional): a string added to the beginning of the name. Defaults to None. 
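+
+    Example (editor's illustration, not part of the original docstring): with
+    config = {"gradient_accumulation_steps": 2, "train_micro_batch_size_per_gpu": 4} and
+    tuning_keys = ["gradient_accumulation_steps", "train_micro_batch_size_per_gpu"],
+    the generated name is prefix + "gas2_tmbspg4" -- each key is abbreviated to the
+    initials of its underscore-separated parts, followed by its value.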
+ """ + if TRAIN_MICRO_BATCH_SIZE_PER_GPU not in tuning_keys: + tuning_keys.append(TRAIN_MICRO_BATCH_SIZE_PER_GPU) + if GRADIENT_ACCUMULATION_STEPS not in tuning_keys: + tuning_keys.append(GRADIENT_ACCUMULATION_STEPS) + tuning_keys.sort() + + def get_offload_name(offload_config): + cname = "" + if offload_config is None: + return "None_" + for key, val in offload_config.items(): + key = "".join(map(lambda c: c[0], key.split('_'))) + if (isinstance(val, int) or isinstance(val, float)) and val > 9000: + cname += key + '{:.1e}'.format(val) + "_" + else: + if isinstance(val, bool): + val = "T" if val else "F" + cname += f"{key}{val}_" + return cname + + def get_name_by_keys(config: dict, tuning_keys=None, omit_val=False): + cname = "" + if not tuning_keys or config is None: + return cname + for key, val in config.items(): + # skip the arg_mappings section when naming the exp file + if key == "arg_mappings": + continue + if key == "offload_param": + cname += "op_" + if not omit_val: + cname += get_offload_name(val) + continue + if key == "offload_optimizer": + cname += "oo_" + if not omit_val: + cname += get_offload_name(val) + continue + # recursively call the func to get name for the child dicts + if isinstance(val, dict): + n = get_name_by_keys(val, tuning_keys, omit_val=omit_val) + if n != "": + cname += n + "_" + if tuning_keys and key not in tuning_keys: + continue + + key_str = "".join(map(lambda c: c[0], key.split('_'))) + + if not omit_val: + if (isinstance(val, int) or isinstance(val, float)) and val > 9000: + cname += key_str + '{:.1e}'.format(val) + "_" + else: + if isinstance(val, bool): + val = "T" if val else "F" + cname += f"{key_str}{val}_" + else: + cname += key_str + "_" + + return cname[:-1] + + name = get_name_by_keys(config, tuning_keys, omit_val=omit_val) + + return prefix + (name if name != "" else "exp") + + +def get_first_config(config: dict): + if not config: + return None + cfg = copy.deepcopy(config) + + for key, val in cfg.items(): + if isinstance(val, dict): + if key == "optimizer": # use user defined optimizer which might have lists of values as params + cfg[key] = val + else: + cfg[key] = get_first_config(val) + if isinstance(val, list) and len(val) > 0: + cfg[key] = val[0] + return cfg + + +def write_experiments(exps: list, exps_dir: str): + exp_paths = [] + for exp in exps: + exp_name = exp['name'] + # write the expr config to a json file + exp_path = os.path.join(exps_dir, f'{exp_name}.json') + with open(exp_path, 'w') as fd: + + json.dump(exp, fd) + exp_paths.append(exp_path) + return exp_paths + + +def memory_to_string(n, postfix="", units=None, precision=2): + if units is None: + if n // 10**12 > 0: + return str(round(n / 1024**4, precision)) + " T" + postfix + if n // 10**9 > 0: + return str(round(n / 1024**3, precision)) + " G" + postfix + elif n // 10**6 > 0: + return str(round(n / 1024**2, precision)) + " M" + postfix + elif n // 10**3 > 0: + return str(round(n / 1014, precision)) + " K" + postfix + else: + return str(n) + " " + else: + if units == "T": + return str(round(n / 1024**4, precision)) + " " + units + if units == "G" + postfix: + return str(round(n / 1024**3, precision)) + " " + units + elif units == "M" + postfix: + return str(round(n / 1024**2, precision)) + " " + units + elif units == "K" + postfix: + return str(round(n / 1024, precision)) + " " + units + else: + return str(n) + " " + + +def number_to_string(n, postfix="", units=None, precision=2): + if units is None: + if n // 10**9 > 0: + return str(round(n / 1000**3, precision)) + " B" 
+ postfix + if n // 10**6 > 0: + return str(round(n / 1000**2, precision)) + " M" + postfix + elif n // 10**3 > 0: + return str(round(n / 1000**1, precision)) + " K" + postfix + else: + return str(n) + " " + else: + if units == "B" + postfix: + return str(round(n / 1000**3, precision)) + " " + units + elif units == "M" + postfix: + return str(round(n / 1000**2, precision)) + " " + units + elif units == "K" + postfix: + return str(round(n / 1000**1, precision)) + " " + units + else: + return str(n) + " " diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91508d468941125deb7d4dacb31bb6426a53b2ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89ed6f0acbb4edf32d7c8694091dcab106c03142 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1da4f10e4a58e3df4225fc78763a63db44a400c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/elastic_agent.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/elastic_agent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..302b3ee7887f3b6b96058372a131d1c32b40d081 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/elastic_agent.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/elasticity.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/elasticity.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d36068d28bd050f01a0bd7aa386142ba1f9e2b90 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/elasticity/__pycache__/elasticity.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ea5ce5af19eb790688c1a99f9ec8e396ebd6178 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from . import adam +from . import adagrad +from . import lamb +from . import lion +from . import sparse_attention +from . 
import transformer + +from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig + +from ..git_version_info import compatible_ops as __compatible_ops__ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/aio/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/aio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6aea264237e366b9978c8bd096a2fbc28945705e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/aio/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..op_builder import AsyncIOBuilder diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/aio/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/aio/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8d5612e8c8e20aee72541224f829355588fe518 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/aio/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/fp_quantizer/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/fp_quantizer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..995bbae4aeaf7ceb5ae67276901a48d4a7cfb3b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/fp_quantizer/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .quantize import FP_Quantize, Quantizer diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0a5d1da5e3ae0fa097e7313ddb1328c4f910801d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .quantizer import ds_quantizer diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a38b7b33f39bd70ff9c87dade0b00e5d30c9cfbe Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/__pycache__/quantizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/__pycache__/quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0873c1b12fdf3b325df7c984237e7f6f123af2c9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/__pycache__/quantizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/quantizer.py b/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..eb4bfd35700075f3b32db329c5f7026b39bef520 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/quantizer/quantizer.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from deepspeed.ops.op_builder import QuantizerBuilder + +# Cuda modules will be imported if needed +quantizer_cuda_module = None + + +def ds_quantizer(input, groups=1, bit_num=8, sr=False, asym=False): + # Load cuda modules if needed + global quantizer_cuda_module + if quantizer_cuda_module is None: + quantizer_cuda_module = QuantizerBuilder().load() + if sr: + if asym: + quantize_func = quantizer_cuda_module.ds_sr_quantize_asym_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_sr_quantize_asym_fp32 + else: + quantize_func = quantizer_cuda_module.ds_sr_quantize_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_sr_quantize_fp32 + else: + if asym: + quantize_func = quantizer_cuda_module.ds_quantize_asym_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_quantize_asym_fp32 + else: + quantize_func = quantizer_cuda_module.ds_quantize_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_quantize_fp32 + return quantize_func(input, groups, bit_num) diff --git a/venv/lib/python3.10/site-packages/deepspeed/profiling/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/profiling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/profiling/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/profiling/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/profiling/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..927169cfc8b8f15dadd14650de96b7021d3e3593 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/profiling/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/profiling/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/profiling/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a3929a86f5b8ea4dcba78311ad9f99e45dc1363 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/profiling/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/profiling/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/profiling/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13ed3afeea89da653bf27415f688af668a4535a3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/profiling/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/profiling/config.py b/venv/lib/python3.10/site-packages/deepspeed/profiling/config.py new file mode 100644 index 0000000000000000000000000000000000000000..e4f06630ea6f6801883046efb5f61df3e7132f63 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/profiling/config.py @@ -0,0 +1,46 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.runtime.config_utils import get_scalar_param, DeepSpeedConfigObject +from deepspeed.profiling.constants import * + + +class DeepSpeedFlopsProfilerConfig(DeepSpeedConfigObject): + + def __init__(self, param_dict): + super(DeepSpeedFlopsProfilerConfig, self).__init__() + + self.enabled = None + self.recompute_fwd_factor = None + self.profile_step = None + self.module_depth = None + self.top_modules = None + + if FLOPS_PROFILER in param_dict.keys(): + flops_profiler_dict = param_dict[FLOPS_PROFILER] + else: + flops_profiler_dict = {} + + self._initialize(flops_profiler_dict) + + def _initialize(self, flops_profiler_dict): + self.enabled = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_ENABLED, FLOPS_PROFILER_ENABLED_DEFAULT) + + self.recompute_fwd_factor = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_RECOMPUTE_FWD_FACTOR, + FLOPS_PROFILER_RECOMPUTE_FWD_FACTOR_DEFAULT) + + self.profile_step = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_PROFILE_STEP, + FLOPS_PROFILER_PROFILE_STEP_DEFAULT) + + self.module_depth = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_MODULE_DEPTH, + FLOPS_PROFILER_MODULE_DEPTH_DEFAULT) + + self.top_modules = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_TOP_MODULES, + FLOPS_PROFILER_TOP_MODULES_DEFAULT) + + self.detailed = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_DETAILED, FLOPS_PROFILER_DETAILED_DEFAULT) + + self.output_file = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_OUTPUT_FILE, + FLOPS_PROFILER_OUTPUT_FILE_DEFAULT) diff --git a/venv/lib/python3.10/site-packages/deepspeed/profiling/constants.py b/venv/lib/python3.10/site-packages/deepspeed/profiling/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..0374303d7d969c76cd6771b2b18bf46f94df8d04 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/profiling/constants.py @@ -0,0 +1,47 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +######################################### +# flops profiler +######################################### +# Flops profiler. By default, this feature is not enabled. 
+# Users can configure in ds_config.json as below example: +FLOPS_PROFILER_FORMAT = ''' +flops profiler should be enabled as: +"session_params": { + "flops_profiler": { + "enabled": true, + "recompute_fwd_factor": 0.0, + "profile_step": 1, + "module_depth": -1, + "top_modules": 3, + "detailed": true, + "output_file": null + } +} +''' + +FLOPS_PROFILER = "flops_profiler" + +FLOPS_PROFILER_ENABLED = "enabled" +FLOPS_PROFILER_ENABLED_DEFAULT = False + +FLOPS_PROFILER_RECOMPUTE_FWD_FACTOR = "recompute_fwd_factor" +FLOPS_PROFILER_RECOMPUTE_FWD_FACTOR_DEFAULT = 0.0 + +FLOPS_PROFILER_PROFILE_STEP = "profile_step" +FLOPS_PROFILER_PROFILE_STEP_DEFAULT = 1 + +FLOPS_PROFILER_MODULE_DEPTH = "module_depth" +FLOPS_PROFILER_MODULE_DEPTH_DEFAULT = -1 + +FLOPS_PROFILER_TOP_MODULES = "top_modules" +FLOPS_PROFILER_TOP_MODULES_DEFAULT = 1 + +FLOPS_PROFILER_DETAILED = "detailed" +FLOPS_PROFILER_DETAILED_DEFAULT = True + +FLOPS_PROFILER_OUTPUT_FILE = "output_file" +FLOPS_PROFILER_OUTPUT_FILE_DEFAULT = None diff --git a/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..980d8a5e92fc4c3491f822479d05c5e2776233df --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .profiler import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96888f3c5099ddf43e3433bb8390b52e4d6c2ad4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/__pycache__/profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a651774ff73f839515af2c740784dadf53a0543 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/__pycache__/profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/profiler.py b/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..de847e59e82eaae01143657f1ed22d753fba77b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/profiling/flops_profiler/profiler.py @@ -0,0 +1,1257 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import time +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial +from typing import List, Optional +from collections import OrderedDict +import numpy as np +from deepspeed.accelerator import get_accelerator +from deepspeed.utils import logger +from deepspeed.moe.layer import MoE +from deepspeed.utils.timer import FORWARD_GLOBAL_TIMER, BACKWARD_GLOBAL_TIMER, STEP_GLOBAL_TIMER + +Tensor = torch.Tensor + +module_flop_count = [] +module_mac_count = [] +old_functions = {} + +DEFAULT_PRECISION = 2 + + +class FlopsProfiler(object): + """Measures the latency, number of estimated floating-point operations and parameters of each module in a PyTorch model. + + The flops-profiler profiles the forward pass of a PyTorch model and prints the model graph with the measured profile attached to each module. It shows how latency, flops and parameters are spent in the model and which modules or layers could be the bottleneck. It also outputs the names of the top k modules in terms of aggregated latency, flops, and parameters at depth l with k and l specified by the user. The output profile is computed for each batch of input. + The DeepSpeed flops profiler can be used with the DeepSpeed runtime or as a standalone package. + When using DeepSpeed for model training, the flops profiler can be configured in the deepspeed_config file and no user code change is required. + + If using the profiler as a standalone package, one imports the flops_profiler package and use the APIs. + + Here is an example for usage in a typical training workflow: + + .. code-block:: python + + model = Model() + prof = FlopsProfiler(model) + + for step, batch in enumerate(data_loader): + if step == profile_step: + prof.start_profile() + + loss = model(batch) + + if step == profile_step: + flops = prof.get_total_flops(as_string=True) + params = prof.get_total_params(as_string=True) + prof.print_model_profile(profile_step=profile_step) + prof.end_profile() + + loss.backward() + optimizer.step() + + To profile a trained model in inference, use the `get_model_profile` API. + + Args: + object (torch.nn.Module): The PyTorch model to profile. + """ + + def __init__(self, model, ds_engine=None, recompute_fwd_factor=0.0): + self.model = model + self.ds_engine = ds_engine + self.recompute_fwd_factor = recompute_fwd_factor + self.started = False + self.func_patched = False + + def start_profile(self, ignore_list=None): + """Starts profiling. + + Extra attributes are added recursively to all the modules and the profiled torch.nn.functionals are monkey patched. + + Args: + ignore_list (list, optional): the list of modules to ignore while profiling. Defaults to None. 
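+
+        Example (illustrative sketch; ``model`` and the ignored module classes are
+        placeholders for whatever the caller is profiling):
+
+        .. code-block:: python
+
+            prof = FlopsProfiler(model)
+            # skip dropout modules when attaching the profiling hooks
+            prof.start_profile(ignore_list=[torch.nn.Dropout])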
+ """ + logger.info("Flops profiler started") + self.reset_profile() + _patch_functionals() + _patch_tensor_methods() + + def register_module_hooks(module, ignore_list): + if ignore_list and type(module) in ignore_list: + return + + # if computing the flops of a module directly + if type(module) in MODULE_HOOK_MAPPING: + if not hasattr(module, "__flops_handle__"): + module.__flops_handle__ = module.register_forward_hook(MODULE_HOOK_MAPPING[type(module)]) + return + + # if computing the flops of the functionals in a module + def pre_hook(module, input): + module_flop_count.append([]) + module_mac_count.append([]) + + if not hasattr(module, "__pre_hook_handle__"): + module.__pre_hook_handle__ = module.register_forward_pre_hook(pre_hook) + + def post_hook(module, input, output): + if module_flop_count: + module.__flops__ += sum([elem[1] for elem in module_flop_count[-1]]) + module_flop_count.pop() + module.__macs__ += sum([elem[1] for elem in module_mac_count[-1]]) + module_mac_count.pop() + + if not hasattr(module, "__post_hook_handle__"): + module.__post_hook_handle__ = module.register_forward_hook(post_hook) + + def start_time_hook(module, input): + get_accelerator().synchronize() + module.__start_time__ = time.time() + + if not hasattr(module, "__start_time_hook_handle"): + module.__start_time_hook_handle__ = module.register_forward_pre_hook(start_time_hook) + + def end_time_hook(module, input, output): + get_accelerator().synchronize() + module.__duration__ += time.time() - module.__start_time__ + + if not hasattr(module, "__end_time_hook_handle__"): + module.__end_time_hook_handle__ = module.register_forward_hook(end_time_hook) + + self.model.apply(partial(register_module_hooks, ignore_list=ignore_list)) + self.started = True + self.func_patched = True + + def stop_profile(self): + """Stop profiling. + + All torch.nn.functionals are restored to their originals. + """ + if self.started and self.func_patched: + _reload_functionals() + _reload_tensor_methods() + self.func_patched = False + + def remove_profile_attrs(module): + if hasattr(module, "__pre_hook_handle__"): + module.__pre_hook_handle__.remove() + del module.__pre_hook_handle__ + if hasattr(module, "__post_hook_handle__"): + module.__post_hook_handle__.remove() + del module.__post_hook_handle__ + if hasattr(module, "__flops_handle__"): + module.__flops_handle__.remove() + del module.__flops_handle__ + if hasattr(module, "__start_time_hook_handle__"): + module.__start_time_hook_handle__.remove() + del module.__start_time_hook_handle__ + if hasattr(module, "__end_time_hook_handle__"): + module.__end_time_hook_handle__.remove() + del module.__end_time_hook_handle__ + + self.model.apply(remove_profile_attrs) + + def reset_profile(self): + """Resets the profiling. + + Adds or resets the extra attributes. 
+ """ + + def get_param_count_and_ep(param): + """ + Return the number of parameters in the layer, whether the layer is an MoE layer, + and its expert parallelism size if so + """ + prefix = 'ep_size_' + offset = len(prefix) + expert_parallelism = 0 + if getattr(param, "group_name", "").startswith(prefix): + try: + expert_parallelism = int(param.group_name[offset:]) + except ValueError: + pass + return param.numel(), expert_parallelism, param.element_size() + + def add_or_reset_attrs(module): + module.__flops__ = 0 + module.__macs__ = 0 + module.__params__ = module.__expert_params__ = module.__model_expert_params__ = 0 + parameters = (get_param_count_and_ep(p) for p in module.parameters()) + for num_params, expert_parallelism, per_param_size in parameters: + params = num_params if not expert_parallelism else 0 + expert_params = num_params if expert_parallelism else 0 + # number of expert parameters taking into account other expert parallel groups + model_expert_params = num_params * expert_parallelism + module.__params__ += params + module.__expert_params__ += expert_params + module.__model_expert_params__ += model_expert_params + module.__start_time__ = 0 + module.__duration__ = 0 + + self.model.apply(add_or_reset_attrs) + + def end_profile(self): + """Ends profiling. + + The added attributes and handles are removed recursively on all the modules. + """ + if not self.started: + return + self.stop_profile() + self.started = False + + def remove_profile_attrs(module): + if hasattr(module, "__flops__"): + del module.__flops__ + if hasattr(module, "__macs__"): + del module.__macs__ + if hasattr(module, "__params__"): + del module.__params__ + if hasattr(module, "__expert_params__"): + del module.__expert_params__ + if hasattr(module, "__model_expert_params__"): + del module.__model_expert_params__ + if hasattr(module, "__start_time__"): + del module.__start_time__ + if hasattr(module, "__duration__"): + del module.__duration__ + + self.model.apply(remove_profile_attrs) + logger.info("Flops profiler finished") + + def get_total_flops(self, as_string=False): + """Returns the total flops of the model. + + Args: + as_string (bool, optional): whether to output the flops as string. Defaults to False. + + Returns: + The number of multiply-accumulate operations of the model forward pass. + """ + total_flops = get_module_flops(self.model) + return number_to_string(total_flops) if as_string else total_flops + + def get_total_macs(self, as_string=False): + """Returns the total MACs of the model. + + Args: + as_string (bool, optional): whether to output the flops as string. Defaults to False. + + Returns: + The number of multiply-accumulate operations of the model forward pass. + """ + total_macs = get_module_macs(self.model) + return macs_to_string(total_macs) if as_string else total_macs + + def get_total_duration(self, as_string=False): + """Returns the total duration of the model forward pass. + + Args: + as_string (bool, optional): whether to output the duration as string. Defaults to False. + + Returns: + The latency of the model forward pass. + """ + total_duration = get_module_duration(self.model) + return duration_to_string(total_duration) if as_string else total_duration + + def get_total_params(self, as_string=False): + """Returns the total number of parameters stored per rank. + + Args: + as_string (bool, optional): whether to output the parameters as string. Defaults to False. + + Returns: + The total number of parameters stored per rank. 
+ """ + total_params = self.model.__expert_params__ + self.model.__params__ + return params_to_string(total_params) if as_string else total_params + + def is_expert_tensor_parallelism_enabled(self): + for _, module in self.model.named_modules(): + if isinstance(module, MoE) and hasattr(module, 'enable_expert_tensor_parallelism'): + return module.enable_expert_tensor_parallelism + return False + + def print_model_profile(self, profile_step=1, module_depth=-1, top_modules=1, detailed=True, output_file=None): + """Prints the model graph with the measured profile attached to each module. + + Args: + profile_step (int, optional): The global training step at which to profile. Note that warm up steps are needed for accurate time measurement. + module_depth (int, optional): The depth of the model to which to print the aggregated module information. When set to -1, it prints information from the top to the innermost modules (the maximum depth). + top_modules (int, optional): Limits the aggregated profile output to the number of top modules specified. + detailed (bool, optional): Whether to print the detailed model profile. + output_file (str, optional): Path to the output file. If None, the profiler prints to stdout. + """ + if not self.started: + return + import sys + import os.path + original_stdout = None + f = None + if output_file and output_file != "": + dir_path = os.path.dirname(os.path.abspath(output_file)) + if not os.path.exists(dir_path): + os.makedirs(dir_path) + original_stdout = sys.stdout + f = open(output_file, "w") + sys.stdout = f + + total_flops = self.get_total_flops() + total_macs = self.get_total_macs() + total_duration = self.get_total_duration() + total_params = self.get_total_params() + expert_tensor_parallelism = None # silence the linters + total_model_expert_params = total_model_nonexpert_params = 0 + if self.ds_engine: + total_model_nonexpert_params = self.model.__params__ * self.ds_engine.mp_world_size + if self.ds_engine.has_moe_layers: + expert_tensor_parallelism = self.ds_engine.mp_world_size if self.is_expert_tensor_parallelism_enabled( + ) else 1 + total_model_expert_params = self.model.__model_expert_params__ * expert_tensor_parallelism + + self.flops = total_flops + self.macs = total_macs + self.params = total_params + + print("\n-------------------------- DeepSpeed Flops Profiler --------------------------") + print(f'Profile Summary at step {profile_step}:') + print("Notations:\n" + "data parallel size (dp_size), model parallel size(mp_size),\n" + "number of parameters (params), number of multiply-accumulate operations(MACs),\n" + "number of floating-point operations (flops), floating-point operations per second (FLOPS),\n" + "fwd latency (forward propagation latency), bwd latency (backward propagation latency),\n" + "step (weights update latency), iter latency (sum of fwd, bwd and step latency)\n") + line_fmt = '{:<70} {:<8}' + if self.ds_engine: + print(line_fmt.format('world size: ', self.ds_engine.world_size)) + print(line_fmt.format('data parallel size: ', self.ds_engine.dp_world_size)) + print(line_fmt.format('model parallel size: ', self.ds_engine.mp_world_size)) + print(line_fmt.format('batch size per GPU: ', self.ds_engine.train_micro_batch_size_per_gpu())) + if self.ds_engine.has_moe_layers: + print(line_fmt.format('expert tensor parallelism enabled: ', expert_tensor_parallelism > 1)) + + print(line_fmt.format('params per GPU: ', params_to_string(total_params))) + if total_model_expert_params > 0: + print( + line_fmt.format('params of model: ', + 
params_to_string(total_model_nonexpert_params + total_model_expert_params))) + print(line_fmt.format(' non-expert params of model: ', params_to_string(total_model_nonexpert_params))) + print(line_fmt.format(' expert params of model: ', params_to_string(total_model_expert_params))) + else: + print( + line_fmt.format('params of model = params per GPU * mp_size: ', + params_to_string(total_model_nonexpert_params))) + + print(line_fmt.format('fwd MACs per GPU: ', macs_to_string(total_macs))) + + print(line_fmt.format('fwd flops per GPU: ', number_to_string(total_flops))) + + print( + line_fmt.format('fwd flops of model = fwd flops per GPU * mp_size: ', + number_to_string(total_flops * (self.ds_engine.mp_world_size if self.ds_engine else 1)))) + + fwd_latency = self.get_total_duration() + if self.ds_engine and self.ds_engine.wall_clock_breakdown(): + fwd_latency = self.ds_engine.timers(FORWARD_GLOBAL_TIMER).elapsed(False) / 1000.0 + print(line_fmt.format('fwd latency: ', duration_to_string(fwd_latency))) + print( + line_fmt.format('fwd FLOPS per GPU = fwd flops per GPU / fwd latency: ', + flops_to_string(total_flops / fwd_latency))) + + if self.ds_engine and self.ds_engine.wall_clock_breakdown(): + bwd_factor = 2 + self.recompute_fwd_factor + bwd_latency = self.ds_engine.timers(BACKWARD_GLOBAL_TIMER).elapsed(False) / 1000.0 + step_latency = self.ds_engine.timers(STEP_GLOBAL_TIMER).elapsed(False) / 1000.0 + print(line_fmt.format('bwd latency: ', duration_to_string(bwd_latency))) + print( + line_fmt.format(f'bwd FLOPS per GPU = {bwd_factor:g} * fwd flops per GPU / bwd latency: ', + flops_to_string(bwd_factor * total_flops / bwd_latency))) + print( + line_fmt.format( + f'fwd+bwd FLOPS per GPU = {bwd_factor + 1:g} * fwd flops per GPU / (fwd+bwd latency): ', + flops_to_string((bwd_factor + 1) * total_flops / (fwd_latency + bwd_latency)))) + + print(line_fmt.format('step latency: ', duration_to_string(step_latency))) + + iter_latency = fwd_latency + bwd_latency + step_latency + print(line_fmt.format('iter latency: ', duration_to_string(iter_latency))) + print( + line_fmt.format(f'FLOPS per GPU = {bwd_factor + 1:g} * fwd flops per GPU / iter latency: ', + flops_to_string((bwd_factor + 1) * total_flops / iter_latency))) + + samples_per_iter = self.ds_engine.train_micro_batch_size_per_gpu() * self.ds_engine.world_size + print(line_fmt.format('samples/second: ', round(samples_per_iter / iter_latency, DEFAULT_PRECISION))) + + def flops_repr(module): + params = module.__params__ + module.__expert_params__ + flops = get_module_flops(module) + macs = get_module_macs(module) + duration = get_module_duration(module) + items = [ + "{} = {:g}% Params".format( + params_to_string(params), + round(100 * params / total_params, DEFAULT_PRECISION) if total_params else 0), + "{} = {:g}% MACs".format(macs_to_string(macs), + round(100 * macs / total_macs, DEFAULT_PRECISION) if total_macs else 0), + "{} = {:g}% latency".format( + duration_to_string(duration), + round(100 * duration / total_duration, DEFAULT_PRECISION) if total_duration else 0), + flops_to_string(round(flops / duration, DEFAULT_PRECISION) if duration else 0), + ] + original_extra_repr = module.original_extra_repr() + if original_extra_repr: + items.append(original_extra_repr) + return ", ".join(items) + + def add_extra_repr(module): + flops_extra_repr = flops_repr.__get__(module) + if module.extra_repr != flops_extra_repr: + module.original_extra_repr = module.extra_repr + module.extra_repr = flops_extra_repr + assert module.extra_repr != 
module.original_extra_repr + + def del_extra_repr(module): + if hasattr(module, "original_extra_repr"): + module.extra_repr = module.original_extra_repr + del module.original_extra_repr + + self.model.apply(add_extra_repr) + + print("\n----------------------------- Aggregated Profile per GPU -----------------------------") + self.print_model_aggregated_profile(module_depth=module_depth, top_modules=top_modules) + + if detailed: + print("\n------------------------------ Detailed Profile per GPU ------------------------------") + print( + "Each module profile is listed after its name in the following order: \nparams, percentage of total params, MACs, percentage of total MACs, fwd latency, percentage of total fwd latency, fwd FLOPS" + ) + print( + "\nNote: 1. A module can have torch.nn.module or torch.nn.functional to compute logits (e.g. CrossEntropyLoss). They are not counted as submodules, thus not to be printed out. However they make up the difference between a parent's MACs (or latency) and the sum of its submodules'.\n2. Number of floating-point operations is a theoretical estimation, thus FLOPS computed using that could be larger than the maximum system throughput.\n3. The fwd latency listed in the top module's profile is directly captured at the module forward function in PyTorch, thus it's less than the fwd latency shown above which is captured in DeepSpeed.\n" + ) + print(self.model) + + self.model.apply(del_extra_repr) + + print("------------------------------------------------------------------------------") + + if output_file: + sys.stdout = original_stdout + f.close() + + def print_model_aggregated_profile(self, module_depth=-1, top_modules=1): + """Prints the names of the top top_modules modules in terms of aggregated time, flops, and parameters at depth module_depth. + + Args: + module_depth (int, optional): the depth of the modules to show. Defaults to -1 (the innermost modules). + top_modules (int, optional): the number of top modules to show. Defaults to 1. 
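+
+        Example (illustrative):
+
+        .. code-block:: python
+
+            # top 3 module classes by params, MACs and fwd latency at every depth
+            prof.print_model_aggregated_profile(module_depth=-1, top_modules=3)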
+ """ + info = {} + if not hasattr(self.model, "__flops__"): + print("no __flops__ attribute in the model, call this function after start_profile and before end_profile") + return + + def walk_module(module, curr_depth, info): + if curr_depth not in info: + info[curr_depth] = {} + if module.__class__.__name__ not in info[curr_depth]: + info[curr_depth][module.__class__.__name__] = [ + 0, + 0, + 0, + ] # macs, params, time + info[curr_depth][module.__class__.__name__][0] += get_module_macs(module) + info[curr_depth][module.__class__.__name__][1] += module.__params__ + module.__expert_params__ + info[curr_depth][module.__class__.__name__][2] += get_module_duration(module) + has_children = len(module._modules.items()) != 0 + if has_children: + for child in module.children(): + walk_module(child, curr_depth + 1, info) + + walk_module(self.model, 0, info) + + depth = module_depth + if module_depth == -1: + depth = len(info) - 1 + + print(f'Top {top_modules} modules in terms of params, MACs or fwd latency at different model depths:') + + for d in range(depth): + num_items = min(top_modules, len(info[d])) + + sort_macs = { + k: macs_to_string(v[0]) + for k, v in sorted(info[d].items(), key=lambda item: item[1][0], reverse=True)[:num_items] + } + sort_params = { + k: params_to_string(v[1]) + for k, v in sorted(info[d].items(), key=lambda item: item[1][1], reverse=True)[:num_items] + } + sort_time = { + k: duration_to_string(v[2]) + for k, v in sorted(info[d].items(), key=lambda item: item[1][2], reverse=True)[:num_items] + } + + print(f"depth {d}:") + print(f" params - {sort_params}") + print(f" MACs - {sort_macs}") + print(f" fwd latency - {sort_time}") + + +def _prod(dims): + p = 1 + for v in dims: + p *= v + return p + + +def _linear_flops_compute(input, weight, bias=None): + out_features = weight.shape[0] + macs = input.numel() * out_features + return 2 * macs, macs + + +def _relu_flops_compute(input, inplace=False): + return input.numel(), 0 + + +def _prelu_flops_compute(input: Tensor, weight: Tensor): + return input.numel(), 0 + + +def _elu_flops_compute(input: Tensor, alpha: float = 1.0, inplace: bool = False): + return input.numel(), 0 + + +def _leaky_relu_flops_compute(input: Tensor, negative_slope: float = 0.01, inplace: bool = False): + return input.numel(), 0 + + +def _relu6_flops_compute(input: Tensor, inplace: bool = False): + return input.numel(), 0 + + +def _silu_flops_compute(input: Tensor, inplace: bool = False): + return input.numel(), 0 + + +def _gelu_flops_compute(input, **kwargs): + return input.numel(), 0 + + +def _pool_flops_compute(input, + kernel_size, + stride=None, + padding=0, + dilation=None, + ceil_mode=False, + count_include_pad=True, + divisor_override=None, + return_indices=None): + return input.numel(), 0 + + +def _conv_flops_compute(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): + assert weight.shape[1] * groups == input.shape[1] + + batch_size = input.shape[0] + in_channels = input.shape[1] + out_channels = weight.shape[0] + kernel_dims = list(weight.shape[2:]) + input_dims = list(input.shape[2:]) + + length = len(input_dims) + + strides = stride if type(stride) is tuple else (stride, ) * length + dilations = dilation if type(dilation) is tuple else (dilation, ) * length + if isinstance(padding, str): + if padding == 'valid': + paddings = (0, ) * length + elif padding == 'same': + paddings = () + for d, k in zip(dilations, kernel_dims): + total_padding = d * (k - 1) + paddings += (total_padding // 2, ) + elif isinstance(padding, tuple): + 
paddings = padding + else: + paddings = (padding, ) * length + + output_dims = [] + for idx, input_dim in enumerate(input_dims): + output_dim = (input_dim + 2 * paddings[idx] - (dilations[idx] * + (kernel_dims[idx] - 1) + 1)) // strides[idx] + 1 + output_dims.append(output_dim) + + filters_per_channel = out_channels // groups + conv_per_position_macs = int(_prod(kernel_dims)) * in_channels * filters_per_channel + active_elements_count = batch_size * int(_prod(output_dims)) + overall_conv_macs = conv_per_position_macs * active_elements_count + overall_conv_flops = 2 * overall_conv_macs + + bias_flops = 0 + if bias is not None: + bias_flops = out_channels * active_elements_count + + return int(overall_conv_flops + bias_flops), int(overall_conv_macs) + + +def _conv_trans_flops_compute( + input, + weight, + bias=None, + stride=1, + padding=0, + output_padding=0, + groups=1, + dilation=1, +): + batch_size = input.shape[0] + in_channels = input.shape[1] + out_channels = weight.shape[1] + kernel_dims = list(weight.shape[2:]) + input_dims = list(input.shape[2:]) + + length = len(input_dims) + + paddings = padding if type(padding) is tuple else (padding, ) * length + strides = stride if type(stride) is tuple else (stride, ) * length + dilations = dilation if type(dilation) is tuple else (dilation, ) * length + + output_dims = [] + for idx, input_dim in enumerate(input_dims): + + output_dim = (input_dim + 2 * paddings[idx] - (dilations[idx] * + (kernel_dims[idx] - 1) + 1)) // strides[idx] + 1 + output_dims.append(output_dim) + + paddings = padding if type(padding) is tuple else (padding, padding) + strides = stride if type(stride) is tuple else (stride, stride) + dilations = dilation if type(dilation) is tuple else (dilation, dilation) + + filters_per_channel = out_channels // groups + conv_per_position_macs = int(_prod(kernel_dims)) * in_channels * filters_per_channel + active_elements_count = batch_size * int(_prod(input_dims)) + overall_conv_macs = conv_per_position_macs * active_elements_count + overall_conv_flops = 2 * overall_conv_macs + + bias_flops = 0 + if bias is not None: + bias_flops = out_channels * batch_size * int(_prod(output_dims)) + + return int(overall_conv_flops + bias_flops), int(overall_conv_macs) + + +def _batch_norm_flops_compute( + input, + running_mean, + running_var, + weight=None, + bias=None, + training=False, + momentum=0.1, + eps=1e-05, +): + has_affine = weight is not None + if training: + # estimation + return input.numel() * (5 if has_affine else 4), 0 + flops = input.numel() * (2 if has_affine else 1) + return flops, 0 + + +def _layer_norm_flops_compute( + input: Tensor, + normalized_shape: List[int], + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + eps: float = 1e-5, +): + has_affine = weight is not None + # estimation + return input.numel() * (5 if has_affine else 4), 0 + + +def _group_norm_flops_compute(input: Tensor, + num_groups: int, + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + eps: float = 1e-5): + has_affine = weight is not None + # estimation + return input.numel() * (5 if has_affine else 4), 0 + + +def _instance_norm_flops_compute( + input: Tensor, + running_mean: Optional[Tensor] = None, + running_var: Optional[Tensor] = None, + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + use_input_stats: bool = True, + momentum: float = 0.1, + eps: float = 1e-5, +): + has_affine = weight is not None + # estimation + return input.numel() * (5 if has_affine else 4), 0 + + +def 
_upsample_flops_compute(*args, **kwargs): + input = args[0] + size = kwargs.get('size', None) + if size is None and len(args) > 1: + size = args[1] + + if size is not None: + if isinstance(size, tuple) or isinstance(size, list): + return int(_prod(size)), 0 + else: + return int(size), 0 + + scale_factor = kwargs.get('scale_factor', None) + if scale_factor is None and len(args) > 2: + scale_factor = args[2] + assert scale_factor is not None, "either size or scale_factor should be defined" + + flops = input.numel() + if isinstance(scale_factor, tuple) and len(scale_factor) == len(input): + flops *= int(_prod(scale_factor)) + else: + flops *= scale_factor**len(input) + return flops, 0 + + +def _softmax_flops_compute(input, dim=None, _stacklevel=3, dtype=None): + return input.numel(), 0 + + +def _embedding_flops_compute( + input, + weight, + padding_idx=None, + max_norm=None, + norm_type=2.0, + scale_grad_by_freq=False, + sparse=False, +): + return 0, 0 + + +def _dropout_flops_compute(input, p=0.5, training=True, inplace=False): + return 0, 0 + + +def _matmul_flops_compute(input, other, *, out=None): + """ + Count flops for the matmul operation. + """ + macs = _prod(input.shape) * other.shape[-1] + return 2 * macs, macs + + +def _addmm_flops_compute(input, mat1, mat2, *, beta=1, alpha=1, out=None): + """ + Count flops for the addmm operation. + """ + macs = _prod(mat1.shape) * mat2.shape[-1] + return 2 * macs + _prod(input.shape), macs + + +def _einsum_flops_compute(equation, *operands): + """ + Count flops for the einsum operation. + """ + equation = equation.replace(" ", "") + input_shapes = [o.shape for o in operands] + + # Re-map equation so that same equation with different alphabet + # representations will look the same. + letter_order = OrderedDict((k, 0) for k in equation if k.isalpha()).keys() + mapping = {ord(x): 97 + i for i, x in enumerate(letter_order)} + equation = equation.translate(mapping) + + np_arrs = [np.zeros(s) for s in input_shapes] + optim = np.einsum_path(equation, *np_arrs, optimize="optimal")[1] + for line in optim.split("\n"): + if "optimized flop" in line.lower(): + flop = int(float(line.split(":")[-1])) + return flop, 0 + raise NotImplementedError("Unsupported einsum operation.") + + +def _tensor_addmm_flops_compute(self, mat1, mat2, *, beta=1, alpha=1, out=None): + """ + Count flops for the tensor addmm operation. + """ + macs = _prod(mat1.shape) * mat2.shape[-1] + return 2 * macs + _prod(self.shape), macs + + +def _mul_flops_compute(input, other, *, out=None): + return _elementwise_flops_compute(input, other) + + +def _add_flops_compute(input, other, *, alpha=1, out=None): + return _elementwise_flops_compute(input, other) + + +def _elementwise_flops_compute(input, other): + if not torch.is_tensor(input): + if torch.is_tensor(other): + return _prod(other.shape), 0 + else: + return 1, 0 + elif not torch.is_tensor(other): + return _prod(input.shape), 0 + else: + dim_input = len(input.shape) + dim_other = len(other.shape) + max_dim = max(dim_input, dim_other) + + final_shape = [] + for i in range(max_dim): + in_i = input.shape[i] if i < dim_input else 1 + ot_i = other.shape[i] if i < dim_other else 1 + if in_i > ot_i: + final_shape.append(in_i) + else: + final_shape.append(ot_i) + flops = _prod(final_shape) + return flops, 0 + + +def _attn_flops_compute(q, k, v, *args, **kwargs): + """ + Count flops for the scaled_dot_product_attention operation. 
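+
+    With q of shape (..., L, E), k of shape (..., S, E) and v of shape (..., S, Ev),
+    the two batched matmuls q @ k^T and attn @ v account for roughly
+    prod(q.shape) * S + prod(q.shape[:-1]) * S * Ev MACs, which is what is summed
+    below; the softmax and scaling are not counted.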
+ """ + macs = _prod(q.shape) * k.shape[-2] + macs += _prod(q.shape[:-1]) * k.shape[-2] * v.shape[-1] + return 2 * macs, macs + + +def wrapFunc(func, funcFlopCompute): + oldFunc = func + name = func.__str__ + old_functions[name] = oldFunc + + def newFunc(*args, **kwds): + flops, macs = funcFlopCompute(*args, **kwds) + if module_flop_count: + module_flop_count[-1].append((name, flops)) + if module_mac_count and macs: + module_mac_count[-1].append((name, macs)) + return oldFunc(*args, **kwds) + + newFunc.__str__ = func.__str__ + + return newFunc + + +def _patch_functionals(): + # FC + F.linear = wrapFunc(F.linear, _linear_flops_compute) + + # convolutions + F.conv1d = wrapFunc(F.conv1d, _conv_flops_compute) + F.conv2d = wrapFunc(F.conv2d, _conv_flops_compute) + F.conv3d = wrapFunc(F.conv3d, _conv_flops_compute) + + # conv transposed + F.conv_transpose1d = wrapFunc(F.conv_transpose1d, _conv_trans_flops_compute) + F.conv_transpose2d = wrapFunc(F.conv_transpose2d, _conv_trans_flops_compute) + F.conv_transpose3d = wrapFunc(F.conv_transpose3d, _conv_trans_flops_compute) + + # activations + F.relu = wrapFunc(F.relu, _relu_flops_compute) + F.prelu = wrapFunc(F.prelu, _prelu_flops_compute) + F.elu = wrapFunc(F.elu, _elu_flops_compute) + F.leaky_relu = wrapFunc(F.leaky_relu, _leaky_relu_flops_compute) + F.relu6 = wrapFunc(F.relu6, _relu6_flops_compute) + if hasattr(F, "silu"): + F.silu = wrapFunc(F.silu, _silu_flops_compute) + F.gelu = wrapFunc(F.gelu, _gelu_flops_compute) + + # Normalizations + F.batch_norm = wrapFunc(F.batch_norm, _batch_norm_flops_compute) + F.layer_norm = wrapFunc(F.layer_norm, _layer_norm_flops_compute) + F.instance_norm = wrapFunc(F.instance_norm, _instance_norm_flops_compute) + F.group_norm = wrapFunc(F.group_norm, _group_norm_flops_compute) + + # poolings + F.avg_pool1d = wrapFunc(F.avg_pool1d, _pool_flops_compute) + F.avg_pool2d = wrapFunc(F.avg_pool2d, _pool_flops_compute) + F.avg_pool3d = wrapFunc(F.avg_pool3d, _pool_flops_compute) + F.max_pool1d = wrapFunc(F.max_pool1d, _pool_flops_compute) + F.max_pool2d = wrapFunc(F.max_pool2d, _pool_flops_compute) + F.max_pool3d = wrapFunc(F.max_pool3d, _pool_flops_compute) + F.adaptive_avg_pool1d = wrapFunc(F.adaptive_avg_pool1d, _pool_flops_compute) + F.adaptive_avg_pool2d = wrapFunc(F.adaptive_avg_pool2d, _pool_flops_compute) + F.adaptive_avg_pool3d = wrapFunc(F.adaptive_avg_pool3d, _pool_flops_compute) + F.adaptive_max_pool1d = wrapFunc(F.adaptive_max_pool1d, _pool_flops_compute) + F.adaptive_max_pool2d = wrapFunc(F.adaptive_max_pool2d, _pool_flops_compute) + F.adaptive_max_pool3d = wrapFunc(F.adaptive_max_pool3d, _pool_flops_compute) + + # upsample + F.upsample = wrapFunc(F.upsample, _upsample_flops_compute) + F.interpolate = wrapFunc(F.interpolate, _upsample_flops_compute) + + # softmax + F.softmax = wrapFunc(F.softmax, _softmax_flops_compute) + + # embedding + F.embedding = wrapFunc(F.embedding, _embedding_flops_compute) + + # attn + F.scaled_dot_product_attention = wrapFunc(F.scaled_dot_product_attention, _attn_flops_compute) + + +def _patch_tensor_methods(): + torch.matmul = wrapFunc(torch.matmul, _matmul_flops_compute) + torch.Tensor.matmul = wrapFunc(torch.Tensor.matmul, _matmul_flops_compute) + torch.Tensor.__matmul__ = wrapFunc(torch.Tensor.__matmul__, _matmul_flops_compute) + torch.mm = wrapFunc(torch.mm, _matmul_flops_compute) + torch.Tensor.mm = wrapFunc(torch.Tensor.mm, _matmul_flops_compute) + torch.bmm = wrapFunc(torch.bmm, _matmul_flops_compute) + torch.Tensor.bmm = wrapFunc(torch.Tensor.bmm, _matmul_flops_compute) 
+
+    torch.addmm = wrapFunc(torch.addmm, _addmm_flops_compute)
+    torch.Tensor.addmm = wrapFunc(torch.Tensor.addmm, _tensor_addmm_flops_compute)
+
+    torch.mul = wrapFunc(torch.mul, _mul_flops_compute)
+    torch.Tensor.mul = wrapFunc(torch.Tensor.mul, _mul_flops_compute)
+
+    torch.add = wrapFunc(torch.add, _add_flops_compute)
+    torch.Tensor.add = wrapFunc(torch.Tensor.add, _add_flops_compute)
+
+    torch.einsum = wrapFunc(torch.einsum, _einsum_flops_compute)
+
+    torch.baddbmm = wrapFunc(torch.baddbmm, _tensor_addmm_flops_compute)
+
+
+def _reload_functionals():
+    # torch.nn.functional does not support importlib.reload()
+    F.linear = old_functions[F.linear.__str__]
+    F.conv1d = old_functions[F.conv1d.__str__]
+    F.conv2d = old_functions[F.conv2d.__str__]
+    F.conv3d = old_functions[F.conv3d.__str__]
+    F.conv_transpose1d = old_functions[F.conv_transpose1d.__str__]
+    F.conv_transpose2d = old_functions[F.conv_transpose2d.__str__]
+    F.conv_transpose3d = old_functions[F.conv_transpose3d.__str__]
+    F.relu = old_functions[F.relu.__str__]
+    F.prelu = old_functions[F.prelu.__str__]
+    F.elu = old_functions[F.elu.__str__]
+    F.leaky_relu = old_functions[F.leaky_relu.__str__]
+    F.relu6 = old_functions[F.relu6.__str__]
+    if hasattr(F, "silu"):
+        F.silu = old_functions[F.silu.__str__]
+    F.gelu = old_functions[F.gelu.__str__]
+    F.batch_norm = old_functions[F.batch_norm.__str__]
+    F.layer_norm = old_functions[F.layer_norm.__str__]
+    F.instance_norm = old_functions[F.instance_norm.__str__]
+    F.group_norm = old_functions[F.group_norm.__str__]
+    F.avg_pool1d = old_functions[F.avg_pool1d.__str__]
+    F.avg_pool2d = old_functions[F.avg_pool2d.__str__]
+    F.avg_pool3d = old_functions[F.avg_pool3d.__str__]
+    F.max_pool1d = old_functions[F.max_pool1d.__str__]
+    F.max_pool2d = old_functions[F.max_pool2d.__str__]
+    F.max_pool3d = old_functions[F.max_pool3d.__str__]
+    F.adaptive_avg_pool1d = old_functions[F.adaptive_avg_pool1d.__str__]
+    F.adaptive_avg_pool2d = old_functions[F.adaptive_avg_pool2d.__str__]
+    F.adaptive_avg_pool3d = old_functions[F.adaptive_avg_pool3d.__str__]
+    F.adaptive_max_pool1d = old_functions[F.adaptive_max_pool1d.__str__]
+    F.adaptive_max_pool2d = old_functions[F.adaptive_max_pool2d.__str__]
+    F.adaptive_max_pool3d = old_functions[F.adaptive_max_pool3d.__str__]
+    F.upsample = old_functions[F.upsample.__str__]
+    F.interpolate = old_functions[F.interpolate.__str__]
+    F.softmax = old_functions[F.softmax.__str__]
+    F.embedding = old_functions[F.embedding.__str__]
+    # also restore the attention functional patched in _patch_functionals
+    F.scaled_dot_product_attention = old_functions[F.scaled_dot_product_attention.__str__]
+
+
+def _reload_tensor_methods():
+    torch.matmul = old_functions[torch.matmul.__str__]
+    torch.Tensor.matmul = old_functions[torch.Tensor.matmul.__str__]
+    torch.mm = old_functions[torch.mm.__str__]
+    torch.Tensor.mm = old_functions[torch.Tensor.mm.__str__]
+    torch.bmm = old_functions[torch.bmm.__str__]
+    torch.Tensor.bmm = old_functions[torch.Tensor.bmm.__str__]
+    torch.addmm = old_functions[torch.addmm.__str__]
+    torch.Tensor.addmm = old_functions[torch.Tensor.addmm.__str__]
+    torch.mul = old_functions[torch.mul.__str__]
+    torch.Tensor.mul = old_functions[torch.Tensor.mul.__str__]
+    torch.add = old_functions[torch.add.__str__]
+    torch.Tensor.add = old_functions[torch.Tensor.add.__str__]
+
+    torch.einsum = old_functions[torch.einsum.__str__]
+
+    torch.baddbmm = old_functions[torch.baddbmm.__str__]
+
+
+def _rnn_flops(flops, rnn_module, w_ih, w_hh, input_size):
+    gates_size = w_ih.shape[0]
+    # matrix matrix mult ih state and internal state
+    flops += 2 * w_ih.shape[0] * w_ih.shape[1] - gates_size
+    # matrix matrix mult hh state and internal state
+
flops += 2 * w_hh.shape[0] * w_hh.shape[1] - gates_size + if isinstance(rnn_module, (nn.RNN, nn.RNNCell)): + # add both operations + flops += rnn_module.hidden_size + elif isinstance(rnn_module, (nn.GRU, nn.GRUCell)): + # hadamard of r + flops += rnn_module.hidden_size + # adding operations from both states + flops += rnn_module.hidden_size * 3 + # last two hadamard _product and add + flops += rnn_module.hidden_size * 3 + elif isinstance(rnn_module, (nn.LSTM, nn.LSTMCell)): + # adding operations from both states + flops += rnn_module.hidden_size * 4 + # two hadamard _product and add for C state + flops += rnn_module.hidden_size + rnn_module.hidden_size + rnn_module.hidden_size + # final hadamard + flops += rnn_module.hidden_size + rnn_module.hidden_size + rnn_module.hidden_size + return flops + + +def _rnn_forward_hook(rnn_module, input, output): + flops = 0 + # input is a tuple containing a sequence to process and (optionally) hidden state + inp = input[0] + batch_size = inp.shape[0] + seq_length = inp.shape[1] + num_layers = rnn_module.num_layers + + for i in range(num_layers): + w_ih = rnn_module.__getattr__("weight_ih_l" + str(i)) + w_hh = rnn_module.__getattr__("weight_hh_l" + str(i)) + if i == 0: + input_size = rnn_module.input_size + else: + input_size = rnn_module.hidden_size + flops = _rnn_flops(flops, rnn_module, w_ih, w_hh, input_size) + if rnn_module.bias: + b_ih = rnn_module.__getattr__("bias_ih_l" + str(i)) + b_hh = rnn_module.__getattr__("bias_hh_l" + str(i)) + flops += b_ih.shape[0] + b_hh.shape[0] + + flops *= batch_size + flops *= seq_length + if rnn_module.bidirectional: + flops *= 2 + rnn_module.__flops__ += int(flops) + + +def _rnn_cell_forward_hook(rnn_cell_module, input, output): + flops = 0 + inp = input[0] + batch_size = inp.shape[0] + w_ih = rnn_cell_module.__getattr__("weight_ih") + w_hh = rnn_cell_module.__getattr__("weight_hh") + input_size = inp.shape[1] + flops = _rnn_flops(flops, rnn_cell_module, w_ih, w_hh, input_size) + if rnn_cell_module.bias: + b_ih = rnn_cell_module.__getattr__("bias_ih") + b_hh = rnn_cell_module.__getattr__("bias_hh") + flops += b_ih.shape[0] + b_hh.shape[0] + + flops *= batch_size + rnn_cell_module.__flops__ += int(flops) + + +MODULE_HOOK_MAPPING = { + # RNN + nn.RNN: _rnn_forward_hook, + nn.GRU: _rnn_forward_hook, + nn.LSTM: _rnn_forward_hook, + nn.RNNCell: _rnn_cell_forward_hook, + nn.LSTMCell: _rnn_cell_forward_hook, + nn.GRUCell: _rnn_cell_forward_hook, +} + + +def macs_to_string(macs, units=None, precision=DEFAULT_PRECISION): + return f"{number_to_string(macs, units=units, precision=precision)}MACs" + + +def number_to_string(num, units=None, precision=DEFAULT_PRECISION): + if units is None: + if num >= 1e12: + magnitude, units = 1e12, "T" + elif num >= 1e9: + magnitude, units = 1e9, "G" + elif num >= 1e6: + magnitude, units = 1e6, "M" + elif num >= 1e3: + magnitude, units = 1e3, "K" + elif num >= 1 or num == 0: + magnitude, units = 1, "" + elif num >= 1e-3: + magnitude, units = 1e-3, "m" + else: + magnitude, units = 1e-6, "u" + else: + if units == "T": + magnitude = 1e12 + elif units == "G": + magnitude = 1e9 + elif units == "M": + magnitude = 1e6 + elif units == "K": + magnitude = 1e3 + elif units == "m": + magnitude = 1e-3 + elif units == "u": + magnitude = 1e-6 + else: + magnitude = 1 + return f"{round(num / magnitude, precision):g} {units}" + + +def flops_to_string(flops, units=None, precision=DEFAULT_PRECISION): + return f"{number_to_string(flops, units=units, precision=precision)}FLOPS" + + +def bytes_to_string(b, 
units=None, precision=DEFAULT_PRECISION): + return f"{number_to_string(b, units=units, precision=precision)}B" + + +def params_to_string(params_num, units=None, precision=DEFAULT_PRECISION): + units = units.replace("B", "G") if units else units + return number_to_string(params_num, units=units, precision=precision).replace("G", "B").strip() + + +def duration_to_string(duration, units=None, precision=DEFAULT_PRECISION): + return f"{number_to_string(duration, units=units, precision=precision)}s" + + + # can not iterate over all submodules using self.model.modules() + # since modules() returns duplicate modules only once +def get_module_flops(module): + sum = module.__flops__ + # iterate over immediate children modules + for child in module.children(): + sum += get_module_flops(child) + return sum + + +def get_module_macs(module): + sum = module.__macs__ + # iterate over immediate children modules + for child in module.children(): + sum += get_module_macs(child) + return sum + + +def get_module_duration(module): + duration = module.__duration__ + if duration == 0: # e.g. ModuleList + for m in module.children(): + duration += get_module_duration(m) + return duration + + +def get_model_profile(model, + input_shape=None, + args=[], + kwargs={}, + print_profile=True, + detailed=True, + module_depth=-1, + top_modules=1, + warm_up=1, + as_string=True, + output_file=None, + ignore_modules=None, + mode='forward'): + """Returns the total floating-point operations, MACs, and parameters of a model. + + Example: + + .. code-block:: python + + model = torchvision.models.alexnet() + batch_size = 256 + flops, macs, params = get_model_profile(model=model, input_shape=(batch_size, 3, 224, 224))) + + Args: + model ([torch.nn.Module]): the PyTorch model to be profiled. + input_shape (tuple): input shape to the model. If specified, the model takes a tensor with this shape as the only positional argument. + args (list): list of positional arguments to the model. + kwargs (dict): dictionary of keyword arguments to the model. + print_profile (bool, optional): whether to print the model profile. Defaults to True. + detailed (bool, optional): whether to print the detailed model profile. Defaults to True. + module_depth (int, optional): the depth into the nested modules. Defaults to -1 (the inner most modules). + top_modules (int, optional): the number of top modules to print in the aggregated profile. Defaults to 3. + warm_up (int, optional): the number of warm-up steps before measuring the latency of each module. Defaults to 1. + as_string (bool, optional): whether to print the output as string. Defaults to True. + output_file (str, optional): path to the output file. If None, the profiler prints to stdout. + ignore_modules ([type], optional): the list of modules to ignore during profiling. Defaults to None. + + Returns: + The number of floating-point operations, multiply-accumulate operations (MACs), and parameters in the model. 
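+
+    Example with keyword arguments (illustrative; the input names are placeholders
+    for whatever the profiled model's forward expects):
+
+    .. code-block:: python
+
+        inputs = {"input_ids": input_ids, "attention_mask": attention_mask}
+        flops, macs, params = get_model_profile(model, kwargs=inputs, print_profile=False)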
+ """ + assert isinstance(model, nn.Module), "model must be a PyTorch module" + prof = FlopsProfiler(model) + model.eval() + + if input_shape is not None: + assert type(input_shape) is tuple, "input_shape must be a tuple" + assert len(input_shape) >= 1, "input_shape must have at least one element" + try: + input = torch.ones(()).new_empty( + (*input_shape, ), + dtype=next(model.parameters()).dtype, + device=next(model.parameters()).device, + ) + except StopIteration: + input = torch.ones(()).new_empty((*input_shape, )) + + args = [input] + assert (len(args) > 0) or (len(kwargs) > 0), "args and/or kwargs must be specified if input_shape is None" + + logger.info("Flops profiler warming-up...") + for _ in range(warm_up): + if kwargs: + if mode == 'forward': + _ = model(*args, **kwargs) + if mode == 'generate': + _ = model.generate(*args, **kwargs) + else: + if mode == 'forward': + _ = model(*args) + if mode == 'generate': + _ = model.generate(*args) + prof.start_profile(ignore_list=ignore_modules) + + if kwargs: + if mode == 'forward': + _ = model(*args, **kwargs) + if mode == 'generate': + _ = model.generate(*args, **kwargs) + else: + if mode == 'forward': + _ = model(*args) + if mode == 'generate': + _ = model.generate(*args) + + flops = prof.get_total_flops() + macs = prof.get_total_macs() + params = prof.get_total_params() + if print_profile: + prof.print_model_profile(profile_step=warm_up, + module_depth=module_depth, + top_modules=top_modules, + detailed=detailed, + output_file=output_file) + + prof.end_profile() + if as_string: + return number_to_string(flops), macs_to_string(macs), params_to_string(params) + + return flops, macs, params diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/checkpointing.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..2a21cf7ca17a4a7648025fcea70acaf534db0161 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/checkpointing.py @@ -0,0 +1,1163 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Use to partition the activations stored for backward propagation +Therefore reduces the memory consumption +Also implements CPU checkpointing and contiguous memory checkpointing +Reduces memory consumption and memory fragmentation + +Code for rng checkpointing taken from NVIDIA Megatron-LM mpu/random.py +b886b7bb972afe72bac0f5de4f42a4a7bae8ebef +""" + +# Parts of the code here are adapted from PyTorch +# repo: https://github.com/pytorch/pytorch +import copy +import torch +import contextlib +from deepspeed import comm as dist +import weakref + +import mmap +from torch import _C + +from deepspeed.runtime.config import DeepSpeedConfig +from deepspeed.utils import logger +from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage +from deepspeed.utils.timer import SynchronizedWallClockTimer as Timers, FORWARD_GLOBAL_TIMER +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank +from deepspeed.accelerator import get_accelerator + +# DeepSpeed Checkpointing Enabled or Disabled +deepspeed_checkpointing_enabled = False + +# MP parameters +mpu = None +mp_rank = None +mp_size = None +mp_group = None + +# Model Parameters +num_layers = None + +# Checkpointing buffers +contiguous_data_buffers = [] +data_offsets = [] + +contiguous_size_buffers = [] +size_offsets = [] + +timers = None + +# optimization flags +PARTITION_ACTIVATIONS = False +CPU_CHECKPOINT = False +CONTIGUOUS_CHECKPOINTING = False +SYNCHRONIZE = False +PROFILE_TIME = False + +# Default name for the model parallel rng tracker. +_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng' +transport_stream = None +cuda_device = None + + +def detach_variable(inputs, device=None): + if isinstance(inputs, tuple): + out = [] + for inp in inputs: + if not isinstance(inp, torch.Tensor): + out.append(inp) + continue + + requires_grad = inp.requires_grad + + if device is not None: + x = inp.to(device=device) + else: + x = inp + + x = x.detach() + x.requires_grad = requires_grad + out.append(x) + return tuple(out) + else: + raise RuntimeError("Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__) + + +def _set_cuda_rng_state(new_state, device=-1): + """Sets the random number generator state of the current GPU. + + Arguments: + new_state (torch.ByteTensor): The desired state + This function is adapted from PyTorch repo (torch.cuda.set_rng_state) #ignore-cuda + with a single change: the input state is not cloned. Cloning caused + major performance issues for +4 GPU cases. + """ + if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState): + # older PyTorch + def cb(): + with get_accelerator().device(device): + _C._cuda_setRNGState(new_state) + else: + # newer PyTorch + if device == -1: + device = torch.device(get_accelerator().device_name()) + elif isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device(get_accelerator().device_name(), device) + + def cb(): + idx = device.index + if idx is None: + idx = get_accelerator().current_device() + default_generator = get_accelerator().default_generator(idx) + default_generator.set_state(new_state) + + get_accelerator().lazy_call(cb) + + +class CudaRNGStatesTracker: + """Tracker for the cuda RNG states. + + Using the `add` method, a cuda rng state is initialized based on + the input `seed` and is assigned to `name`. 
Later, by forking the + rng state, we can perform operations and return to our starting + cuda state. + """ + + def __init__(self): + # Map from a string name to the cuda rng state. + self.states_ = {} + # Seeds are just for book keeping and ensure no seed is set twice. + self.seeds_ = set() + + def reset(self): + """Set to the initial state (no tracker).""" + self.states_ = {} + self.seeds_ = set() + + def get_states(self): + """Get rng states. Copy the dictionary so we have direct + pointers to the states, not just a pointer to the dictionary.""" + return copy.copy(self.states_) + + def set_states(self, states): + """Set the rng states. For efficiency purposes, we do not check + the size of seed for compatibility.""" + self.states_ = states + + def add(self, name, seed): + """Track the rng state.""" + # Check seed is not already used. + if seed in self.seeds_: + raise Exception('seed {} already exists'.format(seed)) + self.seeds_.add(seed) + # Check that state is not already defined. + if name in self.states_: + raise Exception('cuda rng state {} already exists'.format(name)) + # Get the current rng state. + orig_rng_state = get_accelerator().get_rng_state() + # Set the new state and store it. + get_accelerator().manual_seed(seed) + self.states_[name] = get_accelerator().get_rng_state() + # Reset rng state to what it was. + _set_cuda_rng_state(orig_rng_state) + + @contextlib.contextmanager + def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME): + """Fork the cuda rng state, perform operations, and exit with + the original state.""" + # Check if we have added the state + if name not in self.states_: + raise Exception('cuda rng state {} is not added'.format(name)) + # Store current rng state. + orig_cuda_rng_state = get_accelerator().get_rng_state() + # Set rng state to the desired one + _set_cuda_rng_state(self.states_[name]) + # Do the stuff we wanted to do. + try: + yield + finally: + # Update the current rng state for later use. + self.states_[name] = get_accelerator().get_rng_state() + # And set the state to the original state we started with. + _set_cuda_rng_state(orig_cuda_rng_state) + + +# RNG tracker object. +_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker() + + +def get_cuda_rng_tracker(): + """Get cuda rng tracker.""" + return _CUDA_RNG_STATE_TRACKER + + +def model_parallel_cuda_manual_seed(seed): + """Initialize model parallel cuda seed. + + This function should be called after the model parallel is + initialized. Also, no get_accelerator().manual_seed should be called + after this function. Basically, this is replacement for that + function. + Two set of RNG states are tracked: + default state: This is for data parallelism and is the same among a + set of model parallel GPUs but different across + different model parallel groups. This is used for + example for dropout in the non-model-parallel regions. + model-parallel state: This state is different among a set of model + parallel GPUs, but the same across data parallel + groups. This is used for example for dropout in + model parallel regions. + """ + global mpu + + tp_rank = bwc_tensor_model_parallel_rank(mpu) + + # 2718 is just for fun and any POSITIVE value will work. + offset = seed + 2718 + model_parallel_seed = offset + tp_rank + # Data parallel gets the original seed. 
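+    # Editor's note (illustrative arithmetic, not upstream code): with seed=1234
+    # and tp_rank=1 this yields model_parallel_seed = 1234 + 2718 + 1 = 3953,
+    # while every tensor-parallel rank keeps data_parallel_seed = 1234, so
+    # RNG-dependent ops such as dropout match across the tensor-parallel group
+    # in non-model-parallel regions but differ inside model-parallel regions.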
+ data_parallel_seed = seed + + if dist.get_rank() == 0: + logger.info( + '> initializing model parallel cuda seeds on global rank {}, ' + 'model parallel rank {}, and data parallel rank {} with ' + 'model parallel seed: {} and data parallel seed: {}'.format(dist.get_rank(), tp_rank, + mpu.get_data_parallel_rank(), + model_parallel_seed, data_parallel_seed), ) + _CUDA_RNG_STATE_TRACKER.reset() + # Set the default state. + get_accelerator().manual_seed(data_parallel_seed) + # and model parallel state. + _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, model_parallel_seed) + + +def model_parallel_reconfigure_tp_seed(seed): + global mpu + tp_rank = bwc_tensor_model_parallel_rank(mpu) + model_parallel_seed = seed + 2718 + tp_rank + with _CUDA_RNG_STATE_TRACKER.fork(): + get_accelerator().manual_seed(model_parallel_seed) + + +def get_partition_start(item): + global mp_rank, mp_size, mp_group + size = item.numel() + partition_size = size / mp_size + start = partition_size * mp_rank + return int(start) + + +def get_partition_size(item): + global mp_rank, mp_size, mp_group + size = item.numel() + assert size % mp_size == 0, "Doesn't handle if partition activation if item is not divisible by mp size" + partition_size = size / mp_size + return int(partition_size) + + +def gather_partitioned_activations(tensors, device=None): + global mp_rank, mp_size, mp_group + assert len(tensors) % 2 == 0, f'Expected even count of tensors, instead got {len(tensors)}' + inputs = [] + num_args = int(len(tensors) / 2) + for i in range(num_args): + + item = tensors[2 * i] + size = tensors[2 * i + 1] + + if not is_activation_to_checkpoint(item): + inputs.append(item) + continue + + # don't need to do all_gather if model parallel is not enabled + if mp_group is None or mp_size == 1: + item = item.view(list(size.numpy())) + if device is not None: + item = item.to(device) + inputs.append(item) + continue + + partition_size = item.numel() + tensor_size = partition_size * mp_size + if device is not None: + flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=device) + else: + flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=item.device) + part = flat_tensor.narrow(0, partition_size * mp_rank, partition_size) + part.copy_(item) + dist.all_gather_into_tensor(flat_tensor, part, group=mp_group) + input_tensor = flat_tensor.view(list(size.numpy())) + item.data = input_tensor.data + + inputs.append(item) + + return tuple(inputs) + + +def extract_tensors(all_objects): + """ + Separate objects in list/tuple into tensors and non-tensors and create a mapping to enable re-aggregation. + The order of tensors and non-tensors is preserved in their respective output groups. + + Parameters: + all_objects (list/tuple): Objects containing tensors and non-tensors to be split. + + Returns: + tuple: Containing tensors, non-tensors, and bools of whether each position in original list/tuple was a tensor. + + """ + tensor_objects = [v for v in all_objects if torch.is_tensor(v)] + non_tensor_objects = [v for v in all_objects if not torch.is_tensor(v)] + tensor_flags = [torch.is_tensor(v) for v in all_objects] + if type(all_objects) is tuple: + return tuple(tensor_objects), tuple(non_tensor_objects), tuple(tensor_flags) + return tensor_objects, non_tensor_objects, tensor_flags + + +def merge_tensors(tensor_objects, non_tensor_objects, tensor_flags): + """ + Merge two lists (or tuples) of tensors and non-tensors using a mapping of positions in merged list (or tuple). 
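+
+    Illustrative example (editor's addition): with tensor_objects = [t0, t1],
+    non_tensor_objects = ['mask', 3], and tensor_flags = (True, False, True, False),
+    the merged result is (t0, 'mask', t1, 3). (When activation partitioning is
+    enabled, the flags belonging to the flattened size tensors are dropped first.)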
+ + Parameters: + tensor_objects (list/tuple): Tensors to merge. + non_tensor_objects (list/tuple): Non-tensors to merge. + tensor_flags (list/tuple): Indicates whether each position in output is a tensor. + + Returns: + tuple: Merge of tensors and non-tensors + """ + merged_objects = [] + tensor_idx = 0 + non_tensor_idx = 0 + + real_tensor_flags = None + + # remove the flags that are assigned to the size of the flattened tensors + if PARTITION_ACTIVATIONS: + real_tensor_flags = [] + previous_flag = False + for flag in tensor_flags: + if previous_flag: + previous_flag = False + continue + previous_flag = flag + real_tensor_flags.append(flag) + else: + real_tensor_flags = tensor_flags + + for is_tensor in real_tensor_flags: + if is_tensor: + merged_objects.append(tensor_objects[tensor_idx]) + tensor_idx += 1 + else: + merged_objects.append(non_tensor_objects[non_tensor_idx]) + non_tensor_idx += 1 + + return tuple(merged_objects) + + +def is_activation_to_checkpoint(item): + """ + Is an activation to be checkpointed + """ + global mp_size + return torch.is_tensor(item) and item.is_floating_point() and item.numel() >= mp_size + + +def partition_activations(args, cpu_checkpoint, contiguous_checkpoint): + global contiguous_data_buffers, data_offsets + + inputs = [] + num_non_fp_tensors = 0 + + for arg_index, item in enumerate(args): + if not is_activation_to_checkpoint(item): + inputs.append(item) + num_non_fp_tensors += 1 + continue + + i = arg_index - num_non_fp_tensors + partition_size = get_partition_size(item) + partition = item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), partition_size).clone() + + buffer_device = torch.device('cpu') if cpu_checkpoint else partition.device + + if contiguous_checkpoint: + if i >= len(contiguous_data_buffers): + tensor_list = [ + torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device) + for _ in range(num_layers) + ] + contiguous_data_buffers.append(tensor_list) + data_offsets.append(0) + elif contiguous_data_buffers[i] is None: + tensor_list = [ + torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device) + for _ in range(num_layers) + ] + contiguous_data_buffers[i] = tensor_list + data_offsets[i] = 0 + + # Because the 'new_empty' returns uninitialized pages, + # the pages need to be populated during the cudaMemcpy time + # which increases the data copy time. To avoid this, we + # pre-populate these pages by simply writing 0 ahead of + # the actual cudaMemcpy operation time. Due to the + # previously launched GPU kernels, there is a small + # window of time here for CPUs to populate pages asynchronously. 
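+            # Editor's note: e.g. for fp16 checkpoints (2-byte elements) and the
+            # common 4 KiB page size, the write below touches every 2048th
+            # element, i.e. one element per OS page, which is enough to force
+            # the kernel to materialize each page before the bulk copy.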
+ contiguous_data_buffers[i][data_offsets[i]].data[range( + 0, contiguous_data_buffers[i][data_offsets[i]].data.shape[0], + int(mmap.PAGESIZE / contiguous_data_buffers[i][data_offsets[i]].data.element_size()))] = 0 + + contiguous_partition = contiguous_data_buffers[i][data_offsets[i]].data.copy_(partition.data) + data_offsets[i] = data_offsets[i] + 1 + inputs.append(contiguous_partition) + else: + partition = partition.cpu() if CPU_CHECKPOINT else partition + inputs.append(partition) + + return inputs + + +def get_partitioned_activations_for_backward(args, inputs, contiguous_checkpoint): + global contiguous_size_buffers, size_offsets + + new_args = [] + num_non_fp_tensors = 0 + + for arg_index, (arg, inp) in enumerate(zip(args, inputs)): + size = torch.tensor(arg.size()) if torch.is_tensor(arg) else None + if not is_activation_to_checkpoint(arg): + new_args.append(arg) + new_args.append(size) + num_non_fp_tensors += 1 + continue + + arg.data = torch.empty([], device=arg.device).data + arg.saved_data = inp.data + + new_args.append(arg) + i = arg_index - num_non_fp_tensors + + if contiguous_checkpoint: + numel = size.numel() + if i >= len(contiguous_size_buffers): + tmp = torch.tensor(()) + contiguous_size_buffers.append( + tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device)) + size_offsets.append(0) + elif contiguous_size_buffers[i] is None: + tmp = torch.tensor(()) + contiguous_size_buffers[i] = tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device) + size_offsets[i] = 0 + + contiguous_size = contiguous_size_buffers[i].narrow(0, size_offsets[i], numel).data.copy_(size.data) + contiguous_size = contiguous_size.view_as(size) + size_offsets[i] = size_offsets[i] + numel + new_args.append(contiguous_size) + else: + new_args.append(size) + + return new_args + + +def get_cpu_activations_for_backward(args, inputs): + new_args = [] + for i, (arg, inp) in enumerate(zip(args, inputs)): + if not is_activation_to_checkpoint(arg): + new_args.append(arg) + continue + + arg.data = torch.empty([], device=arg.device).data + arg.saved_data = inp.data + new_args.append(arg) + + return new_args + + +class CheckpointFunction(torch.autograd.Function): + """This function is adapted from torch.utils.checkpoint with + two main changes: + 1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state` #ignore-cuda + 2) the states in the model parallel tracker are also properly + tracked/set/reset. 
+ 3) Performance activation partitioning, contiguous memory optimization + 4) CPU Checkpointing + 5) Profile forward and backward functions + """ + + @staticmethod + def forward(ctx, run_function, all_outputs, *args): + global mpu, timers, SYNCHRONIZE, PROFILE_TIME + + def save_args_for_backward(*all_args): + tensor_args, non_tensor_args, tensor_flags = extract_tensors(all_objects=all_args) + ctx.deepspeed_saved_tensors = tensor_args + ctx.non_tensor_args = non_tensor_args + ctx.tensor_flags = tensor_flags + + if SYNCHRONIZE: + get_accelerator().synchronize() + + if timers is None and PROFILE_TIME: + timers = Timers() + + if PROFILE_TIME: + timers(FORWARD_GLOBAL_TIMER).start() + + ctx.run_function = run_function + global num_layers + global mp_rank, mp_size, mp_group + global contiguous_data_buffers, contiguous_size_buffers + global data_offsets, size_offsets + if mp_rank is None: + if mpu is not None: + if hasattr(mpu, 'get_tensor_model_parallel_rank'): + mp_rank = mpu.get_tensor_model_parallel_rank() + mp_size = mpu.get_tensor_model_parallel_world_size() + mp_group = mpu.get_tensor_model_parallel_group() + else: + mp_rank = mpu.get_model_parallel_rank() + mp_size = mpu.get_model_parallel_world_size() + mp_group = mpu.get_model_parallel_group() + else: + mp_rank = 0 + mp_size = 1 + mp_group = None + + global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset + + if cuda_device is None: + see_memory_usage("First Forward Beginning", force=False) + if dist.get_rank() == 0: + logger.info(f"Activation Checkpointing Information") + logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}") + logger.info( + f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers") + logger.info(f"----Synchronization {SYNCHRONIZE}") + logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}") + + cuda_device = get_accelerator().current_device_name() + transport_stream = get_accelerator().Stream(device=cuda_device) + + if PARTITION_ACTIVATIONS: + inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING) + elif CPU_CHECKPOINT: + inputs = copy_to_device(args, device=torch.device('cpu'), criterion_func=is_activation_to_checkpoint) + + # just in case something funky is happening such as reuse of inputs + inputs_cuda = copy_to_device(args, device=cuda_device, criterion_func=is_activation_to_checkpoint) + + # Copy the rng states. 
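+        # Editor's note: the CPU, device, and tracker RNG states captured below
+        # are restored in backward() before the layer is re-executed, so dropout
+        # and any other RNG-dependent ops are replayed exactly as in this
+        # original forward pass.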
+ ctx.fwd_cpu_rng_state = torch.get_rng_state() + ctx.fwd_cuda_rng_state = get_accelerator().get_rng_state() + ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() + + see_memory_usage("Before running forward on the layer", force=False) + # ctx.save_for_backward(*args) + with torch.no_grad(): + outputs = run_function(*inputs_cuda) + + see_memory_usage("After running forward on the layer", force=False) + del inputs_cuda + + if PARTITION_ACTIVATIONS: + new_args = get_partitioned_activations_for_backward(args, inputs, CONTIGUOUS_CHECKPOINTING) + assert len(new_args) % 2 == 0, f'save_for_backward called with odd number of args, {len(new_args)}' + save_args_for_backward(*new_args) + elif CPU_CHECKPOINT: + new_args = get_cpu_activations_for_backward(args, inputs) + save_args_for_backward(*new_args) + else: + save_args_for_backward(*args) + + if PROFILE_TIME: + timers(FORWARD_GLOBAL_TIMER).stop() + timers.log([FORWARD_GLOBAL_TIMER]) + if SYNCHRONIZE: + get_accelerator().synchronize() + + # Tensors returned from forward() may not be differentiable. + if torch.is_tensor(outputs): + non_grad_outputs = [outputs] if not outputs.is_floating_point() else [] + else: + non_grad_outputs = [o for o in outputs if torch.is_tensor(o) and not o.is_floating_point()] + ctx.mark_non_differentiable(*non_grad_outputs) + + if torch.is_tensor(outputs): + all_outputs += [outputs] + return outputs + else: + all_outputs += outputs + outputs, _, _ = extract_tensors(all_objects=outputs) + return tuple(outputs) + + @staticmethod + def backward(ctx, *grads): + global timers + see_memory_usage("In backward", force=False) + # removing pointers to the contiguous buffer memory + # so that they can be garbage collected once the checkpoints + # have been used + if SYNCHRONIZE: + get_accelerator().synchronize() + if PROFILE_TIME: + timers('backward').start() + + if CONTIGUOUS_CHECKPOINTING: + global data_offsets, size_offsets + global contiguous_data_buffers, contiguous_size_buffers + + for buffers in contiguous_data_buffers: + buffers = [] + + # frees up all the pointers to the checkpoints except for the ones + # stored by save for backward + contiguous_data_buffers = [] + contiguous_size_buffers = [] + data_offsets = [] + size_offsets = [] + + see_memory_usage("In backward checkpointing code", force=False) + if not torch.autograd._is_checkpoint_valid(): + raise RuntimeError("Checkpointing is not compatible with .grad(), " + "please use .backward() if possible") + + global cuda_device, transport_stream, PARTITION_ACTIVATIONS + + # Rebuild deepspeed_saved_tensors + for t in ctx.deepspeed_saved_tensors: + if t is not None and hasattr(t, 'saved_data') and t.saved_data is not None: + t.data = t.saved_data.to(t.device) + t.saved_data = None + + if PARTITION_ACTIVATIONS: + # with get_accelerator().stream(transport_stream): + inputs = gather_partitioned_activations(ctx.deepspeed_saved_tensors, + device=cuda_device if CPU_CHECKPOINT else None) + detached_inputs = detach_variable(inputs) + elif CPU_CHECKPOINT: + inputs = move_to_device(ctx.deepspeed_saved_tensors, cuda_device, is_activation_to_checkpoint) + detached_inputs = detach_variable(inputs) + else: + inputs = ctx.deepspeed_saved_tensors + detached_inputs = detach_variable(inputs) + + # Add non tensor input args + detached_inputs = merge_tensors(tensor_objects=detached_inputs, + non_tensor_objects=ctx.non_tensor_args, + tensor_flags=ctx.tensor_flags) + + # Store the current states. 
+ bwd_cpu_rng_state = torch.get_rng_state() + bwd_cuda_rng_state = get_accelerator().get_rng_state() + bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() + + # Set the states to what it used to be before the forward pass. + torch.set_rng_state(ctx.fwd_cpu_rng_state) + _set_cuda_rng_state(ctx.fwd_cuda_rng_state) + get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker) + + # if PARTITION_ACTIVATIONS: + # current_stream=get_accelerator().current_stream() + # current_stream.wait_stream(transport_stream) + + see_memory_usage("In backward checkpointing code before forward", force=False) + + with torch.enable_grad(): + outputs = ctx.run_function(*detached_inputs) + + see_memory_usage("In backward checkpointing code after forward", force=False) + # Set the states back to what it was at the start of this function. + torch.set_rng_state(bwd_cpu_rng_state) + _set_cuda_rng_state(bwd_cuda_rng_state) + get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker) + + if isinstance(outputs, torch.Tensor): + outputs = (outputs, ) + + # Filter out non tensor outputs + outputs, _, _ = extract_tensors(all_objects=outputs) + + # Construct arguments to autograd.backward(). + # This is usually just outputs and grads, but forward() can return tensors that + # are not differentiable. + output_tensors = [] + grad_tensors = [] + for out, grad in zip(outputs, grads): + if out.requires_grad: + output_tensors.append(out) + grad_tensors.append(grad) + + see_memory_usage("In backward checkpointing code before backward", force=False) + + torch.autograd.backward(output_tensors, grad_tensors) + + # Force clear our stashed tensors to prevent a memory leak in certain scenarios + ctx.deepspeed_saved_tensors = None + ctx.non_tensor_args = None + ctx.tensor_flags = None + + see_memory_usage("After backward checkpointing code after backward", force=False) + + if PROFILE_TIME: + timers('backward').stop() + timers.log(['backward']) + if SYNCHRONIZE: + get_accelerator().synchronize() + ret_list = [None, None] # first None for ctx + for inp in detached_inputs: + if torch.is_tensor(inp): + ret_list.append(inp.grad) + else: + ret_list.append(None) + + return tuple(ret_list) + + +def non_reentrant_checkpoint(function, *args): + """This function is union of `torch.utils.checkpoint._checkpoint_without_reentrant` and `CheckpointFunction` in this module + + This function is aim to solve the back probagation error raised from all input requires no grad. + * has already been implemented in pytorch for a while, the solution is stable at most time except for jit module mode. + * can help to solve the issue which is hacked by `deepspeed.runtime.pipe.module.PipelineModule._is_checkpointable` + + Main modifications compared to the implementation of torch: + 1. adapt to the signature of `checkpoint` function in this module + 2. solve the non-deterministic by random state management consistent with deepspeed `CheckpointFunction` + 3. when there is partition or cpu checkpointing, gather them in the unpack_hook during back probagation + 4. make all after backward blocks in the hook which will executed after all leaf nodes backward execution. + 5. above 4. 
is inspired by `torch.autograd.graph.register_multi_grad_hook`, which is only implemented after 2.0.0 + """ + global mpu, timers, SYNCHRONIZE, PROFILE_TIME + + deepspeed_saved_tensors = None + non_tensor_args = None + tensor_flags = None + + def save_args_for_backward(*all_args): + """keep this function to reduce the modification from original implementation""" + nonlocal deepspeed_saved_tensors, non_tensor_args, tensor_flags + tensor_args, non_tensor_args, tensor_flags = extract_tensors(all_objects=all_args) + deepspeed_saved_tensors = tensor_args + non_tensor_args = non_tensor_args + tensor_flags = tensor_flags + + if SYNCHRONIZE: + get_accelerator().synchronize() + + if timers is None and PROFILE_TIME: + timers = Timers() + + if PROFILE_TIME: + timers(FORWARD_GLOBAL_TIMER).start() + + global num_layers + global mp_rank, mp_size, mp_group + global contiguous_data_buffers, contiguous_size_buffers + global data_offsets, size_offsets + if mp_rank is None: + if mpu is not None: + if hasattr(mpu, 'get_tensor_model_parallel_rank'): + mp_rank = mpu.get_tensor_model_parallel_rank() + mp_size = mpu.get_tensor_model_parallel_world_size() + mp_group = mpu.get_tensor_model_parallel_group() + else: + mp_rank = mpu.get_model_parallel_rank() + mp_size = mpu.get_model_parallel_world_size() + mp_group = mpu.get_model_parallel_group() + else: + mp_rank = 0 + mp_size = 1 + mp_group = None + + global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset + + if cuda_device is None: + see_memory_usage("First Forward Beginning", force=False) + if dist.get_rank() == 0: + logger.info(f"Activation Checkpointing Information") + logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}") + logger.info( + f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers") + logger.info(f"----Synchronization {SYNCHRONIZE}") + logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}") + + cuda_device = get_accelerator().current_device_name() + transport_stream = get_accelerator().Stream(device=cuda_device) + + if PARTITION_ACTIVATIONS: + inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING) + elif CPU_CHECKPOINT: + inputs = copy_to_device(args, device=torch.device('cpu'), criterion_func=is_activation_to_checkpoint) + + # just in case something funky is happening such as reuse of inputs + inputs_cuda = copy_to_device(args, device=cuda_device, criterion_func=is_activation_to_checkpoint) + + # Copy the rng states. 
+ fwd_cpu_rng_state = torch.get_rng_state() + fwd_cuda_rng_state = get_accelerator().get_rng_state() + fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() + + if PARTITION_ACTIVATIONS: + new_args = get_partitioned_activations_for_backward(args, inputs, CONTIGUOUS_CHECKPOINTING) + assert len(new_args) % 2 == 0, f'save_for_backward called with odd number of args, {len(new_args)}' + save_args_for_backward(*new_args) + elif CPU_CHECKPOINT: + new_args = get_cpu_activations_for_backward(args, inputs) + save_args_for_backward(*new_args) + else: + save_args_for_backward(*args) + + class Holder(): + """the place holder object used as activations to save memory""" + pass + + # weakref seems utilized to discover the tensor deletion before a whole + # forward backward pair loop finished + storage: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary() + weak_holder_list = [] + leaf_tensors = [] + backward_visited_leaf_nodes = 0 + + def checkpoint_pack(tensor_from_forward): + """used to record the activation order in the `weak_holder_list` + + the activation order in holder list is consistent between the first forward and recomputing forward. + * the jit compiled forward will break the order consistency * + """ + res = Holder() + weak_holder_list.append(weakref.ref(res)) + + # if this is a leaf tensor, save it for backward progression trace + # leaf tensor used to be input or parameters, which is not activations and + # has no memory overhead + if tensor_from_forward.requires_grad and tensor_from_forward.is_leaf: + leaf_tensors.append(tensor_from_forward) + return res + + def checkpoint_unpack(holder_from_backward): + """retrieve the activations from recompute""" + nonlocal deepspeed_saved_tensors, non_tensor_args, tensor_flags + + # if this is the first step of backward probagation, recompute the graph and save + # all the activations with the same order as `checkpoint_pack` does + if len(storage) == 0: + unpack_counter = 0 + + def replay_pack(tensor_from_replay): + """save recompute activations""" + nonlocal unpack_counter + unpack_counter += 1 + + if weak_holder_list[unpack_counter - 1]() is None: + return + + detached_activations = tensor_from_replay.detach() + storage[weak_holder_list[unpack_counter - 1]()] = detached_activations + + return + + def replay_unpack(none_value): + """recompute graph need not to backward""" + raise RuntimeError("You are calling backwards on a tensor that is never exposed.") + + global timers + see_memory_usage("In backward", force=False) + # removing pointers to the contiguous buffer memory + # so that they can be garbage collected once the checkpoints + # have been used + if SYNCHRONIZE: + get_accelerator().synchronize() + if PROFILE_TIME: + timers('backward').start() + + if CONTIGUOUS_CHECKPOINTING: + global data_offsets, size_offsets + global contiguous_data_buffers, contiguous_size_buffers + + for buffers in contiguous_data_buffers: + buffers = [] + + # frees up all the pointers to the checkpoints except for the ones + # stored by save for backward + contiguous_data_buffers = [] + contiguous_size_buffers = [] + data_offsets = [] + size_offsets = [] + + see_memory_usage("In backward checkpointing code", force=False) + if not torch.autograd._is_checkpoint_valid(): + raise RuntimeError("Checkpointing is not compatible with .grad(), " + "please use .backward() if possible") + + global cuda_device, transport_stream, PARTITION_ACTIVATIONS + + # gather inputs which is partitioned or checkpointed before first forward + if PARTITION_ACTIVATIONS: + # with 
get_accelerator().stream(transport_stream): + inputs = gather_partitioned_activations(deepspeed_saved_tensors, + device=cuda_device if CPU_CHECKPOINT else None) + detached_inputs = detach_variable(inputs) + elif CPU_CHECKPOINT: + inputs = move_to_device(deepspeed_saved_tensors, cuda_device, is_activation_to_checkpoint) + detached_inputs = detach_variable(inputs) + else: + inputs = deepspeed_saved_tensors + detached_inputs = detach_variable(inputs) + + # Add non tensor input args + detached_inputs = merge_tensors(tensor_objects=detached_inputs, + non_tensor_objects=non_tensor_args, + tensor_flags=tensor_flags) + + # Store the current states. + bwd_cpu_rng_state = torch.get_rng_state() + bwd_cuda_rng_state = get_accelerator().get_rng_state() + bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() + + # Set the states to what it used to be before the forward pass. + torch.set_rng_state(fwd_cpu_rng_state) + _set_cuda_rng_state(fwd_cuda_rng_state) + get_cuda_rng_tracker().set_states(fwd_cuda_rng_state_tracker) + + see_memory_usage("In backward checkpointing code before forward", force=False) + with torch.enable_grad(), torch.autograd.graph.saved_tensors_hooks(replay_pack, replay_unpack): + _unused = function(*detached_inputs) + + see_memory_usage("In backward checkpointing code after forward", force=False) + # Set the states back to what it was at the start of this function. + torch.set_rng_state(bwd_cpu_rng_state) + _set_cuda_rng_state(bwd_cuda_rng_state) + get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker) + + deepspeed_saved_tensors = None + non_tensor_args = None + tensor_flags = None + + if holder_from_backward not in storage: + raise RuntimeError("Attempt to retrieve a tensor saved by autograd multiple times without checkpoint" + " recomputation being triggered in between, this is not currently supported.") + + return storage[holder_from_backward] + + def after_backward_hook(_nonuse_grads): + """the hook registered to all leaf tensors""" + nonlocal leaf_tensors, backward_visited_leaf_nodes + backward_visited_leaf_nodes += 1 + + if backward_visited_leaf_nodes == len(leaf_tensors): + see_memory_usage("After backward checkpointing code after backward", force=False) + + if PROFILE_TIME: + timers('backward').stop() + timers.log(['backward']) + if SYNCHRONIZE: + get_accelerator().synchronize() + + with torch.autograd.graph.saved_tensors_hooks(checkpoint_pack, checkpoint_unpack): + outputs = function(*inputs_cuda) + for leaf_tensor in leaf_tensors: + leaf_tensor.register_hook(after_backward_hook) + + see_memory_usage("After running forward on the layer", force=False) + + if PROFILE_TIME: + timers(FORWARD_GLOBAL_TIMER).stop() + timers.log([FORWARD_GLOBAL_TIMER]) + if SYNCHRONIZE: + get_accelerator().synchronize() + + all_outputs = [] + if torch.is_tensor(outputs): + all_outputs += [outputs] + else: + all_outputs += outputs + + if len(all_outputs) == 1: + return all_outputs[0] + else: + return tuple(all_outputs) + + +def checkpoint(function, *args): + """Checkpoint a model or part of the model. + This has been directly copied from torch.utils.checkpoint. 
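+
+    Illustrative usage (editor's addition; ``custom_forward`` is a hypothetical
+    closure over a transformer block, not something defined in this module):
+
+        hidden_states = deepspeed.checkpointing.checkpoint(
+            custom_forward, hidden_states, attention_mask)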
""" + + all_outputs = [] + CheckpointFunction.apply(function, all_outputs, *args) + if len(all_outputs) == 1: + return all_outputs[0] + else: + return tuple(all_outputs) + + +def partition_activations_in_checkpoint(partition_activation): + global PARTITION_ACTIVATIONS + PARTITION_ACTIVATIONS = partition_activation + if dist.get_rank() == 0: + logger.info(f"**************Partition Activations {PARTITION_ACTIVATIONS}************") + + +def set_num_layers(nlayers): + global num_layers + num_layers = nlayers + + +def reset(): + """Resets memory buffers related to contiguous memory optimizations. + Should be called during eval when multiple forward propagations are + computed without any backward propagation that usually clears these + buffers. + Arguments: + None + + Return: + None + """ + if CONTIGUOUS_CHECKPOINTING: + global data_offsets, size_offsets + global contiguous_data_buffers, contiguous_size_buffers + + for buffers in contiguous_data_buffers: + buffers = [] + + # frees up all the pointers to the checkpoints except for the ones + # stored by save for backward + contiguous_data_buffers = [] + contiguous_size_buffers = [] + data_offsets = [] + size_offsets = [] + + +def _configure_using_config_file(config, mpu=None): + global num_layers, PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \ + CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME + + config = DeepSpeedConfig(config, mpu=mpu).activation_checkpointing_config + if dist.get_rank() == 0: + logger.info(config.repr()) + PARTITION_ACTIVATIONS = config.partition_activations + CONTIGUOUS_CHECKPOINTING = config.contiguous_memory_optimization + num_layers = config.number_checkpoints + CPU_CHECKPOINT = config.cpu_checkpointing + SYNCHRONIZE = config.synchronize_checkpoint_boundary + PROFILE_TIME = config.profile + + +def _configure_defaults(): + + global mpu, num_layers, deepspeed_checkpointing_enabled + + global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \ + CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME + + PARTITION_ACTIVATIONS = False + CONTIGUOUS_CHECKPOINTING = False + num_layers = False + CPU_CHECKPOINT = False + SYNCHRONIZE = False + PROFILE_TIME = False + deepspeed_checkpointing_enabled = True + + +def configure( + mpu_, + deepspeed_config=None, + partition_activations=None, + contiguous_checkpointing=None, + num_checkpoints=None, + checkpoint_in_cpu=None, + synchronize=None, + profile=None, +): + """Configure DeepSpeed Activation Checkpointing. + + Arguments: + mpu_: Optional: An object that implements the following methods + get_model_parallel_rank/group/world_size, and get_data_parallel_rank/group/world_size + + deepspeed_config: Optional: DeepSpeed Config json file when provided will be used to + configure DeepSpeed Activation Checkpointing + + partition_activations: Optional: Partitions activation checkpoint across model parallel + GPUs when enabled. By default False. Will overwrite deepspeed_config if provided + + contiguous_checkpointing: Optional: Copies activation checkpoints to a contiguous memory + buffer. Works only with homogeneous checkpoints when partition_activations is enabled. + Must provide num_checkpoints. By default False. Will overwrite deepspeed_config if + provided + + num_checkpoints: Optional: Number of activation checkpoints stored during the forward + propagation of the model. Used to calculate the buffer size for contiguous_checkpointing + Will overwrite deepspeed_config if provided + + checkpoint_in_cpu: Optional: Moves the activation checkpoint to CPU. Only works with + partition_activation. 
Default is false. Will overwrite deepspeed_config if provided + + synchronize: Optional: Performs get_accelerator().synchronize() at the beginning and end of + each call to deepspeed.checkpointing.checkpoint for both forward and backward pass. + By default false. Will overwrite deepspeed_config if provided + + profile: Optional: Logs the forward and backward time for each + deepspeed.checkpointing.checkpoint invocation. Will overwrite deepspeed_config + if provided + + Returns: + None + """ + global mpu, num_layers, deepspeed_checkpointing_enabled + + global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \ + CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME + + _configure_defaults() + + if mpu_ is not None: + mpu = mpu_ + + if deepspeed_config is not None: + _configure_using_config_file(deepspeed_config, mpu=mpu) + + if partition_activations is not None: + PARTITION_ACTIVATIONS = partition_activations + + if contiguous_checkpointing is not None: + CONTIGUOUS_CHECKPOINTING = contiguous_checkpointing + + if num_checkpoints is not None: + num_layers = num_checkpoints + + if checkpoint_in_cpu is not None: + CPU_CHECKPOINT = checkpoint_in_cpu + + if synchronize is not None: + SYNCHRONIZE = synchronize + + if profile is not None: + PROFILE_TIME = profile + + if CONTIGUOUS_CHECKPOINTING: + assert PARTITION_ACTIVATIONS, "Contiguous Checkpointing is only available with partitioned activations. Set partitioned activations to true in deepspeed config" + if CONTIGUOUS_CHECKPOINTING: + assert num_layers is not None, "Must specify the number of layers with contiguous memory checkpointing" + + +def is_configured(): + """True if deepspeed activation checkpointing has been configured + by calling deepspeed.checkpointing.configure, else returns false + + Arguments: + None + + Return: + True of configured, else False + """ + return deepspeed_checkpointing_enabled diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/config.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/config.py new file mode 100644 index 0000000000000000000000000000000000000000..dc07388a95da039b50bad87b4aa57b12f4e41f6f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/activation_checkpointing/config.py @@ -0,0 +1,94 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.runtime.config_utils import get_scalar_param, DeepSpeedConfigObject + +######################################### +# DeepSpeed Activation Checkpointing +######################################### +# Activation Checkpointing Allows to save memory by only keeping a select few +#activations for the backpropagation. 
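+# Editor's note: these keys are normally set either in the DeepSpeed config
+# JSON (see the template string that follows) or programmatically, e.g.
+# (illustrative sketch; the mpu object and checkpoint count are assumptions):
+#
+#     import deepspeed
+#     deepspeed.checkpointing.configure(mpu_=None,
+#                                       partition_activations=True,
+#                                       contiguous_checkpointing=True,
+#                                       num_checkpoints=24,  # e.g. one per layer
+#                                       checkpoint_in_cpu=True,
+#                                       profile=True)
+#
+# contiguous_checkpointing requires partition_activations and num_checkpoints,
+# as enforced by configure() in checkpointing.py above. Note that the key this
+# module actually reads is 'partition_activations' (see
+# ACT_CHKPT_PARTITION_ACTIVATIONS below), even though the template string
+# spells it 'partitioned_activations'.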
+ACTIVATION_CHKPT_FORMAT = ''' +Activation Checkpointing should be configured as: +"session_params": { + "activation_checkpointing": { + "partitioned_activations": [true|false], + "number_checkpoints": 100, + "contiguous_memory_optimization": [true|false], + "cpu_checkpointing": [true|false], + "profile": [true|false], + "synchronize_checkpoint_boundary": [true|false], + } +} +''' + +ACT_CHKPT_PARTITION_ACTIVATIONS = 'partition_activations' +ACT_CHKPT_PARTITION_ACTIVATIONS_DEFAULT = False + +ACT_CHKPT_NUMBER_CHECKPOINTS = 'number_checkpoints' +ACT_CHKPT_NUMBER_CHECKPOINTS_DEFAULT = None + +ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION = 'contiguous_memory_optimization' +ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION_DEFAULT = False + +ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY = 'synchronize_checkpoint_boundary' +ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY_DEFAULT = False + +ACT_CHKPT_PROFILE = 'profile' +ACT_CHKPT_PROFILE_DEFAULT = False + +ACT_CHKPT_CPU_CHECKPOINTING = 'cpu_checkpointing' +ACT_CHKPT_CPU_CHECKPOINTING_DEFAULT = False + +ACT_CHKPT = 'activation_checkpointing' + +ACT_CHKPT_DEFAULT = { + ACT_CHKPT_PARTITION_ACTIVATIONS: ACT_CHKPT_PARTITION_ACTIVATIONS_DEFAULT, + ACT_CHKPT_NUMBER_CHECKPOINTS: ACT_CHKPT_NUMBER_CHECKPOINTS_DEFAULT, + ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION: ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION_DEFAULT, + ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY: ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY_DEFAULT, + ACT_CHKPT_PROFILE: ACT_CHKPT_PROFILE_DEFAULT, + ACT_CHKPT_CPU_CHECKPOINTING: ACT_CHKPT_CPU_CHECKPOINTING_DEFAULT +} + + +class DeepSpeedActivationCheckpointingConfig(DeepSpeedConfigObject): + + def __init__(self, param_dict): + super(DeepSpeedActivationCheckpointingConfig, self).__init__() + + self.partition_activations = None + self.contiguous_memory_optimization = None + self.cpu_checkpointing = None + self.number_checkpoints = None + self.synchronize_checkpoint_boundary = None + self.profile = None + + if ACT_CHKPT in param_dict.keys(): + act_chkpt_config_dict = param_dict[ACT_CHKPT] + else: + act_chkpt_config_dict = ACT_CHKPT_DEFAULT + + self._initialize(act_chkpt_config_dict) + + def _initialize(self, act_chkpt_config_dict): + self.partition_activations = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_PARTITION_ACTIVATIONS, + ACT_CHKPT_PARTITION_ACTIVATIONS_DEFAULT) + + self.contiguous_memory_optimization = get_scalar_param(act_chkpt_config_dict, + ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION, + ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION_DEFAULT) + + self.cpu_checkpointing = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_CPU_CHECKPOINTING, + ACT_CHKPT_CPU_CHECKPOINTING_DEFAULT) + + self.number_checkpoints = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_NUMBER_CHECKPOINTS, + ACT_CHKPT_NUMBER_CHECKPOINTS_DEFAULT) + + self.profile = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_PROFILE, ACT_CHKPT_PROFILE_DEFAULT) + + self.synchronize_checkpoint_boundary = get_scalar_param(act_chkpt_config_dict, + ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY, + ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY_DEFAULT) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7223ab3e06168cb73d65318cd60b94da836b6b3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..accde86150916d5be1853f98ab563a85fa3cd3d8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/engine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/module.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddbc0caced26c4e5dbcf21b821fbaa81b068b5cb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/p2p.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/p2p.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41ff8707d0fcca07c02242c6d6750ed6530e5a04 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/p2p.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/schedule.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/schedule.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b7c81b5304c0cabe6c57324b819b1fb97cbdac5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/schedule.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/topology.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/topology.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b95d50de63d235921dfba7025c3e8ac978a4b62 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/__pycache__/topology.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/module.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/module.py new file mode 100644 index 0000000000000000000000000000000000000000..8036faef72eebdc0e8d7a1e7f28880675a331f0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/module.py @@ -0,0 +1,643 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import glob + +import re as regex + +from functools import partial + +import torch +import torch.nn as nn +from deepspeed import comm as dist + +from deepspeed.utils import logger +from .. import utils as ds_utils +from ..activation_checkpointing import checkpointing +from .topology import PipeDataParallelTopology, PipelineParallelGrid +from deepspeed.runtime.state_dict_factory import SDLoaderFactory +from deepspeed.accelerator import get_accelerator +from deepspeed.checkpoint.utils import clone_tensors_for_torch_save + + +class PipelineError(Exception): + """Errors related to the use of deepspeed.PipelineModule """ + + +class LayerSpec: + """Building block for specifying pipeline-parallel modules. + + LayerSpec stores the type information and parameters for each stage in a + PipelineModule. For example: + + .. 
code-block:: python + + nn.Sequence( + torch.nn.Linear(self.in_dim, self.hidden_dim, bias=False), + torch.nn.Linear(self.hidden_hidden, self.out_dim) + ) + + becomes + + .. code-block:: python + + layer_specs = [ + LayerSpec(torch.nn.Linear, self.in_dim, self.hidden_dim, bias=False), + LayerSpec(torch.nn.Linear, self.hidden_hidden, self.out_dim)] + ] + """ + + def __init__(self, typename, *module_args, **module_kwargs): + self.typename = typename + self.module_args = module_args + self.module_kwargs = module_kwargs + + if not issubclass(typename, nn.Module): + raise RuntimeError('LayerSpec only supports torch.nn.Module types.') + + if dist.is_initialized(): + self.global_rank = dist.get_rank() + else: + self.global_rank = -1 + + def __repr__(self): + return ds_utils.call_to_str(self.typename.__name__, self.module_args, self.module_kwargs) + + def build(self, log=False): + """Build the stored specification.""" + if log: + logger.info(f'RANK={self.global_rank} building {repr(self)}') + + return self.typename(*self.module_args, **self.module_kwargs) + + +class TiedLayerSpec(LayerSpec): + + def __init__(self, key, typename, *module_args, forward_fn=None, tied_weight_attr=['weight'], **module_kwargs): + super().__init__(typename, *module_args, **module_kwargs) + self.key = key + self.forward_fn = forward_fn + self.tied_weight_attr = [tied_weight_attr] if type(tied_weight_attr) == str else tied_weight_attr + + +class PipelineModule(nn.Module): + """Modules to be parallelized with pipeline parallelism. + + The key constraint that enables pipeline parallelism is the + representation of the forward pass as a sequence of layers + and the enforcement of a simple interface between them. The + forward pass is implicitly defined by the module ``layers``. The key + assumption is that the output of each layer can be directly fed as + input to the next, like a ``torch.nn.Sequence``. The forward pass is + implicitly: + + .. code-block:: python + + def forward(self, inputs): + x = inputs + for layer in self.layers: + x = layer(x) + return x + + .. note:: + Pipeline parallelism is not compatible with ZeRO-2 and ZeRO-3. + + Args: + layers (Iterable): A sequence of layers defining pipeline structure. Can be a ``torch.nn.Sequential`` module. + num_stages (int, optional): The degree of pipeline parallelism. If not specified, ``topology`` must be provided. + topology (``deepspeed.runtime.pipe.ProcessTopology``, optional): Defines the axes of parallelism axes for training. Must be provided if ``num_stages`` is ``None``. + loss_fn (callable, optional): Loss is computed ``loss = loss_fn(outputs, label)`` + seed_layers(bool, optional): Use a different seed for each layer. Defaults to False. + seed_fn(type, optional): The custom seed generating function. Defaults to random seed generator. + base_seed (int, optional): The starting seed. Defaults to 1234. + partition_method (str, optional): The method upon which the layers are partitioned. Defaults to 'parameters'. + activation_checkpoint_interval (int, optional): The granularity activation checkpointing in terms of number of layers. 0 disables activation checkpointing. + activation_checkpoint_func (callable, optional): The function to use for activation checkpointing. Defaults to ``deepspeed.checkpointing.checkpoint``. + checkpointable_layers(list, optional): Checkpointable layers may not be checkpointed. Defaults to None which does not additional filtering. 
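+
+    Example (illustrative sketch added by the editor; assumes torch.distributed
+    is already initialized and the world size is divisible by ``num_stages``):
+
+    .. code-block:: python
+
+        specs = [
+            LayerSpec(torch.nn.Linear, 1024, 1024, bias=False),
+            LayerSpec(torch.nn.ReLU),
+            LayerSpec(torch.nn.Linear, 1024, 10),
+        ]
+        net = PipelineModule(layers=specs,
+                             num_stages=2,
+                             loss_fn=torch.nn.CrossEntropyLoss())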
+ """ + + def __init__(self, + layers, + num_stages=None, + topology=None, + loss_fn=None, + seed_layers=False, + seed_fn=None, + base_seed=1234, + partition_method='parameters', + activation_checkpoint_interval=0, + activation_checkpoint_func=checkpointing.checkpoint, + checkpointable_layers=None): + + super().__init__() + + if num_stages is None and topology is None: + raise RuntimeError('must provide num_stages or topology') + + self.micro_offset = 0 + + self.loss_fn = loss_fn + + self.checkpointable_layers = checkpointable_layers + if checkpointable_layers is not None: + assert isinstance(checkpointable_layers, list), "param `checkpointable_layers` must be type of list." + + self.seed_layers = seed_layers + self.seed_fn = seed_fn + self.base_seed = base_seed + if dist.get_rank() == 0: + try: + seed_str = self.seed_fn.__name__ + except AttributeError: + seed_str = None + print(f'SEED_LAYERS={self.seed_layers} BASE_SEED={self.base_seed} SEED_FN={seed_str}') + + # Setup world info + self.world_group = dist.new_group(ranks=range(dist.get_world_size())) + self.global_rank = dist.get_rank(group=self.world_group) + self.world_size = dist.get_world_size(group=self.world_group) + self.local_rank = int(os.environ.get("LOCAL_RANK", None)) + assert self.local_rank is not None + + if topology: + self._topo = topology + self.num_stages = self._topo.get_dim('pipe') + else: + self.num_stages = num_stages + if topology is None: + if self.world_size % self.num_stages != 0: + raise RuntimeError( + f'num_stages ({self.num_stages}) must divide distributed world size ({self.world_size})') + dp = self.world_size // num_stages + topology = PipeDataParallelTopology(num_pp=num_stages, num_dp=dp) + self._topo = topology + + # Construct communicators for pipeline topology + self._grid = PipelineParallelGrid(process_group=self.world_group, topology=self._topo) + + self.stage_id = self._topo.get_coord(self.global_rank).pipe + + # Initialize partition information + self._layer_specs = list(layers) + self._num_layers = len(self._layer_specs) + self._local_start = 0 + self._local_stop = None + self._partition_layers(method=partition_method) + + self.forward_funcs = [] + self.fwd_map = {} + self.tied_modules = nn.ModuleDict() + self.tied_weight_attrs = {} + + # Offset the random seed by the stage ID. + #newseed = get_accelerator().initial_seed() + self._grid.get_stage_id() + #ds_utils.set_random_seed(newseed) + + #with torch.random.fork_rng(devices=[get_accelerator().current_device_name()]): + self._build() + self.to(get_accelerator().device_name(self.local_rank)) + + self.tied_comms = self._index_tied_modules() + self._synchronize_tied_weights() + + self.activation_checkpoint_interval = activation_checkpoint_interval + + self.activation_checkpoint_func = activation_checkpoint_func + # if configuration use_reentrant = False, self.activation_checkpoint_func will be set to ``checkpointing.non_reentrant_checkpoint`` + + def _build(self): + specs = self._layer_specs + + for local_idx, layer in enumerate(specs[self._local_start:self._local_stop]): + layer_idx = local_idx + self._local_start + if self.seed_layers: + if self.seed_fn: + self.seed_fn(self.base_seed + layer_idx) + else: + ds_utils.set_random_seed(self.base_seed + layer_idx) + + # Recursively build PipelineModule objects + if isinstance(layer, PipelineModule): + raise NotImplementedError('RECURSIVE BUILD NOT YET IMPLEMENTED') + + # LayerSpec objects contain an nn.Module that should be allocated now. 
+ elif isinstance(layer, nn.Module): + name = str(layer_idx) + self.forward_funcs.append(layer) + self.fwd_map.update({name: len(self.forward_funcs) - 1}) + self.add_module(name, layer) + + # TiedLayerSpec objects contain an nn.Module that should be allocated now. + elif isinstance(layer, TiedLayerSpec): + # Build and register the module if we haven't seen it before. + if layer.key not in self.tied_modules: + self.tied_modules[layer.key] = layer.build() + self.tied_weight_attrs[layer.key] = layer.tied_weight_attr + + if layer.forward_fn is None: + # Just use forward() + self.forward_funcs.append(self.tied_modules[layer.key]) + else: + # User specified fn with args (module, input) + self.forward_funcs.append(partial(layer.forward_fn, self.tied_modules[layer.key])) + + # LayerSpec objects contain an nn.Module that should be allocated now. + elif isinstance(layer, LayerSpec): + module = layer.build() + name = str(layer_idx) + self.forward_funcs.append(module) + self.fwd_map.update({name: len(self.forward_funcs) - 1}) + self.add_module(name, module) + + # Last option: layer may be a functional (e.g., lambda). We do nothing in + # that case and just use it in forward() + else: + self.forward_funcs.append(layer) + + # All pipeline parameters should be considered as model parallel in the context + # of our FP16 optimizer + for p in self.parameters(): + p.ds_pipe_replicated = False + + def _get_frozen_parameter_names(self, layer): + """ Get names of frozen parameters in the layer. + + Returns: + A list of frozen parameter names + """ + if isinstance(layer, LayerSpec): + l = layer.build() + return [n for n, p in l.named_parameters() if not p.requires_grad] + elif isinstance(layer, nn.Module): + return [n for n, p in layer.named_parameters() if not p.requires_grad] + + return [] + + def _count_layer_params(self): + """Count the trainable parameters in individual layers. + + This routine will only build one layer at a time. + + Returns: + A list of the number of parameters in each layer. + """ + param_counts = [0] * len(self._layer_specs) + for idx, layer in enumerate(self._layer_specs): + if isinstance(layer, LayerSpec): + l = layer.build() + params = filter(lambda p: p.requires_grad, l.parameters()) + param_counts[idx] = sum(p.numel() for p in params) + elif isinstance(layer, nn.Module): + params = filter(lambda p: p.requires_grad, layer.parameters()) + param_counts[idx] = sum(p.numel() for p in params) + return param_counts + + def _find_layer_type(self, layername): + idxs = [] + typeregex = regex.compile(layername, regex.IGNORECASE) + for idx, layer in enumerate(self._layer_specs): + name = None + if isinstance(layer, LayerSpec): + name = layer.typename.__name__ + elif isinstance(layer, nn.Module): + name = layer.__class__.__name__ + else: + try: + name = layer.__name__ + except AttributeError: + continue + if typeregex.search(name): + idxs.append(idx) + + if len(idxs) == 0: + raise RuntimeError(f"Partitioning '{layername}' found no valid layers to partition.") + return idxs + + def forward(self, forward_input): + # We need to offset the seed by the microbatch ID. Save it in a local var to + # ensure it is preserved in the closure. Otherwise checkpointed forward funcs + # will see a different offset. 
+ self.micro_offset += 1 + + def exec_range_func(start, end): + ''' Helper function to be used with checkpoint() + Adapted from torch.utils.checkpoint:checkpoint_sequential() + ''' + local_micro_offset = self.micro_offset + 1 + + def exec_func(*inputs): + # Single tensor inputs need to be unwrapped + if len(inputs) == 1: + inputs = inputs[0] + for idx, layer in enumerate(self.forward_funcs[start:end]): + self.curr_layer = idx + self._local_start + if self.seed_layers: + new_seed = (self.base_seed * local_micro_offset) + self.curr_layer + if self.seed_fn: + self.seed_fn(new_seed) + else: + ds_utils.set_random_seed(new_seed) + + inputs = layer(inputs) + return inputs + + return exec_func + + if self.activation_checkpoint_interval == 0: + func = exec_range_func(0, len(self.forward_funcs)) + x = func(forward_input) + else: + num_layers = len(self.forward_funcs) + x = forward_input + for start_idx in range(0, num_layers, self.activation_checkpoint_interval): + end_idx = min(start_idx + self.activation_checkpoint_interval, num_layers) + + funcs = self.forward_funcs[start_idx:end_idx] + # Since we either pass tensors or tuples of tensors without unpacking, we + # need to be careful not to double-wrap tensors with tuple. + if not isinstance(x, tuple): + x = (x, ) + + if self._is_checkpointable(funcs): + x = self.activation_checkpoint_func(exec_range_func(start_idx, end_idx), *x) + else: + x = exec_range_func(start_idx, end_idx)(*x) + return x + + def _partition_layers(self, method='uniform'): + num_stages = self._topo.get_dim('pipe') + stage_id = self._topo.get_coord(self.global_rank).pipe + + if self.global_rank == 0: + logger.info(f'Partitioning pipeline stages with method {method}') + + method = method.lower() + + # Each stage gets a simple uniform number of layers. + if method == 'uniform': + num_layers = len(self._layer_specs) + self.parts = ds_utils.partition_uniform(num_items=num_layers, num_parts=num_stages) + elif method == 'parameters': + param_counts = self._count_layer_params() + self.parts = ds_utils.partition_balanced(weights=param_counts, num_parts=num_stages) + elif method.startswith('type:'): + layertype = method.split(':')[1] + binary_weights = [0] * len(self._layer_specs) + for idx in self._find_layer_type(layertype): + binary_weights[idx] = 1 + self.parts = ds_utils.partition_balanced(weights=binary_weights, num_parts=num_stages) + elif method == 'profile': + raise NotImplementedError(f'Partitioning method {method} not implemented.') + else: + raise NotImplementedError(f'Partitioning method {method} not implemented.') + + # Print some information on the partitioning. 
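+        # Editor's note (illustrative): with 24 layers, 4 stages and
+        # method='uniform', self.parts would be [0, 6, 12, 18, 24], so stage 1
+        # builds layers 6-11; the loop below prints exactly these boundaries.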
+ if self.global_rank == 0: + for stage in range(num_stages): + start = self.parts[stage] + stop = self.parts[stage + 1] + print(f'stage={stage} layers={stop - start}') + for idx, layer in enumerate(self._layer_specs[start:stop]): + name = str(layer) + if isinstance(layer, LayerSpec): + name = layer.typename.__name__ + if isinstance(layer, nn.Module): + name = layer.__class__.__name__ + else: + try: + name = layer.__name__ + except AttributeError: + pass + print(f' {idx+start:2d}: {name}') + if self.loss_fn: + try: + print(f' loss: {self.loss_fn.__name__}') + except AttributeError: + print(f' loss: {self.loss_fn.__class__.__name__}') + + self._set_bounds(start=self.parts[stage_id], stop=self.parts[stage_id + 1]) + + def allreduce_tied_weight_gradients(self): + '''All reduce the gradients of the tied weights between tied stages''' + for key, comm in self.tied_comms.items(): + for attr_name in comm['weight_attr']: + weight = getattr(self.tied_modules[key], attr_name) + dist.all_reduce(weight.grad, group=comm['group']) + + def get_tied_weights_and_groups(self): + weight_group_list = [] + for key, comm in self.tied_comms.items(): + for attr_name in comm['weight_attr']: + weight = getattr(self.tied_modules[key], attr_name) + weight_group_list.append((weight, comm['group'])) + return weight_group_list + + def _synchronize_tied_weights(self): + for key, comm in self.tied_comms.items(): + for attr_name in comm['weight_attr']: + dist.broadcast( + getattr(comm['module'], attr_name), + src=min(comm['ranks']), + group=comm['group'], + ) + + def _index_tied_modules(self): + ''' Build communication structures for tied modules. ''' + tied_comms = {} + if self._topo.get_dim('pipe') == 1: + return tied_comms + + specs = self._layer_specs + tie_keys = set(s.key for s in specs if isinstance(s, TiedLayerSpec)) + for key in tie_keys: + # Find the layers that the tied module appears in + tied_layers = [] + for idx, layer in enumerate(specs): + if isinstance(layer, TiedLayerSpec) and layer.key == key: + tied_layers.append(idx) + # Find all stages with this tied module + # TODO: Would be nice to remove the nested data/model parallelism loops and + # TODO: instead generalize in some way, since we really just care about the + # TODO: stage that owns the tied layer. Then loop over each (dp, mp, ...) + # TODO: fiber to generate process groups. + tied_stages = set(self.stage_owner(idx) for idx in tied_layers) + for dp in range(self._grid.data_parallel_size): + for mp in range(self._grid.get_slice_parallel_world_size()): + tied_ranks = [] + for s in sorted(tied_stages): + if self._grid.get_slice_parallel_world_size() > 1: + tied_ranks.append(self._grid.stage_to_global(stage_id=s, data=dp, model=mp)) + else: + tied_ranks.append(self._grid.stage_to_global(stage_id=s, data=dp)) + group = dist.new_group(ranks=tied_ranks) + + # Record this tied module if we own a local copy of it. 
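+                    # Editor's note: the canonical use case is a GPT-style model
+                    # that ties its input embedding and output projection via
+                    # TiedLayerSpec entries sharing one key on the first and last
+                    # stages; the group created above is what
+                    # allreduce_tied_weight_gradients() later uses to sum the
+                    # shared weight's gradients across those stages.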
+ if self.global_rank in tied_ranks: + assert key in self.tied_modules + if key in self.tied_modules: + tied_comms[key] = { + 'ranks': tied_ranks, + 'group': group, + 'weight_attr': self.tied_weight_attrs[key], + 'module': self.tied_modules[key], + } + # Only count the tied module once in the eyes of the FP16 optimizer + if self.global_rank != tied_ranks[0]: + for p in self.tied_modules[key].parameters(): + p.ds_pipe_replicated = True + ''' + if len(tied_comms) > 0: + print(f'RANK={self.global_rank} tied_comms={tied_comms}') + ''' + + return tied_comms + + def partitions(self): + return self.parts + + def stage_owner(self, layer_idx): + assert 0 <= layer_idx < self._num_layers + for stage in range(self._topo.get_dim('pipe')): + if self.parts[stage] <= layer_idx < self.parts[stage + 1]: + return stage + raise RuntimeError(f'Layer {layer_idx} not owned? parts={self.parts}') + + def _set_bounds(self, start=None, stop=None): + """Manually define the range of layers that will be built on this process. + + These boundaries are treated as list slices and so start is inclusive and stop is + exclusive. The default of None for both results in all layers being built + locally. + """ + self._local_start = start + self._local_stop = stop + + def set_checkpoint_interval(self, interval): + assert interval >= 0 + self.checkpoint_interval = interval + + def topology(self): + """ ProcessTopology object to query process mappings. """ + return self._topo + + def mpu(self): + return self._grid + + def num_pipeline_stages(self): + return self._topo.get_dim('pipe') + + def ckpt_prefix(self, checkpoints_path, tag): + """Build a prefix for all checkpoint files written by this module. """ + # All checkpoint files start with this + rank_name = 'module' + + # Data parallelism is omitted from the naming convention because we are agnostic + # to this in the checkpoint. + omit_dims = frozenset(['data']) + axes = [a for a in self._grid._topo.get_axis_names() if a not in omit_dims] + for dim in axes: + rank = getattr(self._grid._topo.get_coord(rank=self.global_rank), dim) + rank_name += f'-{dim}_{rank:02d}' + + ckpt_name = os.path.join(checkpoints_path, str(tag), rank_name) + return ckpt_name + + def ckpt_layer_path(self, ckpt_dir, local_layer_idx): + """Customize a prefix for a specific pipeline module layer. """ + idx = local_layer_idx + self._local_start + layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}') + rank_repr = self._grid._topo.get_rank_repr(rank=self.global_rank) + if rank_repr != '': + layer_ckpt_path += f'-{rank_repr}' + layer_ckpt_path += '-model_states.pt' + return layer_ckpt_path + + def ckpt_layer_path_list(self, ckpt_dir, local_layer_idx): + """Get all ckpt file list for a specific pipeline module layer. """ + idx = local_layer_idx + self._local_start + layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}-') + layer_ckpt_path += "*model_states.pt" + ckpt_files = glob.glob(layer_ckpt_path) + ckpt_files.sort() + return ckpt_files + + def save_state_dict(self, save_dir, checkpoint_engine, exclude_frozen_params=False): + # Processes having the same model parallel rank on different data parallel instances + # have identical layer weights. We can distribute the task of saving the layer weights + # among the data parallel ranks. For example, if a pipeline stage has 9 layers and + # if there are 2 data parallel instances, rank 0 will save the first 5 layers and + # rank 1 will save the last 4. 
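# --- Editor's illustration (not part of the diff): the comment above is exactly the
# split that the code below computes with ds_utils.partition_uniform. The offsets here
# are the "9 layers over 2 data-parallel ranks" example from that comment, written out
# by hand rather than captured from the library routine.
offsets = [0, 5, 9]
for dp_rank in range(2):
    start, end = offsets[dp_rank], offsets[dp_rank + 1]
    # dp_rank 0 -> layers 0..4 (5 layers), dp_rank 1 -> layers 5..8 (4 layers)
    assert end - start == (5 if dp_rank == 0 else 4)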
+ dp_rank = self._grid.data_parallel_id + dp_size = self._grid.data_parallel_size + num_layers = len(self.forward_funcs) + if self.checkpoint_parallel_write_pipeline: + # spread layers evenly across data parallel ranks + offsets = ds_utils.partition_uniform(num_layers, dp_size) + start, end = offsets[dp_rank], offsets[dp_rank + 1] + else: + # data parallel rank 0 writes all layers + if dp_rank != 0: + return + start, end = 0, num_layers + layer_list = self.forward_funcs[start:end] + + checkpoint_engine.makedirs(save_dir, exist_ok=True) + for idx, layer in enumerate(layer_list): + model_ckpt_path = self.ckpt_layer_path(save_dir, start + idx) + if not hasattr(layer, 'state_dict'): + continue + + orig_state_dict = layer.state_dict() + if exclude_frozen_params: + for n in self._get_frozen_parameter_names(layer): + del orig_state_dict[n] + final_state_dict = clone_tensors_for_torch_save(orig_state_dict) + checkpoint_engine.save(final_state_dict, model_ckpt_path) + + def load_state_dir(self, load_dir, checkpoint_engine, strict=True): + for idx, layer in enumerate(self.forward_funcs): + # Functions, etc. will not have state_dicts + if not hasattr(layer, 'load_state_dict'): + continue + + # get all checkpoint files for the layer. + model_ckpt_list = self.ckpt_layer_path_list(load_dir, idx) + mp_rank = self._grid.get_slice_parallel_rank() + mp_world_size = self._grid.get_slice_parallel_world_size() + + sd_loader = SDLoaderFactory.get_sd_loader(model_ckpt_list, + version=2.0, + checkpoint_engine=checkpoint_engine) + load_path, checkpoint, _ = sd_loader.load(mp_world_size, mp_rank, module_key=None, is_pipe_parallel=True) + + layer.load_state_dict(checkpoint, strict=strict) + + # if self._grid.data_parallel_id == 0: + # logger.info( + # f'RANK={self.global_rank} Loaded layer={idx+self._local_start} file={load_path}' + # ) + + self._synchronize_tied_weights() + + def _is_checkpointable(self, funcs): + + if self.activation_checkpoint_func is not checkpointing.non_reentrant_checkpoint: + # This hook excludes the embedding layer + # because only non_reentrant_checkpoint can accept inputs with requires_grad=False + # otherwise, the backward of the embedding layer won't receive gradients. + if self.__class__.__name__ in ('GPTModelPipe', 'GPT2ModelPipe'): + return all('ParallelTransformerLayerPipe' in f.__class__.__name__ for f in funcs) + if self.checkpointable_layers is not None: + return all(f.__class__.__name__ in self.checkpointable_layers for f in funcs) + params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)] + return any(len(list(p)) > 0 for p in params) + + def get_additional_losses(self): + """ Returns model specific additional losses for reporting + + Return a dictionary of {"loss name": loss_value} or None if no additional losses. + """ + return None diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/p2p.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/p2p.py new file mode 100644 index 0000000000000000000000000000000000000000..2b12a9573c4b9b0b6d7777e9d562a976fa3027d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/pipe/p2p.py @@ -0,0 +1,182 @@ +# Copyright (c) Microsoft Corporation. 
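# --- Editor's illustration (not part of the p2p.py diff that begins above): a sketch
# of the fallback rule in PipelineModule._is_checkpointable(), defined just before
# this file. ToyBlock and relu_layer are hypothetical placeholders, not DeepSpeed
# classes; with checkpointable_layers unset, a span of layers is recomputed only if
# at least one of its entries is an nn.Module that owns parameters.
import torch

class ToyBlock(torch.nn.Module):

    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(4, 4)    # has parameters -> span is checkpointable

def relu_layer(x):                           # a bare function contributes no parameters
    return torch.relu(x)

funcs = [ToyBlock(), relu_layer]
params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)]
assert any(len(list(p)) > 0 for p in params)
# When checkpointable_layers is set, the whitelist branch applies instead:
# all(f.__class__.__name__ in checkpointable_layers for f in funcs).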
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import pickle +import typing + +import torch +from deepspeed import comm as dist + +from deepspeed.utils.torch import required_torch_version +from deepspeed.accelerator import get_accelerator + +_groups = None +_grid = None + +_async = [] + + +def can_send_recv() -> bool: + return required_torch_version(min_version=1.8) + + +#initializes adjacent process groups +#run this only after deepspeed.init_distributed() has been called +def init_process_groups(grid): + global _groups, _grid + _grid = grid + + assert _grid.pipe_parallel_size > 1, "There is no pipeline parallelism" + + if not can_send_recv(): + _groups = [dist.new_group(ranks=group) for group in _grid.p2p_groups] + + +def _is_valid_send_recv(src_stage, dest_stage): + first_stage = 0 + last_stage = _grid.pipe_parallel_size - 1 + assert abs(src_stage-dest_stage) == 1 or \ + (src_stage == first_stage and dest_stage == last_stage) or \ + (src_stage == last_stage and dest_stage == first_stage), \ + "Functionality currently limited to send and receive between adjacent ranks only" + + +def send(tensor, dest_stage, async_op=False): + global _groups + assert async_op == False, "Doesn't support async_op true" + src_stage = _grid.get_stage_id() + _is_valid_send_recv(src_stage, dest_stage) + + dest_rank = _grid.stage_to_global(stage_id=dest_stage) + if async_op: + global _async + op = dist.isend(tensor, dest_rank) + _async.append(op) + else: + + if can_send_recv(): + return dist.send(tensor, dest_rank) + else: + group = _get_send_recv_group(src_stage, dest_stage) + src_rank = _grid.stage_to_global(stage_id=src_stage) + return dist.broadcast(tensor, src_rank, group=group, async_op=async_op) + + +def recv(tensor, src_stage, async_op=False): + global _groups + assert async_op == False, "Doesn't support async_op true" + dest_stage = _grid.get_stage_id() + _is_valid_send_recv(src_stage, dest_stage) + + src_rank = _grid.stage_to_global(stage_id=src_stage) + + if async_op: + global _async + op = dist.irecv(tensor, src_rank) + _async.append(op) + else: + if can_send_recv(): + return dist.recv(tensor, src_rank) + else: + group = _get_send_recv_group(src_stage, dest_stage) + return dist.broadcast(tensor, src_rank, group=group, async_op=async_op) + + +def wait(): + global _async + for op in _async: + op.wait() + _async = [] + + get_accelerator().synchronize() + + +def send_obj(msg: typing.Any, dest: int): + """Send an arbitrary python object to ``dest``. + + Note: ``msg`` must be pickleable. + + WARN: This incurs a CPU -> GPU transfer and should be used sparingly + for performance reasons. + + Args: + msg (typing.Any): The object to send. + dest (int): Destination rank. + """ + # serialize the message + msg = pickle.dumps(msg) + # construct a tensor to send + msg = torch.ByteTensor(torch.ByteStorage.from_buffer(msg)).to(get_accelerator().device_name()) + + # Send meta and message + length_tensor = torch.tensor([len(msg)], dtype=torch.long).to(get_accelerator().device_name()) + dist.send(length_tensor, dst=dest) + dist.send(msg, dst=dest) + + +def recv_obj(sender: int) -> typing.Any: + """Receive an arbitrary python object from ``sender``. + + WARN: This incur a CPU <-> GPU transfers and should be used sparingly + for performance reasons. + + Args: + sender (int): The rank sending the message. 
+ """ + # Get message meta + length = torch.tensor([0], dtype=torch.long).to(get_accelerator().device_name()) + dist.recv(length, src=sender) + + # Receive and deserialize + msg = torch.empty(length.item(), dtype=torch.uint8).to(get_accelerator().device_name()) + dist.recv(msg, src=sender) + + msg = pickle.loads(msg.cpu().numpy().tobytes()) + + def _to(x): + """Recursively move to the current device.""" + if torch.is_tensor(x): + return x.to(get_accelerator().device_name()) + if isinstance(x, (tuple, list)): + ret = [_to(x_) for x_ in x] + if isinstance(x, tuple): + ret = tuple(ret) + return ret + # handle kwargs + if isinstance(x, dict): + ret = dict() + for key, val in x.items(): + ret[_to(key)] = _to(val) + return ret + + # Anything else is a no-op + return x + + msg = _to(msg) + return msg + + +def _get_send_recv_group(src_stage, dest_stage): + '''the group id is always the smaller rank unless its a wrap around''' + + stage_id = None + + first_stage = 0 + last_stage = _grid.pipe_parallel_size - 1 + + if (src_stage == first_stage and dest_stage == last_stage + or dest_stage == first_stage and src_stage == last_stage): + stage_id = last_stage + elif src_stage > dest_stage: + stage_id = dest_stage + else: + stage_id = src_stage + '''group_id corresponds to group of [group_id, group_id+1] + unless group_id is the rank of the last stage + in which case group_id corresponds to group[group_id-num_stages+1, group_id] + ''' + group_id = _grid.stage_to_global(stage_id=stage_id) + + return _groups[group_id] diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-292.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-292.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..83ac698458c7adac8bcda219b26f50cb0b2a2100 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jd-292.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e6a38d79d8f9e53a2ce11b68b4153062d4e96ec0b368d02b2e64f1b33c51693 +size 551 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jd-40675.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jd-40675.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..b376ef7c9d32dd344e0fff0be5a30ae1e6dda779 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jd-40675.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a787772d60fbfcc21a0e96fd81906f03542e0b942d19dcc95dae47498953a4fd +size 323 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdf-40675.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdf-40675.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..d74f6d6f085d991634610476015839faf034ff2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdf-40675.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d48d9679789d6baf7d0d3c346e3576d7589b663c3640942f9c1dba76e355faaa +size 307 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-s-act-.json.gz 
b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..de6ccfccc5f28d446f34b7ffd7fcf83688cb00cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:141ba630e039ea44bbaef92a288e2d964fc3aa2ef805a9723b4aac738a26a627 +size 88 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/data-v1-dl-4965250.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/data-v1-dl-4965250.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..d1d26798a46116abdc22f357615f381a19bccf99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/data-v1-dl-4965250.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:543d0887312f43d9f65a7e1d08be78a2436369f632d7382b4134cebb525a48a3 +size 3000 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/api-v1-jd-62.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/api-v1-jd-62.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..da14f86aac08c072962c2eecf6fe18cf319c5718 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/api-v1-jd-62.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ef3551ad47d48023c5a1f1cf077047a9a4b95544bb91d4a86097f8b574f8d07 +size 656 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/api-v1-jdq-62.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/api-v1-jdq-62.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..c8c1985e0bf13abce1abad45a5d872ccbcd44478 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_62/api-v1-jdq-62.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:278a52a52d569f07d14c6a7877b104762c77daac429fb1fd9817a0378d6ec634 +size 805
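Referring back to send_obj/recv_obj in deepspeed/runtime/pipe/p2p.py above: the sketch below replays their length-prefixed pickle protocol without a process group, using a plain Python list as a hypothetical stand-in for the dist.send/dist.recv pair and skipping the accelerator device moves, so it only illustrates the message layout, not the actual communication path.

import pickle
import torch

def pack(obj):
    """Serialize obj the way send_obj does: a long length tensor first, then a uint8 payload."""
    raw = pickle.dumps(obj)
    payload = torch.tensor(list(raw), dtype=torch.uint8)
    length = torch.tensor([payload.numel()], dtype=torch.long)
    return [length, payload]          # meta message first, data message second

def unpack(messages):
    """Mirror recv_obj: read the length, take that many bytes, unpickle."""
    length, payload = messages
    assert payload.numel() == length.item()
    return pickle.loads(bytes(payload.tolist()))

wire = pack({"step": 3, "loss": 0.25})        # stands in for the two dist.send calls
assert unpack(wire) == {"step": 3, "loss": 0.25}

In the real helpers the two messages travel as separate dist.send/dist.recv calls and the payload is first moved to the accelerator device, which is why the docstrings warn that these utilities incur CPU <-> GPU transfers and should be used sparingly.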