diff --git a/.gitattributes b/.gitattributes index 0167660d5e399c31cd91aa2fc2fdbc3854ef488c..f92b1c0bfac048c6e433ff55cb3af1911ac00466 100644 --- a/.gitattributes +++ b/.gitattributes @@ -133,3 +133,6 @@ venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_infer.so.8 filte venv/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_73_mp_rank_01_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_73_mp_rank_01_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..b8a14bf13681d1e517970095b5a29f06575b4e4e --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_73_mp_rank_01_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:081c3fe30e2795fbebbb341a47b983accf3111d9cfe7e6ebd92947a65c70d497 +size 41830202 diff --git a/ckpts/llama-3b/global_step100/layer_04-model_00-model_states.pt b/ckpts/llama-3b/global_step100/layer_04-model_00-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..b0b9db3065723f8953aa9ba6f3ba47d9924e7a41 --- /dev/null +++ b/ckpts/llama-3b/global_step100/layer_04-model_00-model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3889c55c2ae1034257f57e67f9a9e2cac9989c5cbde48fb99194c3f2def5d1cc +size 116407086 diff --git a/venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c0e11581f8819d518d7664c7724c0ce63abad5ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e81bf00d65473806844c2e1800e77c23d62a118255ab7c848c6619b444de4a6e +size 2586576 diff --git a/venv/lib/python3.10/site-packages/deepspeed/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fe0043547860de44623ab9d191d181bafc1af4fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/__init__.py @@ -0,0 +1,348 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import sys +import types +import json +from typing import Optional, Union +import torch +from torch.optim import Optimizer +from torch.optim.lr_scheduler import _LRScheduler +from packaging import version as pkg_version + +# Skip Triton import for AMD due to pytorch-triton-rocm module breaking device API in DeepSpeed +if not (hasattr(torch.version, 'hip') and torch.version.hip is not None): + try: + import triton # noqa: F401 # type: ignore + HAS_TRITON = True + except ImportError: + HAS_TRITON = False +else: + HAS_TRITON = False + +from . import ops +from . import module_inject + +from .accelerator import get_accelerator +from .constants import TORCH_DISTRIBUTED_DEFAULT_PORT +from .runtime.engine import DeepSpeedEngine, DeepSpeedOptimizerCallable, DeepSpeedSchedulerCallable +from .runtime.engine import ADAM_OPTIMIZER, LAMB_OPTIMIZER +from .runtime.hybrid_engine import DeepSpeedHybridEngine +from .runtime.pipe.engine import PipelineEngine +from .inference.engine import InferenceEngine +from .inference.config import DeepSpeedInferenceConfig +from .runtime.lr_schedules import add_tuning_arguments +from .runtime.config import DeepSpeedConfig, DeepSpeedConfigError +from .runtime.activation_checkpointing import checkpointing +from .ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig +from .module_inject import replace_transformer_layer, revert_transformer_layer + +from .utils import log_dist, OnDevice, logger +from .comm.comm import init_distributed + +from .runtime import zero +from .runtime.compiler import is_compile_supported + +from .pipe import PipelineModule + +from .git_version_info import version, git_hash, git_branch + + +def _parse_version(version_str): + '''Parse a version string and extract the major, minor, and patch versions.''' + ver = pkg_version.parse(version_str) + return ver.major, ver.minor, ver.micro + + +# Export version information +__version__ = version +__version_major__, __version_minor__, __version_patch__ = _parse_version(__version__) +__git_hash__ = git_hash +__git_branch__ = git_branch + +# Set to torch's distributed package or deepspeed.comm based inside DeepSpeedEngine init +dist = None + + +def initialize(args=None, + model: torch.nn.Module = None, + optimizer: Optional[Union[Optimizer, DeepSpeedOptimizerCallable]] = None, + model_parameters: Optional[torch.nn.Module] = None, + training_data: Optional[torch.utils.data.Dataset] = None, + lr_scheduler: Optional[Union[_LRScheduler, DeepSpeedSchedulerCallable]] = None, + distributed_port: int = TORCH_DISTRIBUTED_DEFAULT_PORT, + mpu=None, + dist_init_required: Optional[bool] = None, + collate_fn=None, + config=None, + config_params=None): + """Initialize the DeepSpeed Engine. + + Arguments: + args: an object containing local_rank and deepspeed_config fields. + This is optional if `config` is passed. + + model: Required: nn.module class before apply any wrappers + + optimizer: Optional: a user defined Optimizer or Callable that returns an Optimizer object. + This overrides any optimizer definition in the DeepSpeed json config. + + model_parameters: Optional: An iterable of torch.Tensors or dicts. + Specifies what Tensors should be optimized. + + training_data: Optional: Dataset of type torch.utils.data.Dataset + + lr_scheduler: Optional: Learning Rate Scheduler Object or a Callable that takes an Optimizer and returns a Scheduler object. 
+ The scheduler object should define a get_lr(), step(), state_dict(), and load_state_dict() methods + + distributed_port: Optional: Master node (rank 0)'s free port that needs to be used for communication during distributed training + + mpu: Optional: A model parallelism unit object that implements + get_{model,data}_parallel_{rank,group,world_size}() + + dist_init_required: Optional: None will auto-initialize torch distributed if needed, + otherwise the user can force it to be initialized or not via boolean. + + collate_fn: Optional: Merges a list of samples to form a + mini-batch of Tensor(s). Used when using batched loading from a + map-style dataset. + + config: Optional: Instead of requiring args.deepspeed_config you can pass your deepspeed config + as an argument instead, as a path or a dictionary. + + config_params: Optional: Same as `config`, kept for backwards compatibility. + + Returns: + A tuple of ``engine``, ``optimizer``, ``training_dataloader``, ``lr_scheduler`` + + * ``engine``: DeepSpeed runtime engine which wraps the client model for distributed training. + + * ``optimizer``: Wrapped optimizer if a user defined ``optimizer`` is supplied, or if + optimizer is specified in json config else ``None``. + + * ``training_dataloader``: DeepSpeed dataloader if ``training_data`` was supplied, + otherwise ``None``. + + * ``lr_scheduler``: Wrapped lr scheduler if user ``lr_scheduler`` is passed, or + if ``lr_scheduler`` specified in JSON configuration. Otherwise ``None``. + """ + log_dist("DeepSpeed info: version={}, git-hash={}, git-branch={}".format(__version__, __git_hash__, + __git_branch__), + ranks=[0]) + + # Disable zero.Init context if it's currently enabled + zero.partition_parameters.shutdown_init_context() + + assert model is not None, "deepspeed.initialize requires a model" + + global dist + from deepspeed import comm as dist + dist_backend = get_accelerator().communication_backend_name() + dist.init_distributed(dist_backend=dist_backend, + distributed_port=distributed_port, + dist_init_required=dist_init_required) + + # Set config using config_params for backwards compat + if config is None and config_params is not None: + config = config_params + + # Check for deepscale_config for backwards compat + if hasattr(args, "deepscale_config") and args.deepscale_config is not None: + logger.warning("************ --deepscale_config is deprecated, please use --deepspeed_config ************") + if hasattr(args, "deepspeed_config"): + assert (args.deepspeed_config is + None), "Not sure how to proceed, we were given both a deepscale_config and deepspeed_config" + args.deepspeed_config = args.deepscale_config + args.deepscale_config = None + + # Check that we have only one config passed + if hasattr(args, "deepspeed_config") and args.deepspeed_config is not None: + assert config is None, "Not sure how to proceed, we were given deepspeed configs in the deepspeed arguments and deepspeed.initialize() function call" + config = args.deepspeed_config + assert config is not None, "DeepSpeed requires --deepspeed_config to specify configuration file" + + if not isinstance(model, PipelineModule): + config_class = DeepSpeedConfig(config, mpu) + if config_class.hybrid_engine.enabled: + engine = DeepSpeedHybridEngine(args=args, + model=model, + optimizer=optimizer, + model_parameters=model_parameters, + training_data=training_data, + lr_scheduler=lr_scheduler, + mpu=mpu, + dist_init_required=dist_init_required, + collate_fn=collate_fn, + config=config, + config_class=config_class) + else: + 
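+                # Hybrid engine is not enabled in the config, so wrap the client model in the standard DeepSpeedEngine.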
engine = DeepSpeedEngine(args=args, + model=model, + optimizer=optimizer, + model_parameters=model_parameters, + training_data=training_data, + lr_scheduler=lr_scheduler, + mpu=mpu, + dist_init_required=dist_init_required, + collate_fn=collate_fn, + config=config, + config_class=config_class) + else: + assert mpu is None, "mpu must be None with pipeline parallelism" + mpu = model.mpu() + config_class = DeepSpeedConfig(config, mpu) + engine = PipelineEngine(args=args, + model=model, + optimizer=optimizer, + model_parameters=model_parameters, + training_data=training_data, + lr_scheduler=lr_scheduler, + mpu=mpu, + dist_init_required=dist_init_required, + collate_fn=collate_fn, + config=config, + config_class=config_class) + + # Restore zero.Init context if necessary + zero.partition_parameters.restore_init_context() + + return_items = [engine, engine.optimizer, engine.training_dataloader, engine.lr_scheduler] + return tuple(return_items) + + +def _add_core_arguments(parser): + r"""Helper (internal) function to update an argument parser with an argument group of the core DeepSpeed arguments. + The core set of DeepSpeed arguments include the following: + 1) --deepspeed: boolean flag to enable DeepSpeed + 2) --deepspeed_config : path of a json configuration file to configure DeepSpeed runtime. + + This is a helper function to the public add_config_arguments() + + Arguments: + parser: argument parser + Return: + parser: Updated Parser + """ + group = parser.add_argument_group('DeepSpeed', 'DeepSpeed configurations') + + group.add_argument('--deepspeed', + default=False, + action='store_true', + help='Enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend)') + + group.add_argument('--deepspeed_config', default=None, type=str, help='DeepSpeed json configuration file.') + + group.add_argument('--deepscale', + default=False, + action='store_true', + help='Deprecated enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend)') + + group.add_argument('--deepscale_config', + default=None, + type=str, + help='Deprecated DeepSpeed json configuration file.') + + return parser + + +def add_config_arguments(parser): + r"""Update the argument parser to enabling parsing of DeepSpeed command line arguments. + The set of DeepSpeed arguments include the following: + 1) --deepspeed: boolean flag to enable DeepSpeed + 2) --deepspeed_config : path of a json configuration file to configure DeepSpeed runtime. + + Arguments: + parser: argument parser + Return: + parser: Updated Parser + """ + parser = _add_core_arguments(parser) + + return parser + + +def default_inference_config(): + """ + Return a default DeepSpeed inference configuration dictionary. + """ + return DeepSpeedInferenceConfig().dict() + + +def init_inference(model, config=None, **kwargs): + """Initialize the DeepSpeed InferenceEngine. + + Description: all four cases are valid and supported in DS init_inference() API. + + # Case 1: user provides no config and no kwargs. Default config will be used. + + .. code-block:: python + + generator.model = deepspeed.init_inference(generator.model) + string = generator("DeepSpeed is") + print(string) + + # Case 2: user provides a config and no kwargs. User supplied config will be used. + + .. code-block:: python + + generator.model = deepspeed.init_inference(generator.model, config=config) + string = generator("DeepSpeed is") + print(string) + + # Case 3: user provides no config and uses keyword arguments (kwargs) only. + + .. 
code-block:: python + + generator.model = deepspeed.init_inference(generator.model, + tensor_parallel={"tp_size": world_size}, + dtype=torch.half, + replace_with_kernel_inject=True) + string = generator("DeepSpeed is") + print(string) + + # Case 4: user provides config and keyword arguments (kwargs). Both config and kwargs are merged and kwargs take precedence. + + .. code-block:: python + + generator.model = deepspeed.init_inference(generator.model, config={"dtype": torch.half}, replace_with_kernel_inject=True) + string = generator("DeepSpeed is") + print(string) + + Arguments: + model: Required: original nn.module object without any wrappers + + config: Optional: instead of arguments, you can pass in a DS inference config dict or path to JSON file + + Returns: + A deepspeed.InferenceEngine wrapped model. + """ + log_dist("DeepSpeed info: version={}, git-hash={}, git-branch={}".format(__version__, __git_hash__, + __git_branch__), + ranks=[0]) + + # Load config_dict from config first + if config is None: + config = {} + if isinstance(config, str): + with open(config, "r") as f: + config_dict = json.load(f) + elif isinstance(config, dict): + config_dict = config + else: + raise ValueError(f"'config' argument expected string or dictionary, got {type(config)}") + + # Update with values from kwargs, ensuring no conflicting overlap between config and kwargs + overlap_keys = set(config_dict.keys()).intersection(kwargs.keys()) + # If there is overlap, error out if values are different + for key in overlap_keys: + if config_dict[key] != kwargs[key]: + raise ValueError(f"Conflicting argument '{key}' in 'config':{config_dict[key]} and kwargs:{kwargs[key]}") + config_dict.update(kwargs) + + ds_inference_config = DeepSpeedInferenceConfig(**config_dict) + + engine = InferenceEngine(model, config=ds_inference_config) + + return engine diff --git a/venv/lib/python3.10/site-packages/deepspeed/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3823b49a7d6df379a4d10c7b1b9f20e2ab7362c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbdc1efe3c868cdc352809fc84ccc906b082519e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/__pycache__/env_report.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/env_report.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..105a58fd62212511a70a9fed9e0449dbb142691b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/env_report.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/__pycache__/git_version_info.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/git_version_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c296ae40f3c07bc7306c0d0450e45b0a1ed0d91 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/git_version_info.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/__pycache__/git_version_info_installed.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/git_version_info_installed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39cd3de4c9a43c676832929168c3e58d36f3b5e9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/git_version_info_installed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/__pycache__/pydantic_v1.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/pydantic_v1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a422f0baa49c87b8b6644982199e3586f335370 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/__pycache__/pydantic_v1.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/constants.py b/venv/lib/python3.10/site-packages/deepspeed/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..30135f41b7b68920c32bb2a259a8e431196f4d53 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/constants.py @@ -0,0 +1,21 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +from datetime import timedelta + +############################################# +# Torch distributed constants +############################################# +TORCH_DISTRIBUTED_DEFAULT_PORT = 29500 + +# Default process group wide timeout, if applicable. +# This only applies to the gloo and nccl backends +# (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1). +# To make an attempt at backwards compatibility with THD, we use an +# extraordinarily high default timeout, given that THD did not have timeouts. +default_pg_timeout = timedelta(minutes=int(os.getenv("DEEPSPEED_TIMEOUT", default=30))) +INFERENCE_GENERIC_MODE = 'generic' +INFERENCE_SPECIALIZED_MODE = 'specialized' diff --git a/venv/lib/python3.10/site-packages/deepspeed/env_report.py b/venv/lib/python3.10/site-packages/deepspeed/env_report.py new file mode 100644 index 0000000000000000000000000000000000000000..85a2f9b2b6c04f037d3ac03c9bcbcb2c532e704b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/env_report.py @@ -0,0 +1,195 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import torch +import deepspeed +import subprocess +import argparse +from .ops.op_builder.all_ops import ALL_OPS +from .git_version_info import installed_ops, torch_info, accelerator_name +from deepspeed.accelerator import get_accelerator + +GREEN = '\033[92m' +RED = '\033[91m' +YELLOW = '\033[93m' +END = '\033[0m' +SUCCESS = f"{GREEN} [SUCCESS] {END}" +OKAY = f"{GREEN}[OKAY]{END}" +WARNING = f"{YELLOW}[WARNING]{END}" +FAIL = f'{RED}[FAIL]{END}' +INFO = '[INFO]' + +color_len = len(GREEN) + len(END) +okay = f"{GREEN}[OKAY]{END}" +warning = f"{YELLOW}[WARNING]{END}" + + +def op_report(verbose=True): + max_dots = 23 + max_dots2 = 11 + h = ["op name", "installed", "compatible"] + print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1]))) + print("DeepSpeed C++/CUDA extension op report") + print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1]))) + + print("NOTE: Ops not installed will be just-in-time (JIT) compiled at\n" + " runtime if needed. 
Op compatibility means that your system\n" + " meet the required dependencies to JIT install the op.") + + print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1]))) + print("JIT compiled ops requires ninja") + ninja_status = OKAY if ninja_installed() else FAIL + print('ninja', "." * (max_dots - 5), ninja_status) + print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1]))) + print(h[0], "." * (max_dots - len(h[0])), h[1], "." * (max_dots2 - len(h[1])), h[2]) + print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1]))) + installed = f"{GREEN}[YES]{END}" + no = f"{YELLOW}[NO]{END}" + for op_name, builder in ALL_OPS.items(): + dots = "." * (max_dots - len(op_name)) + is_compatible = OKAY if builder.is_compatible(verbose) else no + is_installed = installed if installed_ops.get(op_name, + False) and accelerator_name == get_accelerator()._name else no + dots2 = '.' * ((len(h[1]) + (max_dots2 - len(h[1]))) - (len(is_installed) - color_len)) + print(op_name, dots, is_installed, dots2, is_compatible) + print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1]))) + + +def ninja_installed(): + try: + import ninja # noqa: F401 # type: ignore + except ImportError: + return False + return True + + +def nvcc_version(): + import torch.utils.cpp_extension + cuda_home = torch.utils.cpp_extension.CUDA_HOME + if cuda_home is None: + return f"{RED} [FAIL] cannot find CUDA_HOME via torch.utils.cpp_extension.CUDA_HOME={torch.utils.cpp_extension.CUDA_HOME} {END}" + try: + output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True) + except FileNotFoundError: + return f"{RED} [FAIL] nvcc missing {END}" + output_split = output.split() + release_idx = output_split.index("release") + release = output_split[release_idx + 1].replace(',', '').split(".") + return ".".join(release) + + +def installed_cann_path(): + if "ASCEND_HOME_PATH" in os.environ or os.path.exists(os.environ["ASCEND_HOME_PATH"]): + return os.environ["ASCEND_HOME_PATH"] + return None + + +def installed_cann_version(): + import re + ascend_path = installed_cann_path() + if ascend_path is None: + return f"CANN_HOME does not exist, unable to compile NPU op(s)" + cann_version = "" + for dirpath, _, filenames in os.walk(os.path.realpath(ascend_path)): + if cann_version: + break + install_files = [file for file in filenames if re.match(r"ascend_.*_install\.info", file)] + if install_files: + filepath = os.path.join(dirpath, install_files[0]) + with open(filepath, "r") as f: + for line in f: + if line.find("version") != -1: + cann_version = line.strip().split("=")[-1] + break + return cann_version + + +def get_shm_size(): + try: + shm_stats = os.statvfs('/dev/shm') + except (OSError, FileNotFoundError, ValueError): + return "UNKNOWN", None + + shm_size = shm_stats.f_frsize * shm_stats.f_blocks + shm_hbytes = human_readable_size(shm_size) + warn = [] + if shm_size < 512 * 1024**2: + warn.append( + f" {YELLOW} [WARNING] /dev/shm size might be too small, if running in docker increase to at least --shm-size='1gb' {END}" + ) + if get_accelerator().communication_backend_name() == "nccl": + warn.append( + f" {YELLOW} [WARNING] see more details about NCCL requirements: https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/troubleshooting.html#sharing-data {END}" + ) + return shm_hbytes, warn + + +def human_readable_size(size): + units = ['B', 'KB', 'MB', 'GB', 'TB'] + i = 0 + while size >= 1024 and i < len(units) - 1: + size /= 1024 + i += 1 + return f'{size:.2f} {units[i]}' + + +def debug_report(): + max_dots = 33 + + report = 
[("torch install path", torch.__path__), ("torch version", torch.__version__), + ("deepspeed install path", deepspeed.__path__), + ("deepspeed info", f"{deepspeed.__version__}, {deepspeed.__git_hash__}, {deepspeed.__git_branch__}")] + if get_accelerator().device_name() == 'cuda': + hip_version = getattr(torch.version, "hip", None) + report.extend([("torch cuda version", torch.version.cuda), ("torch hip version", hip_version), + ("nvcc version", (None if hip_version else nvcc_version())), + ("deepspeed wheel compiled w.", f"torch {torch_info['version']}, " + + (f"hip {torch_info['hip_version']}" if hip_version else f"cuda {torch_info['cuda_version']}")) + ]) + elif get_accelerator().device_name() == 'npu': + import torch_npu + report.extend([("deepspeed wheel compiled w.", f"torch {torch_info['version']}"), + ("torch_npu install path", torch_npu.__path__), ("torch_npu version", torch_npu.__version__), + ("ascend_cann version", installed_cann_version())]) + else: + report.extend([("deepspeed wheel compiled w.", f"torch {torch_info['version']} ")]) + + report.append(("shared memory (/dev/shm) size", get_shm_size())) + + print("DeepSpeed general environment info:") + for name, value in report: + warns = [] + if isinstance(value, tuple): + value, warns = value + print(name, "." * (max_dots - len(name)), value) + if warns: + for warn in warns: + print(warn) + + +def parse_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument('--hide_operator_status', + action='store_true', + help='Suppress display of installation and compatibility statuses of DeepSpeed operators. ') + parser.add_argument('--hide_errors_and_warnings', action='store_true', help='Suppress warning and error messages.') + args = parser.parse_args() + return args + + +def main(hide_operator_status=False, hide_errors_and_warnings=False): + if not hide_operator_status: + op_report(verbose=not hide_errors_and_warnings) + debug_report() + + +def cli_main(): + args = parse_arguments() + main(hide_operator_status=args.hide_operator_status, hide_errors_and_warnings=args.hide_errors_and_warnings) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/deepspeed/git_version_info.py b/venv/lib/python3.10/site-packages/deepspeed/git_version_info.py new file mode 100644 index 0000000000000000000000000000000000000000..70c536d2f78eee24c21db985c43fc9a3c017d4ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/git_version_info.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +try: + # This is populated by setup.py + from .git_version_info_installed import * # noqa: F401 # type: ignore +except ModuleNotFoundError: + import os + if os.path.isfile('version.txt'): + # Will be missing from checkouts that haven't been installed (e.g., readthedocs) + version = open('version.txt', 'r').read().strip() + else: + version = "0.0.0" + git_hash = '[none]' + git_branch = '[none]' + + from .ops.op_builder.all_ops import ALL_OPS + installed_ops = dict.fromkeys(ALL_OPS.keys(), False) + accelerator_name = "" + torch_info = {'version': "0.0", "cuda_version": "0.0", "hip_version": "0.0"} + +# compatible_ops list is recreated for each launch +from .ops.op_builder.all_ops import ALL_OPS + +compatible_ops = dict.fromkeys(ALL_OPS.keys(), False) +for op_name, builder in ALL_OPS.items(): + op_compatible = builder.is_compatible() + compatible_ops[op_name] = op_compatible + compatible_ops["deepspeed_not_implemented"] = False diff --git a/venv/lib/python3.10/site-packages/deepspeed/git_version_info_installed.py b/venv/lib/python3.10/site-packages/deepspeed/git_version_info_installed.py new file mode 100644 index 0000000000000000000000000000000000000000..1478aa8b3441149ce49e7e488864cbee6ccaa296 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/git_version_info_installed.py @@ -0,0 +1,6 @@ +version='0.14.2' +git_hash='unknown' +git_branch='unknown' +installed_ops={'deepspeed_not_implemented': False, 'deepspeed_ccl_comm': False, 'deepspeed_shm_comm': False, 'cpu_adam': False, 'fused_adam': False} +accelerator_name='cpu' +torch_info={'version': '2.3', 'bf16_support': False, 'cuda_version': '12.1', 'nccl_version': '2.20', 'hip_version': '0.0'} diff --git a/venv/lib/python3.10/site-packages/deepspeed/pydantic_v1.py b/venv/lib/python3.10/site-packages/deepspeed/pydantic_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..6aba072ad929b8b331a64097da75f172336b7b85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/pydantic_v1.py @@ -0,0 +1,16 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +"""Pydantic v1 compatibility module. + +Pydantic v2 introduced breaking changes that hinder its adoption: +https://docs.pydantic.dev/latest/migration/. To provide deepspeed users the option to +migrate to pydantic v2 on their own timeline, deepspeed uses this compatibility module +as a pydantic-version-agnostic alias for pydantic's v1 API. +""" + +try: + from pydantic.v1 import * # noqa: F401 +except ImportError: + from pydantic import * # noqa: F401 diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..208299fb8c50f73468d293b6fa5dca71649d62e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/bf16_optimizer.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/bf16_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..1f3365b20f4e6aba935a6c3e6b9379bca3f08042 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/bf16_optimizer.py @@ -0,0 +1,556 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from collections import OrderedDict +import torch +import sys +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors +from deepspeed import comm as dist +from deepspeed.runtime.constants import PIPE_REPLICATED +from deepspeed.runtime.base_optimizer import ZeROOptimizer +from packaging import version as pkg_version +from deepspeed.git_version_info import version +from deepspeed.runtime.utils import (get_global_norm_of_tensors, clip_tensors_by_global_norm, DummyOptim, + align_dense_tensors, all_gather_dp_groups, is_model_parallel_parameter, + see_memory_usage, graph_process, get_norm_with_moe_layers) +from deepspeed.utils import link_hp_params, lazy_init_hp_params_optimizer_state, fragment_address, groups +from deepspeed.moe.utils import is_moe_param, is_moe_param_group +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank +from deepspeed.checkpoint import enable_universal_checkpoint +from deepspeed.checkpoint.constants import (DS_VERSION, PARTITION_COUNT, BASE_OPTIMIZER_STATE, + SINGLE_PARTITION_OF_FP32_GROUPS, CLIP_GRAD, GROUP_PADDINGS, + PARAM_SLICE_MAPPINGS) + +setattr(sys.modules[__name__], 'fragment_address', fragment_address) + + +class BF16_Optimizer(ZeROOptimizer): + + def __init__(self, + init_optimizer, + param_names, + mpu=None, + clip_grad=0.0, + norm_type=2, + allgather_bucket_size=5000000000, + dp_process_group=None, + timers=None, + grad_acc_dtype=None, + graph_harvesting=False, + immediate_grad_update=False, + has_moe_layers=False): + super().__init__() + see_memory_usage('begin bf16_optimizer', force=True) + self.timers = timers + self.optimizer = init_optimizer + self.param_names = param_names + self.using_real_optimizer = not isinstance(self.optimizer, DummyOptim) + + assert grad_acc_dtype in [torch.float32, torch.bfloat16 + ], f"BF16Optimizer: Unsupported gradient accumulation data type: {grad_acc_dtype}" + self.grad_acc_dtype = grad_acc_dtype + self.immediate_grad_update = immediate_grad_update + + self.clip_grad = clip_grad + self.norm_type = norm_type + self.mpu = mpu + self.allgather_bucket_size = int(allgather_bucket_size) + self.dp_process_group = dp_process_group + self.dp_rank = dist.get_rank(group=self.dp_process_group) + self.has_moe_layers = has_moe_layers + self.non_expert_gradients = [] + self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))] + if self.has_moe_layers: + self._configure_moe_settings() + + # Use torch (un)flatten ops + self.flatten = _flatten_dense_tensors + self.unflatten = _unflatten_dense_tensors + + #align nccl all-gather send buffers to 4-bye boundary + self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2 + + # Build BF16/FP32 groups + self.bf16_groups = [] + self.bf16_groups_flat = [] + self.bf16_partitioned_groups = [] + + self.fp32_groups_flat_partition = [] + + # Maintain different fp32 gradients views for convenience + self.fp32_groups_gradients = [] + self.fp32_groups_gradient_dict = {} + self.fp32_groups_gradients_flat = [] + self.fp32_groups_actual_gradients_flat = [] + self.fp32_groups_gradient_flat_partition = [] + self.fp32_groups_has_gradients = [] + + self.group_paddings = [] + self.graph_harvesting = graph_harvesting + if self.using_real_optimizer: + self._setup_for_real_optimizer() + + see_memory_usage('end bf16_optimizer', force=True) + + def _configure_moe_settings(self): + assert any( + [is_moe_param_group(group) for group in self.optimizer.param_groups] + ), "The model 
has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer" + + for i, group in enumerate(self.optimizer.param_groups): + if is_moe_param_group(group): + assert all([is_moe_param(param) + for param in group['params']]), "All params in MoE group must be MoE params" + self.real_dp_process_group[i] = groups._get_expert_data_parallel_group(group['name']) + self.expert_gradients = {} + if self.has_moe_layers: + for key in groups._get_expert_data_parallel_group_dict().keys(): + self.expert_gradients[key] = [] + + def _setup_for_real_optimizer(self): + self.partition_count = [dist.get_world_size(group=pg) for pg in self.real_dp_process_group] + + for i, param_group in enumerate(self.optimizer.param_groups): + real_dp_world_size = dist.get_world_size(group=self.real_dp_process_group[i]) + see_memory_usage(f'before initializing group {i}', force=True) + + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + + # grab the original list + trainable_parameters = [param for param in param_group['params'] if param.requires_grad] + self.bf16_groups.append(trainable_parameters) + + # create flat bf16 params + self.bf16_groups_flat.append( + self._flatten_dense_tensors_aligned(self.bf16_groups[i], + self.nccl_start_alignment_factor * real_dp_world_size)) + # Make bf16 params point to flat tensor storage + self._update_storage_to_flattened_tensor(tensor_list=self.bf16_groups[i], + flat_tensor=self.bf16_groups_flat[i]) + + # divide flat weights into equal sized partitions + partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size + bf16_dp_partitions = [ + self.bf16_groups_flat[i].narrow(0, dp_index * partition_size, partition_size) + for dp_index in range(real_dp_world_size) + ] + self.bf16_partitioned_groups.append(bf16_dp_partitions) + + # create fp32 params partition + self.fp32_groups_flat_partition.append(bf16_dp_partitions[partition_id].clone().float().detach()) + self.fp32_groups_flat_partition[i].requires_grad = True + + num_elem_list = [t.numel() for t in self.bf16_groups[i]] + + # create fp32 gradients + fp32_flat_buffer = torch.zeros_like(self.bf16_groups_flat[i], dtype=self.grad_acc_dtype) + self.fp32_groups_gradients_flat.append(fp32_flat_buffer) + if self.has_moe_layers and is_moe_param_group(param_group): + self.expert_gradients[param_group['name']].append(fp32_flat_buffer) + else: + self.non_expert_gradients.append(fp32_flat_buffer) + + # track individual fp32 gradients for entire model + fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i], + num_elem_list=num_elem_list) + self.fp32_groups_gradients.append(fp32_gradients) + self.fp32_groups_gradient_dict[i] = fp32_gradients + + # flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding) + length_without_padding = sum(num_elem_list) + self.fp32_groups_actual_gradients_flat.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, 0, length_without_padding)) + + # flat tensor corresponding to gradient partition + self.fp32_groups_gradient_flat_partition.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, partition_id * partition_size, partition_size)) + + # track fp32 gradient updates + self.fp32_groups_has_gradients.append([False] * len(self.bf16_groups[i])) + + # Record padding required for alignment + if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1: + padding = self.bf16_groups_flat[i].numel() - length_without_padding + else: + 
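+                # Only the last data-parallel rank's partition carries the alignment padding; all other partitions have none.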
padding = 0 + + self.group_paddings.append(padding) + + # update optimizer param groups to reference fp32 params partition + param_group['params'] = [self.fp32_groups_flat_partition[i]] + + see_memory_usage(f'after initializing group {i}', force=True) + + see_memory_usage('before initialize_optimizer', force=True) + self.initialize_optimizer_states() + see_memory_usage('end initialize_optimizer', force=True) + + if self.immediate_grad_update: + self.create_grad_acc_hooks() + + # Need optimizer states initialized before linking lp to optimizer state + self._link_all_hp_params() + self._hp_optimizer_states_linked = False + self._enable_universal_checkpoint() + self._param_slice_mappings = self._create_param_mapping() + + def _enable_universal_checkpoint(self): + for lp_param_group in self.bf16_groups: + enable_universal_checkpoint(param_list=lp_param_group) + + def _create_param_mapping(self): + param_mapping = [] + for i, _ in enumerate(self.optimizer.param_groups): + param_mapping_per_group = OrderedDict() + for lp in self.bf16_groups[i]: + if lp._hp_mapping is not None: + lp_name = self.param_names[lp] + param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address() + param_mapping.append(param_mapping_per_group) + + return param_mapping + + def _link_all_hp_params(self): + for i, _ in enumerate(self.optimizer.param_groups): + real_dp_world_size = dist.get_world_size(group=self.real_dp_process_group[i]) + + # Link bf16 and fp32 params in partition + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size + flat_hp_partition = self.fp32_groups_flat_partition[i] + link_hp_params(lp_param_list=self.bf16_groups[i], + flat_hp_partition=flat_hp_partition, + gradient_dict=self.fp32_groups_gradient_dict, + offload_gradient_dict=None, + use_offload=False, + param_group_index=i, + partition_start=partition_id * partition_size, + partition_size=partition_size, + dp_group=self.real_dp_process_group[i]) + + def _lazy_init_hp_params_optimizer_state(self): + if not self._hp_optimizer_states_linked: + for i, _ in enumerate(self.optimizer.param_groups): + lazy_init_hp_params_optimizer_state(self.bf16_groups[i], self.fp32_groups_flat_partition[i], + self.optimizer.state) + self._hp_optimizer_states_linked = True + + def initialize_optimizer_states(self): + """Take an optimizer step with zero-valued gradients to allocate internal + optimizer state. + + This helps prevent memory fragmentation by allocating optimizer state at the + beginning of training instead of after activations have been allocated. + """ + for param_partition, grad_partition in zip(self.fp32_groups_flat_partition, + self.fp32_groups_gradient_flat_partition): + # In case of grad acc dtype different than FP32, need to cast to high precision. 
+ param_partition.grad = grad_partition.to( + param_partition.dtype) if grad_partition.dtype != param_partition.dtype else grad_partition + + if self.grad_acc_dtype is not torch.float32: + for param_partition in self.fp32_groups_flat_partition: + param_partition.grad = None + + self.clear_hp_grads() + + def _split_flat_tensor(self, flat_tensor, num_elem_list): + assert sum(num_elem_list) <= flat_tensor.numel() + tensor_list = [] + offset = 0 + for num_elem in num_elem_list: + dense_tensor = torch.narrow(flat_tensor, 0, offset, num_elem) + tensor_list.append(dense_tensor) + offset += num_elem + + return tensor_list + + def _update_storage_to_flattened_tensor(self, tensor_list, flat_tensor): + updated_params = self.unflatten(flat_tensor, tensor_list) + for p, q in zip(tensor_list, updated_params): + p.data = q.data + + def _flatten_dense_tensors_aligned(self, tensor_list, alignment): + return self.flatten(align_dense_tensors(tensor_list, alignment)) + + @torch.no_grad() + def step(self, closure=None): + if closure is not None: + raise NotImplementedError(f'{self.__class__} does not support closure.') + + non_expert_grads_for_norm, expert_grads_for_norm = self.get_grads_for_norm() + non_expert_groups_norm = get_global_norm_of_tensors(input_tensors=non_expert_grads_for_norm, + mpu=self.mpu, + norm_type=self.norm_type, + use_graph=self.graph_harvesting) + all_groups_norm = non_expert_groups_norm + if self.has_moe_layers: + all_groups_norm = get_norm_with_moe_layers(non_expert_groups_norm, + mpu=self.mpu, + expert_tensors=expert_grads_for_norm, + norm_type=self.norm_type) + + self._global_grad_norm = all_groups_norm + + assert all_groups_norm > 0. + if self.clip_grad > 0.: + clip_tensors_by_global_norm(input_tensors=self.get_grads_for_norm(for_clipping=True), + max_norm=self.clip_grad, + global_norm=all_groups_norm, + mpu=self.mpu, + use_graph=self.graph_harvesting) + + self.optimizer.step() + + # We need to link optimizer state after the first step() call + self._lazy_init_hp_params_optimizer_state() + + self.update_lp_params() + + self.clear_hp_grads() + + def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs): + """Perform a backward pass and copy the low-precision gradients to the + high-precision copy. + + We copy/accumulate to the high-precision grads now to prevent accumulating in the + bf16 grads after successive backward() calls (i.e., grad accumulation steps > 1) + + The low-precision grads are deallocated during this procedure. 
+ """ + self.clear_lp_grads() + loss.backward(**bwd_kwargs) + + if update_hp_grads: + self.update_hp_grads(clear_lp_grads=clear_lp_grads) + + @torch.no_grad() + def _update_hp_grad(self, lp, group_idx, param_idx, clear_lp_grads): + if lp.grad is None: + return + + hp_grad = self.fp32_groups_gradients[group_idx][param_idx] + assert hp_grad is not None, \ + f'high precision param has no gradient, lp param_id = {id(lp)} group_info = [{group_idx}][{param_idx}]' + + hp_grad.data.add_(lp.grad.data.to(hp_grad.dtype).view(hp_grad.shape)) + lp._hp_grad = hp_grad + self.fp32_groups_has_gradients[group_idx][param_idx] = True + + # clear gradients + if clear_lp_grads: + lp.grad.zero_() + + @torch.no_grad() + def _update_hp_grads_func(self, clear_lp_grads=False): + for i, group in enumerate(self.bf16_groups): + for j, lp in enumerate(group): + self._update_hp_grad(lp, i, j, clear_lp_grads) + + @torch.no_grad() + def update_hp_grads(self, clear_lp_grads=False): + if self.immediate_grad_update: + return + + if self.graph_harvesting: + graph_process(False, self._update_hp_grads_func, clear_lp_grads) + else: + self._update_hp_grads_func(clear_lp_grads) + #cpu op + for i, group in enumerate(self.bf16_groups): + for j, lp in enumerate(group): + if lp.grad is None: + continue + self.fp32_groups_has_gradients[i][j] = True + + @torch.no_grad() + def get_grads_for_reduction(self): + if self.has_moe_layers: + return self.non_expert_gradients, self.expert_gradients + return self.non_expert_gradients, {} + + @torch.no_grad() + def get_grads_for_norm(self, for_clipping=False): + """ + Returns: + tuple[list[Tensor], dict[ep_name, List[Tensor]] | list: + If for_clipping, return all gradients. + Otherwise, separate and return dict of expert_grad and list of non_expert_grad + """ + # (grads, expert_group_name) + expert_grads_for_norm = {} + + # grads + non_expert_grads_for_norm = [] + all_grads_for_clip = [] + + tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + assert len(self.bf16_groups) == len(self.optimizer.param_groups) + for i, group in enumerate(self.bf16_groups): + for j, lp in enumerate(group): + if not for_clipping: + if hasattr(lp, PIPE_REPLICATED) and lp.ds_pipe_replicated: + continue + + # skip duplicated parameters. perform norm only on cards with tp_rank=0. + # non-duplicated parameters include: + # - Parameters with tp: Use allreducesum of mp_group. + # - Moe Parameters with ep: Use allreducesum of ep_group. 
+ if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp) or is_moe_param(lp)): + continue + + if not self.fp32_groups_has_gradients[i][j]: + continue + if not for_clipping: + param_group = self.optimizer.param_groups[i] + if self.has_moe_layers and is_moe_param_group(param_group): + if param_group['name'] not in expert_grads_for_norm: + expert_grads_for_norm[param_group['name']] = [] + expert_grads_for_norm[param_group['name']].append(self.fp32_groups_gradients[i][j]) + else: + non_expert_grads_for_norm.append(self.fp32_groups_gradients[i][j]) + else: + all_grads_for_clip.append(self.fp32_groups_gradients[i][j]) + if not for_clipping: + return non_expert_grads_for_norm, expert_grads_for_norm + return all_grads_for_clip + + @torch.no_grad() + def update_lp_params(self): + for i, (bf16_partitions, + fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + bf16_partitions[partition_id].data.copy_(fp32_partition.data) + # print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True) + # if i == 0: + # print_rank_0(f'{fp32_partition[:10]=}', force=True) + + all_gather_dp_groups(groups_flat=self.bf16_groups_flat, + partitioned_param_groups=self.bf16_partitioned_groups, + dp_process_group=self.real_dp_process_group, + start_alignment_factor=self.nccl_start_alignment_factor, + allgather_bucket_size=self.allgather_bucket_size) + + def clear_hp_grads(self): + for flat_gradients in self.fp32_groups_gradients_flat: + flat_gradients.zero_() + + for i, group in enumerate(self.fp32_groups_gradients): + self.fp32_groups_has_gradients[i] = [False] * len(group) + + def clear_lp_grads(self): + + # using zero_() fixed memory address for graph replay + set_to_none = False if self.graph_harvesting else True + zero_grads_list = [] + for group in self.bf16_groups: + for param in group: + if set_to_none: + param.grad = None + elif param.grad is not None: + if param.grad.grad_fn is not None: + param.grad.detach_() + zero_grads_list.append(param.grad) + if not set_to_none and len(zero_grads_list) > 0: + torch._foreach_zero_(zero_grads_list) + + def state_dict(self): + state_dict = {} + state_dict[CLIP_GRAD] = self.clip_grad + state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict() + state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = self.fp32_groups_flat_partition + state_dict[GROUP_PADDINGS] = self.group_paddings + state_dict[PARTITION_COUNT] = self.partition_count + state_dict[DS_VERSION] = version + state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings + + return state_dict + + # Restore base optimizer fp32 weights bfloat16 weights + def _restore_from_bit16_weights(self): + for i, group in enumerate(self.bf16_groups): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + for bf16_partitions, fp32_partition in zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition): + fp32_partition.data.copy_(bf16_partitions[partition_id].data) + + def refresh_fp32_params(self): + self._restore_from_bit16_weights() + + def load_state_dict(self, + state_dict_list, + checkpoint_folder, + load_optimizer_states=True, + load_from_fp32_weights=False, + load_serial=None): + if checkpoint_folder: + self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights) + else: + self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights) + + def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, 
load_from_fp32_weights=False): + + dp_rank = dist.get_rank(group=self.dp_process_group) + current_rank_sd = state_dict_list[dp_rank] + + ckpt_version = current_rank_sd.get(DS_VERSION, False) + assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed" + ckpt_version = pkg_version.parse(ckpt_version) + + self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad) + + if load_optimizer_states: + print(f"_load_legacy_checkpoint current_rank_sd[BASE_OPTIMIZER_STATE]") + self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE]) + + if load_from_fp32_weights: + for current, saved in zip(self.fp32_groups_flat_partition, + current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]): + src_tensor = _get_padded_tensor(saved, current.numel()) + current.data.copy_(src_tensor.data) + + if load_optimizer_states: + self._link_all_hp_params() + + def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights): + self.load_hp_checkpoint_state_from_checkpoint_dir("bf16_groups", checkpoint_folder) + + def _load_global_state(self, sd): + pass + + @property + def param_groups(self): + """Forward the wrapped optimizer's parameters.""" + return self.optimizer.param_groups + + def accumulate_hp_grads_and_remove_lp(self, lp_param, group_idx, param_idx): + assert self.immediate_grad_update + self._update_hp_grad(lp_param, group_idx, param_idx, clear_lp_grads=True) + + def create_grad_acc_hooks(self): + self.grad_accs = [] + for i, param_group in enumerate(self.bf16_groups): + for j, param in enumerate(param_group): + if param.requires_grad: + + def wrapper(param, i, j): + param_tmp = param.expand_as(param) + grad_acc = param_tmp.grad_fn.next_functions[0][0] + + def accumulate_hp_grads_and_remove_lp(*notneeded): + self.accumulate_hp_grads_and_remove_lp(param, i, j) + + grad_acc.register_hook(accumulate_hp_grads_and_remove_lp) + self.grad_accs.append(grad_acc) + + wrapper(param, i, j) + + +def _get_padded_tensor(src_tensor, size): + if src_tensor.numel() >= size: + return src_tensor + padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device) + slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel()) + slice_tensor.data.copy_(src_tensor.data) + return padded_tensor diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/compiler.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..b5e4e33425d028cfd14246dc85cec514ad2b016d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/compiler.py @@ -0,0 +1,166 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Union, Callable, Dict, Any +import importlib +import torch +from ..pydantic_v1 import validator +from .config_utils import DeepSpeedConfigModel + +COMPILE_CONFIG = "compile" + + +def is_compile_supported(): + return hasattr(torch, "compiler") + + +def disable(func): + if is_compile_supported(): + return torch.compiler.disable(func) + return func + + +def get_compile_config(param_dict): + if COMPILE_CONFIG in param_dict: + compile_config_dict = param_dict[COMPILE_CONFIG] + else: + compile_config_dict = {} + return CompileConfig(**compile_config_dict) + + +def get_backend_fn(backend: Union[str, Callable]) -> Union[str, Callable]: + if isinstance(backend, Callable): + return backend + + elif isinstance(backend, str): + if backend in torch._dynamo.list_backends(exclude_tags=()): + return backend + + # Get module name from backend name + module_name = '.'.join(backend.split('.')[:-1]) + fn_name = backend.split('.')[-1] + + try: + module = importlib.import_module(module_name) + backend_fn = getattr(module, fn_name) + except ImportError: + raise ValueError( + f"The backend {backend} is not in the list of available backends and could not be imported.") + return backend_fn + + raise ValueError(f"backend for torch.compile must be a string or Callable: {backend}") + + +class CompileConfig(DeepSpeedConfigModel): + """ + [EXPERIMENTAL] This configuration enables users to activate `torch.compile` within DeepSpeed and customize its settings. + Please be aware that these features and API designs are experimental and subject to change. + """ + + enabled: bool = False + """ + Enable torch.compile when True. + """ + + backend: str = "inductor" + """ + Passed to `backend` argument of torch.compile. + If the given value is not in torch._dynamo.list_backends(), + DeepSpeed attempts to import and instantiate the module with the given name. + """ + + kwargs: Dict[str, Any] = {} + """ + Passed to `kwargs` argument of torch.compile. + """ + + @validator("enabled") + def validate_enabled(cls, field_value, values): + if field_value and not is_compile_supported(): + raise ValueError("torch.compile is not supported on this version of PyTorch.") + return field_value + + +class CompiledModuleWrapper(torch.nn.Module): + + def __init__(self, module, compile_config: Union[CompileConfig, None] = None): + super().__init__() + + assert is_compile_supported(), "torch.compile is not supported on this version of PyTorch." + + modules = self.__dict__.get('_modules') + modules['wrapped'] = module + self.__dict__['wrapped'] = module + self._is_compiled = False + self._backend = get_backend_fn(compile_config.backend) + self._compile_kwargs = compile_config.kwargs + self._compiler_fn = None + + def __getattr__(self, name): + return getattr(self.__dict__['wrapped'], name) + + def set_backend(self, backend: Union[str, Callable]): + """Set the backend for torch.compile. + + Args: + backend (Union[str, Callable]): backend name or a function that takes a torch.nn.Module and returns a compiled module. + You can directly pass a function that works as a backend. + See also `backend` field in `CompileConfig` for more details. + """ + self._backend = get_backend_fn(backend) + + def set_torch_compile_kwargs(self, kwargs: Dict[str, Union[str, Any]]) -> None: + """Set kwargs for torch.compile. Kwargs that are set in DeepSpeed config will be overwritten. + You can also pass a backend name with "backend" key to change the backend. 
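+        Note: in the implementation below, passing a "backend" key in kwargs raises a ValueError; use set_backend() to change the backend.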
+ + Args: + kwargs (Dict[str, Union[str, Any]]): kwargs passed to torch.compile. + """ + + if "backend" in kwargs: + raise ValueError("backend cannot be set as compile kwargs. Use set_backend instead.") + self._compile_kwargs.update(kwargs) + + def set_compiler_fn(self, compiler_fn: Callable) -> None: + """Set a function to be used for compiling the module. + This function should take a torch.nn.Module as input and return a compiled module. + Note that other compile options are ignored when a compiler_fn is set. + + Example: + ```python + def my_compiler_fn(module: torch.nn.Module): + ... + return torch.compile(module, ...) + + engine.set_compiler_fn(my_compiler_fn) + ``` + """ + self._compiler_fn = compiler_fn + + def forward(self, *args, **kwargs) -> Any: + if not self.is_compiled: + if self._compiler_fn is None: + self.__dict__['wrapped'] = torch.compile(self.wrapped, backend=self._backend, **self._compile_kwargs) + else: + self.__dict__['wrapped'] = self._compiler_fn(self.wrapped) + self._is_compiled = True + + return self.__dict__['wrapped'](*args, **kwargs) + + @property + def is_compiled(self) -> bool: + return self._is_compiled + + @property + def backend(self) -> Union[str, Callable]: + return self._backend + + @property + def torch_compile_kwargs(self) -> Dict[str, Any]: + return self._compile_kwargs + + @property + def compiler_fn(self) -> Union[Callable, None]: + return self._compiler_fn diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/config.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/config.py new file mode 100644 index 0000000000000000000000000000000000000000..19b169086be1d0d7e5fa0b2d1ed0a5067692d61a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/config.py @@ -0,0 +1,1039 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +from typing import Union +from enum import Enum + +import torch +import json +import hjson +import copy +import base64 + +from .constants import * +from .fp16.loss_scaler import ( + INITIAL_LOSS_SCALE, + SCALE_WINDOW, + DELAYED_SHIFT, + CONSECUTIVE_HYSTERESIS, + MIN_LOSS_SCALE, +) +from .config_utils import ( + get_scalar_param, + dict_raise_error_on_duplicate_keys, + ScientificNotationEncoder, +) +from .zero.config import get_zero_config, ZeroStageEnum +from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig +from ..comm.config import DeepSpeedCommsConfig +from ..monitor.config import get_monitor_config +from ..inference.config import WeightQuantConfig +from .compiler import get_compile_config + +from deepspeed import comm as dist +from deepspeed.runtime.config_utils import DeepSpeedConfigModel + +from ..git_version_info import version as __version__ +from ..utils import logger + +from ..elasticity import ( + elasticity_enabled, + compute_elastic_config, + ensure_immutable_elastic_config, +) +from ..elasticity.config import ElasticityConfigError +from ..elasticity.constants import ( + ELASTICITY, + IGNORE_NON_ELASTIC_BATCH_INFO, + IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT, + MODEL_PARALLEL_SIZE, + MODEL_PARALLEL_SIZE_DEFAULT, + NUM_GPUS_PER_NODE, + NUM_GPUS_PER_NODE_DEFAULT, +) + +from ..profiling.config import DeepSpeedFlopsProfilerConfig +from ..autotuning.config import DeepSpeedAutotuningConfig +from ..nebula.config import DeepSpeedNebulaConfig + +from ..compression.config import get_compression_config, get_quantize_enabled +from ..compression.constants import * +from .swap_tensor.aio_config import get_aio_config + +from .data_pipeline.config import get_data_efficiency_enabled, get_data_efficiency_config, get_curriculum_enabled_legacy, get_curriculum_params_legacy +from .data_pipeline.constants import * + +TENSOR_CORE_ALIGN_SIZE = 8 + +ADAGRAD_OPTIMIZER = 'adagrad' +ADAM_OPTIMIZER = 'adam' +ADAMW_OPTIMIZER = 'adamw' +LAMB_OPTIMIZER = 'lamb' +ONEBIT_ADAM_OPTIMIZER = 'onebitadam' +ZERO_ONE_ADAM_OPTIMIZER = 'zerooneadam' +ONEBIT_LAMB_OPTIMIZER = 'onebitlamb' +MUADAM_OPTIMIZER = 'muadam' +MUADAMW_OPTIMIZER = 'muadamw' +MUSGD_OPTIMIZER = 'musgd' +LION_OPTIMIZER = 'lion' +DEEPSPEED_OPTIMIZERS = [ + ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER, + ZERO_ONE_ADAM_OPTIMIZER, MUADAM_OPTIMIZER, MUADAMW_OPTIMIZER, MUSGD_OPTIMIZER, LION_OPTIMIZER +] + +# extra optimizer parameters for adam/adamw +TORCH_ADAM_PARAM = "torch_adam" + +# default to adamw logic for adam/adamw optimizers unless user explicitly opts out +ADAM_W_MODE = "adam_w_mode" +ADAM_W_MODE_DEFAULT = True + + +class DeepSpeedConfigError(Exception): + pass + + +class DtypeEnum(Enum): + # The torch dtype must always be the first value (so we return torch.dtype) + fp16 = torch.float16, "torch.float16", "fp16", "float16", "half" + fp32 = torch.float32, "torch.float32", "fp32", "float32", "float" + int8 = torch.int8, "torch.int8", "int8" + bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16" + + # Copied from https://stackoverflow.com/a/43210118 + # Allows us to use multiple values for each Enum index and returns first + # listed value when Enum is called + def __new__(cls, *values): + obj = object.__new__(cls) + # first value is canonical value + obj._value_ = values[0] + for other_value in values[1:]: + cls._value2member_map_[other_value] = obj + obj._all_values = values 
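+        # _all_values retains every alias (the torch dtype plus its string forms) for use in __repr__ below.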
+ return obj + + def __repr__(self): + return "<%s.%s: %s>" % ( + self.__class__.__name__, + self._name_, + ", ".join([repr(v) for v in self._all_values]), + ) + + +def get_pld_enabled(param_dict): + if PROGRESSIVE_LAYER_DROP in param_dict.keys(): + return get_scalar_param(param_dict[PROGRESSIVE_LAYER_DROP], PLD_ENABLED, PLD_ENABLED_DEFAULT) + else: + return False + + +def get_pld_params(param_dict): + if PROGRESSIVE_LAYER_DROP in param_dict.keys(): + pld_params = copy.copy(param_dict[PROGRESSIVE_LAYER_DROP]) + pld_params.pop(PLD_ENABLED) + return pld_params + else: + return False + + +def get_amp_enabled(param_dict): + if AMP in param_dict.keys(): + return get_scalar_param(param_dict[AMP], AMP_ENABLED, AMP_ENABLED_DEFAULT) + else: + return False + + +def get_amp_params(param_dict): + if AMP in param_dict.keys(): + amp_params = copy.copy(param_dict[AMP]) + amp_params.pop(AMP_ENABLED) + return amp_params + else: + return False + + +def get_fp16_enabled(param_dict): + if FP16 in param_dict.keys(): + return get_scalar_param(param_dict[FP16], FP16_ENABLED, FP16_ENABLED_DEFAULT) + else: + return False + + +def get_bfloat16_enabled(param_dict): + for key in [BFLOAT16, BFLOAT16_OLD]: + if key in param_dict.keys(): + return get_scalar_param(param_dict[key], BFLOAT16_ENABLED, BFLOAT16_ENABLED_DEFAULT) + return False + + +def get_bfloat16_immediate_grad_update(param_dict): + for key in [BFLOAT16, BFLOAT16_OLD]: + if key in param_dict.keys(): + return get_scalar_param(param_dict[key], BFLOAT16_IMMEDIATE_GRAD_UPDATE, + BFLOAT16_IMMEDIATE_GRAD_UPDATE_DEFAULT) + return False + + +def get_fp16_master_weights_and_grads_enabled(param_dict): + if get_fp16_enabled(param_dict): + return get_scalar_param(param_dict[FP16], FP16_MASTER_WEIGHTS_AND_GRADS, FP16_MASTER_WEIGHTS_AND_GRADS_DEFAULT) + else: + return False + + +def get_fp16_auto_cast(param_dict): + if get_fp16_enabled(param_dict): + return get_scalar_param(param_dict[FP16], FP16_AUTO_CAST, FP16_AUTO_CAST_DEFAULT) + + +def get_loss_scale(param_dict): + if get_fp16_enabled(param_dict): + return get_scalar_param(param_dict[FP16], FP16_LOSS_SCALE, FP16_LOSS_SCALE_DEFAULT) + elif get_bfloat16_enabled(param_dict): + return 1.0 + else: + return FP16_LOSS_SCALE_DEFAULT + + +def get_initial_dynamic_scale(param_dict): + if get_fp16_enabled(param_dict): + initial_scale_power = get_scalar_param(param_dict[FP16], FP16_INITIAL_SCALE_POWER, + FP16_INITIAL_SCALE_POWER_DEFAULT) + elif get_bfloat16_enabled(param_dict): + initial_scale_power = 0 + else: + initial_scale_power = FP16_INITIAL_SCALE_POWER_DEFAULT + + return 2**initial_scale_power + + +def get_dynamic_loss_scale_args(param_dict): + loss_scale_args = None + if get_fp16_enabled(param_dict): + fp16_dict = param_dict[FP16] + dynamic_loss_args = [ + FP16_INITIAL_SCALE_POWER, + FP16_LOSS_SCALE_WINDOW, + FP16_MIN_LOSS_SCALE, + FP16_HYSTERESIS, + FP16_CONSECUTIVE_HYSTERESIS, + ] + if any(arg in list(fp16_dict.keys()) for arg in dynamic_loss_args): + init_scale = get_scalar_param(fp16_dict, FP16_INITIAL_SCALE_POWER, FP16_INITIAL_SCALE_POWER_DEFAULT) + scale_window = get_scalar_param(fp16_dict, FP16_LOSS_SCALE_WINDOW, FP16_LOSS_SCALE_WINDOW_DEFAULT) + delayed_shift = get_scalar_param(fp16_dict, FP16_HYSTERESIS, FP16_HYSTERESIS_DEFAULT) + consecutive_hysteresis = get_scalar_param(fp16_dict, FP16_CONSECUTIVE_HYSTERESIS, + FP16_CONSECUTIVE_HYSTERESIS_DEFAULT) + min_loss_scale = get_scalar_param(fp16_dict, FP16_MIN_LOSS_SCALE, FP16_MIN_LOSS_SCALE_DEFAULT) + loss_scale_args = { + INITIAL_LOSS_SCALE: 2**init_scale, + 
SCALE_WINDOW: scale_window, + DELAYED_SHIFT: delayed_shift, + CONSECUTIVE_HYSTERESIS: consecutive_hysteresis, + MIN_LOSS_SCALE: min_loss_scale, + } + + return loss_scale_args + + +def get_gradient_accumulation_steps(param_dict): + return get_scalar_param(param_dict, GRADIENT_ACCUMULATION_STEPS, GRADIENT_ACCUMULATION_STEPS_DEFAULT) + + +def get_sparse_gradients_enabled(param_dict): + return get_scalar_param(param_dict, SPARSE_GRADIENTS, SPARSE_GRADIENTS_DEFAULT) + + +def get_communication_data_type(param_dict, + comm_type=COMMUNICATION_DATA_TYPE, + comm_data_type_default=COMMUNICATION_DATA_TYPE_DEFAULT): + val = get_scalar_param(param_dict, comm_type, comm_data_type_default) + val = val.lower() if val is not None else val + if val is None: + return val # we must determine it by other parameters + elif val == "fp32": + return torch.float32 + elif val == "fp16": + return torch.float16 + elif val == "bf16": + return torch.bfloat16 + + raise ValueError(f"Invalid communication_data_type. Supported data types: ['fp16', 'bf16', 'fp32']. Got: {val}") + + +def get_prescale_gradients(param_dict): + return get_scalar_param(param_dict, PRESCALE_GRADIENTS, PRESCALE_GRADIENTS_DEFAULT) + + +def get_gradient_predivide_factor(param_dict): + return get_scalar_param(param_dict, GRADIENT_PREDIVIDE_FACTOR, GRADIENT_PREDIVIDE_FACTOR_DEFAULT) + + +def get_steps_per_print(param_dict): + return get_scalar_param(param_dict, STEPS_PER_PRINT, STEPS_PER_PRINT_DEFAULT) + + +def get_disable_allgather(param_dict): + return get_scalar_param(param_dict, DISABLE_ALLGATHER, DISABLE_ALLGATHER_DEFAULT) + + +def get_dump_state(param_dict): + return get_scalar_param(param_dict, DUMP_STATE, DUMP_STATE_DEFAULT) + + +def get_gradient_clipping(param_dict): + return get_scalar_param(param_dict, GRADIENT_CLIPPING, GRADIENT_CLIPPING_DEFAULT) + + +def get_graph_harvesting(param_dict): + return get_scalar_param(param_dict, GRAPH_HARVESTING, GRAPH_HARVESTING_DEFAULT) + + +def get_sparse_attention(param_dict): + if SPARSE_ATTENTION in param_dict.keys(): + sparsity = param_dict[SPARSE_ATTENTION] + mode = get_sparse_attention_mode(sparsity) + + if mode == SPARSE_DENSE_MODE: + return get_sparse_dense_config(sparsity) + elif mode == SPARSE_FIXED_MODE: + return get_sparse_fixed_config(sparsity) + elif mode == SPARSE_VARIABLE_MODE: + return get_sparse_variable_config(sparsity) + elif mode == SPARSE_BIGBIRD_MODE: + return get_sparse_bigbird_config(sparsity) + elif mode == SPARSE_BSLONGFORMER_MODE: + return get_sparse_bslongformer_config(sparsity) + else: + raise NotImplementedError(f"Given sparsity mode, {mode}, has not been implemented yet!") + + else: + return None + + +def get_sparse_dense_config(sparsity): + block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT) + return {SPARSE_MODE: SPARSE_DENSE_MODE, SPARSE_BLOCK: block} + + +def get_sparse_fixed_config(sparsity): + block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT) + different_layout_per_head = get_scalar_param( + sparsity, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT, + ) + num_local_blocks = get_scalar_param(sparsity, SPARSE_NUM_LOCAL_BLOCKS, SPARSE_NUM_LOCAL_BLOCKS_DEFAULT) + num_global_blocks = get_scalar_param(sparsity, SPARSE_NUM_GLOBAL_BLOCKS, SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT) + attention = get_scalar_param(sparsity, SPARSE_ATTENTION_TYPE, SPARSE_ATTENTION_TYPE_DEFAULT) + horizontal_global_attention = get_scalar_param( + sparsity, + SPARSE_HORIZONTAL_GLOBAL_ATTENTION, + SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT, 
+ ) + num_different_global_patterns = get_scalar_param( + sparsity, + SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS, + SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS_DEFAULT, + ) + + return { + SPARSE_MODE: SPARSE_FIXED_MODE, + SPARSE_BLOCK: block, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head, + SPARSE_NUM_LOCAL_BLOCKS: num_local_blocks, + SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks, + SPARSE_ATTENTION_TYPE: attention, + SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention, + SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS: num_different_global_patterns, + } + + +def get_sparse_variable_config(sparsity): + block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT) + different_layout_per_head = get_scalar_param( + sparsity, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT, + ) + num_random_blocks = get_scalar_param(sparsity, SPARSE_NUM_RANDOM_BLOCKS, SPARSE_NUM_RANDOM_BLOCKS_DEFAULT) + local_window_blocks = get_scalar_param(sparsity, SPARSE_LOCAL_WINDOW_BLOCKS, SPARSE_LOCAL_WINDOW_BLOCKS_DEFAULT) + global_block_indices = get_scalar_param(sparsity, SPARSE_GLOBAL_BLOCK_INDICES, SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT) + global_block_end_indices = get_scalar_param( + sparsity, + SPARSE_GLOBAL_BLOCK_END_INDICES, + SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT, + ) + attention = get_scalar_param(sparsity, SPARSE_ATTENTION_TYPE, SPARSE_ATTENTION_TYPE_DEFAULT) + horizontal_global_attention = get_scalar_param( + sparsity, + SPARSE_HORIZONTAL_GLOBAL_ATTENTION, + SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT, + ) + + return { + SPARSE_MODE: SPARSE_VARIABLE_MODE, + SPARSE_BLOCK: block, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head, + SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks, + SPARSE_LOCAL_WINDOW_BLOCKS: local_window_blocks, + SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices, + SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices, + SPARSE_ATTENTION_TYPE: attention, + SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention, + } + + +def get_sparse_bigbird_config(sparsity): + block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT) + different_layout_per_head = get_scalar_param( + sparsity, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT, + ) + num_random_blocks = get_scalar_param(sparsity, SPARSE_NUM_RANDOM_BLOCKS, SPARSE_NUM_RANDOM_BLOCKS_DEFAULT) + num_sliding_window_blocks = get_scalar_param( + sparsity, + SPARSE_NUM_SLIDING_WINDOW_BLOCKS, + SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT, + ) + num_global_blocks = get_scalar_param(sparsity, SPARSE_NUM_GLOBAL_BLOCKS, SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT) + + return { + SPARSE_MODE: SPARSE_BIGBIRD_MODE, + SPARSE_BLOCK: block, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head, + SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks, + SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks, + SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks, + } + + +def get_sparse_bslongformer_config(sparsity): + block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT) + different_layout_per_head = get_scalar_param( + sparsity, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT, + ) + num_sliding_window_blocks = get_scalar_param( + sparsity, + SPARSE_NUM_SLIDING_WINDOW_BLOCKS, + SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT, + ) + global_block_indices = get_scalar_param(sparsity, SPARSE_GLOBAL_BLOCK_INDICES, SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT) + global_block_end_indices = get_scalar_param( + sparsity, + SPARSE_GLOBAL_BLOCK_END_INDICES, 
+ SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT, + ) + + return { + SPARSE_MODE: SPARSE_BSLONGFORMER_MODE, + SPARSE_BLOCK: block, + SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head, + SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks, + SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices, + SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices, + } + + +def get_sparse_attention_mode(param_dict): + if SPARSE_MODE in param_dict.keys(): + return param_dict[SPARSE_MODE] + else: + return SPARSE_MODE_DEFAULT + + +def get_sparse_attention_type(param_dict): + if SPARSE_ATTENTION_TYPE in param_dict.keys(): + return param_dict[SPARSE_ATTENTION_TYPE] + else: + return SPARSE_ATTENTION_TYPE_DEFAULT + + +def get_pipeline_config(param_dict): + """Parses pipeline engine configuration. """ + default_pipeline = { + "stages": "auto", + "partition": "best", + "seed_layers": False, + "activation_checkpoint_interval": 0, + "pipe_partitioned": True, + "grad_partitioned": True, + } + config = default_pipeline + for key, val in param_dict.get("pipeline", {}).items(): + config[key] = val + return config + + +def get_optimizer_name(param_dict): + if OPTIMIZER in param_dict.keys() and TYPE in param_dict[OPTIMIZER].keys(): + return param_dict[OPTIMIZER][TYPE] + else: + return OPTIMIZER_TYPE_DEFAULT + + +def get_optimizer_params(param_dict): + if (get_optimizer_name(param_dict) is not None and OPTIMIZER_PARAMS in param_dict[OPTIMIZER].keys()): + return param_dict[OPTIMIZER][OPTIMIZER_PARAMS] + else: + return None + + +def get_optimizer_gradient_clipping(param_dict): + optimizer_params = get_optimizer_params(param_dict) + if optimizer_params is not None and MAX_GRAD_NORM in optimizer_params.keys(): + return optimizer_params[MAX_GRAD_NORM] + else: + return None + + +def get_optimizer_legacy_fusion(param_dict): + if OPTIMIZER in param_dict.keys() and LEGACY_FUSION in param_dict[OPTIMIZER].keys(): + return param_dict[OPTIMIZER][LEGACY_FUSION] + else: + return LEGACY_FUSION_DEFAULT + + +def get_zero_allow_untested_optimizer(param_dict): + return get_scalar_param(param_dict, ZERO_ALLOW_UNTESTED_OPTIMIZER, ZERO_ALLOW_UNTESTED_OPTIMIZER_DEFAULT) + + +def get_zero_force_ds_cpu_optimizer(param_dict): + return get_scalar_param(param_dict, ZERO_FORCE_DS_CPU_OPTIMIZER, ZERO_FORCE_DS_CPU_OPTIMIZER_DEFAULT) + + +def get_scheduler_name(param_dict): + if SCHEDULER in param_dict.keys() and TYPE in param_dict[SCHEDULER].keys(): + return param_dict[SCHEDULER][TYPE] + else: + return SCHEDULER_TYPE_DEFAULT + + +def get_scheduler_params(param_dict): + if (get_scheduler_name(param_dict) is not None and SCHEDULER_PARAMS in param_dict[SCHEDULER].keys()): + return param_dict[SCHEDULER][SCHEDULER_PARAMS] + else: + return None + + +def get_train_batch_size(param_dict): + return get_scalar_param(param_dict, TRAIN_BATCH_SIZE, TRAIN_BATCH_SIZE_DEFAULT) + + +def get_train_micro_batch_size_per_gpu(param_dict): + return get_scalar_param( + param_dict, + TRAIN_MICRO_BATCH_SIZE_PER_GPU, + TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT, + ) + + +def get_wall_clock_breakdown(param_dict): + return get_scalar_param(param_dict, WALL_CLOCK_BREAKDOWN, WALL_CLOCK_BREAKDOWN_DEFAULT) + + +def get_memory_breakdown(param_dict): + return get_scalar_param(param_dict, MEMORY_BREAKDOWN, MEMORY_BREAKDOWN_DEFAULT) + + +class HybridEngineConfig(DeepSpeedConfigModel): + enabled: bool = False + max_out_tokens: int = 512 + inference_tp_size: int = 1 + release_inference_cache: bool = False + pin_parameters: bool = True + tp_gather_partition_size: int = 8 + + +def 
get_hybrid_engine_config(param_dict): + hybrid_engine_config_dict = param_dict.get("hybrid_engine", {}) + hybrid_engine_config = HybridEngineConfig(**hybrid_engine_config_dict) + return hybrid_engine_config + + +def get_expert_data_topo_config(param_dict): + return get_scalar_param(param_dict, USE_DATA_BEFORE_EXPERT_PARALLEL, USE_DATA_BEFORE_EXPERT_PARALLEL_DEFAULT) + + +def get_eigenvalue_config(param_dict): + if get_quantize_enabled(param_dict): + param_dict = param_dict[QUANTIZE_TRAINING] + assert not get_eigenvalue_enabled(param_dict), "Eigenvalue based MoQ is temporarily disabled" + return ( + get_eigenvalue_enabled(param_dict), + get_eigenvalue_verbose(param_dict), + get_eigenvalue_max_iter(param_dict), + get_eigenvalue_tol(param_dict), + get_eigenvalue_stability(param_dict), + get_eigenvalue_gas_boundary_resolution(param_dict), + get_eigenvalue_layer_name(param_dict), + get_eigenvalue_layer_num(param_dict), + ) + else: + return ( + EIGENVALUE_ENABLED_DEFAULT, + EIGENVALUE_VERBOSE_DEFAULT, + EIGENVALUE_MAX_ITER_DEFAULT, + EIGENVALUE_TOL_DEFAULT, + EIGENVALUE_STABILITY_DEFAULT, + EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT, + EIGENVALUE_LAYER_NAME_DEFAULT, + EIGENVALUE_LAYER_NUM_DEFAULT, + ) + + +def get_eigenvalue_enabled(param_dict): + if EIGENVALUE in param_dict.keys(): + return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_ENABLED, EIGENVALUE_ENABLED_DEFAULT) + else: + return EIGENVALUE_ENABLED_DEFAULT + + +def get_eigenvalue_verbose(param_dict): + if EIGENVALUE in param_dict.keys(): + return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_VERBOSE, EIGENVALUE_VERBOSE_DEFAULT) + else: + return EIGENVALUE_VERBOSE_DEFAULT + + +def get_eigenvalue_max_iter(param_dict): + if EIGENVALUE in param_dict.keys(): + return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_MAX_ITER, EIGENVALUE_MAX_ITER_DEFAULT) + else: + return EIGENVALUE_MAX_ITER_DEFAULT + + +def get_eigenvalue_tol(param_dict): + if EIGENVALUE in param_dict.keys(): + return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_TOL, EIGENVALUE_TOL_DEFAULT) + else: + return EIGENVALUE_TOL_DEFAULT + + +def get_eigenvalue_stability(param_dict): + if EIGENVALUE in param_dict.keys(): + return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_STABILITY, EIGENVALUE_STABILITY_DEFAULT) + else: + return EIGENVALUE_STABILITY_DEFAULT + + +def get_eigenvalue_gas_boundary_resolution(param_dict): + if EIGENVALUE in param_dict.keys(): + return get_scalar_param( + param_dict[EIGENVALUE], + EIGENVALUE_GAS_BOUNDARY_RESOLUTION, + EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT, + ) + else: + return EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT + + +def get_eigenvalue_layer_name(param_dict): + if EIGENVALUE in param_dict.keys(): + return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_LAYER_NAME, EIGENVALUE_LAYER_NAME_DEFAULT) + else: + return EIGENVALUE_LAYER_NAME_DEFAULT + + +def get_eigenvalue_layer_num(param_dict): + if EIGENVALUE in param_dict.keys(): + return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_LAYER_NUM, EIGENVALUE_LAYER_NUM_DEFAULT) + else: + return EIGENVALUE_LAYER_NUM_DEFAULT + + +def get_checkpoint_params(param_dict): + return param_dict.get(CHECKPOINT, {}) + + +def get_data_types_params(param_dict): + return param_dict.get(DATA_TYPES, {}) + + +def get_checkpoint_tag_validation_mode(checkpoint_params): + tag_validation_mode = checkpoint_params.get(CHECKPOINT_TAG_VALIDATION, CHECKPOINT_TAG_VALIDATION_DEFAULT) + tag_validation_mode = tag_validation_mode.upper() + if tag_validation_mode in 
CHECKPOINT_TAG_VALIDATION_MODES: + return tag_validation_mode + else: + raise DeepSpeedConfigError( + "Checkpoint config contains invalid tag_validation " + f"value of {tag_validation_mode}, expecting one of {CHECKPOINT_TAG_VALIDATION_MODES}") + + +def get_checkpoint_parallel_write_pipeline(checkpoint_params): + par_write_params = checkpoint_params.get(CHECKPOINT_PARALLEL_WRITE, {}) + par_write_pipeline = par_write_params.get(CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE, + CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT) + if par_write_pipeline in [True, False]: + return par_write_pipeline + else: + raise DeepSpeedConfigError("checkpoint::parallel_write::pipeline_stage " + f"value of '{par_write_pipeline}' is invalid, expecting: true or false") + + +def get_dataloader_drop_last(param_dict): + return get_scalar_param(param_dict, DATALOADER_DROP_LAST, DATALOADER_DROP_LAST_DEFAULT) + + +'''Write deepspeed config files by modifying basic templates. +Can be used for quickly changing parameters via command line parameters.''' + + +class DeepSpeedConfigWriter: + + def __init__(self, data=None): + self.data = data if data is not None else {} + + def add_config(self, key, value): + self.data[key] = value + + def load_config(self, filename): + self.data = json.load(open(filename, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys) + + def write_config(self, filename): + with open(filename, "w") as outfile: + json.dump(self.data, outfile) + + +class DeepSpeedConfig(object): + + def __init__(self, config: Union[str, dict], mpu=None): + super(DeepSpeedConfig, self).__init__() + if isinstance(config, dict): + self._param_dict = config + elif os.path.exists(config): + self._param_dict = hjson.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys) + else: + try: + config_decoded = base64.urlsafe_b64decode(config).decode('utf-8') + self._param_dict = hjson.loads(config_decoded) + except (UnicodeDecodeError, AttributeError): + raise ValueError( + f"Expected a string path to an existing deepspeed config, or a dictionary or a valid base64. 
Received: {config}"
+                )
+        try:
+            self.global_rank = dist.get_rank()
+            if mpu is None:
+                self.world_size = dist.get_world_size()
+            else:
+                self.world_size = mpu.get_data_parallel_world_size()
+        except:
+            self.global_rank = 0
+            self.world_size = 1
+
+        # If elastic mode is enabled, compute the elastic config and update _param_dict
+        self.elasticity_enabled = elasticity_enabled(self._param_dict)
+        if self.elasticity_enabled:
+            logger.info("DeepSpeed elasticity support enabled")
+            final_batch_size, valid_gpus, micro_batch_size = compute_elastic_config(
+                ds_config=self._param_dict,
+                target_deepspeed_version=__version__,
+                world_size=self.world_size,
+            )
+
+            elastic_dict = self._param_dict[ELASTICITY]
+
+            # Ensure the resource scheduler saw the same elastic config we are using at runtime
+            ensure_immutable_elastic_config(runtime_elastic_config_dict=elastic_dict)
+
+            self.elastic_model_parallel_size = elastic_dict.get(MODEL_PARALLEL_SIZE, MODEL_PARALLEL_SIZE_DEFAULT)
+            if self.elastic_model_parallel_size < 1:
+                raise ElasticityConfigError("Model-parallel size cannot be less than 1, "
+                                            f"given model-parallel size: {self.elastic_model_parallel_size}")
+
+            self.num_gpus_per_node = elastic_dict.get(NUM_GPUS_PER_NODE, NUM_GPUS_PER_NODE_DEFAULT)
+            if self.num_gpus_per_node < 1:
+                raise ElasticityConfigError("Number of GPUs per node cannot be less than 1, "
+                                            f"given number of GPUs per node: {self.num_gpus_per_node}")
+
+            ignore_non_elastic_batch_info = elastic_dict.get(IGNORE_NON_ELASTIC_BATCH_INFO,
+                                                             IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT)
+
+            if not ignore_non_elastic_batch_info:
+                batch_params = [
+                    TRAIN_BATCH_SIZE,
+                    TRAIN_MICRO_BATCH_SIZE_PER_GPU,
+                    GRADIENT_ACCUMULATION_STEPS,
+                ]
+                if any(map(lambda t: t in self._param_dict, batch_params)):
+                    raise ElasticityConfigError("One or more batch related parameters were found in your " \
+                        f"ds_config ({TRAIN_BATCH_SIZE}, {TRAIN_MICRO_BATCH_SIZE_PER_GPU}, and/or " \
+                        f"{GRADIENT_ACCUMULATION_STEPS}). These parameters *will not be used* since " \
+                        "elastic training is enabled, which takes control of these parameters. " \
+                        "If you want to suppress this error (the parameters will be silently ignored) " \
+                        f"please set '{IGNORE_NON_ELASTIC_BATCH_INFO}': true in your elasticity config.")
+
+            # micro_bsz * world_size * gas = total_batch_size
+            # gas = total_batch_size // (micro_bsz * world_size)
+            gradient_accu_steps = final_batch_size // (micro_batch_size * self.world_size)
+
+            if TRAIN_BATCH_SIZE in self._param_dict:
+                logger.warning("[Elasticity] overriding train_batch_size: "
+                               f"{self._param_dict[TRAIN_BATCH_SIZE]} -> {final_batch_size}")
+            if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self._param_dict:
+                logger.warning("[Elasticity] overriding train_micro_batch_size_per_gpu: "
+                               f"{self._param_dict[TRAIN_MICRO_BATCH_SIZE_PER_GPU]} -> {micro_batch_size}")
+            if GRADIENT_ACCUMULATION_STEPS in self._param_dict:
+                logger.warning("[Elasticity] overriding gradient_accumulation_steps: "
+                               f"{self._param_dict[GRADIENT_ACCUMULATION_STEPS]} -> {gradient_accu_steps}")
+
+            logger.info(f"[Elasticity] valid GPU counts: {valid_gpus}")
+
+            self._param_dict[TRAIN_BATCH_SIZE] = final_batch_size
+            self._param_dict[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = micro_batch_size
+            self._param_dict[GRADIENT_ACCUMULATION_STEPS] = gradient_accu_steps
+
+        # Pass a copy so that user json is unmodified, e.g.
for logging + self._initialize_params(copy.copy(self._param_dict)) + self._configure_train_batch_size() + self._do_sanity_check() + + def _initialize_params(self, param_dict): + self.train_batch_size = get_train_batch_size(param_dict) + #print(f"beginning get_train_batch_size = {get_train_batch_size}") + self.train_micro_batch_size_per_gpu = get_train_micro_batch_size_per_gpu(param_dict) + self.gradient_accumulation_steps = get_gradient_accumulation_steps(param_dict) + self.steps_per_print = get_steps_per_print(param_dict) + self.dump_state = get_dump_state(param_dict) + + self.disable_allgather = get_disable_allgather(param_dict) + self.communication_data_type = get_communication_data_type(param_dict) + self.seq_parallel_communication_data_type = get_communication_data_type( + param_dict, SEQ_PARALLEL_COMMUNICATION_DATA_TYPE, SEQ_PARALLEL_COMMUNICATION_DATA_TYPE_DEFAULT) + self.prescale_gradients = get_prescale_gradients(param_dict) + self.gradient_predivide_factor = get_gradient_predivide_factor(param_dict) + self.sparse_gradients_enabled = get_sparse_gradients_enabled(param_dict) + + self.zero_config = get_zero_config(param_dict) + self.mics_shard_size = self.zero_config.mics_shard_size + self.mics_hierarchial_params_gather = self.zero_config.mics_hierarchical_params_gather + self.zero_optimization_stage = self.zero_config.stage + self.zero_enabled = self.zero_optimization_stage > 0 + + self.activation_checkpointing_config = DeepSpeedActivationCheckpointingConfig(param_dict) + + self.comms_config = DeepSpeedCommsConfig(param_dict) + self.monitor_config = get_monitor_config(param_dict) + + self.gradient_clipping = get_gradient_clipping(param_dict) + self.fp16_enabled = get_fp16_enabled(param_dict) + self.fp16_auto_cast = get_fp16_auto_cast(param_dict) + self.bfloat16_enabled = get_bfloat16_enabled(param_dict) + self.bfloat16_immediate_grad_update = get_bfloat16_immediate_grad_update(param_dict) + assert not (self.fp16_enabled + and self.bfloat16_enabled), 'bfloat16 and fp16 modes cannot be simultaneously enabled' + self.fp16_master_weights_and_gradients = get_fp16_master_weights_and_grads_enabled(param_dict) + self.amp_enabled = get_amp_enabled(param_dict) + self.amp_params = get_amp_params(param_dict) + self.loss_scale = get_loss_scale(param_dict) + self.initial_dynamic_scale = get_initial_dynamic_scale(param_dict) + self.dynamic_loss_scale_args = get_dynamic_loss_scale_args(param_dict) + + self.compression_config = get_compression_config(param_dict) + self.graph_harvesting = get_graph_harvesting(param_dict) + + self.optimizer_name = get_optimizer_name(param_dict) + if (self.optimizer_name is not None and self.optimizer_name.lower() in DEEPSPEED_OPTIMIZERS): + self.optimizer_name = self.optimizer_name.lower() + + self.optimizer_params = get_optimizer_params(param_dict) + self.optimizer_legacy_fusion = get_optimizer_legacy_fusion(param_dict) + + self.zero_allow_untested_optimizer = get_zero_allow_untested_optimizer(param_dict) + + self.zero_force_ds_cpu_optimizer = get_zero_force_ds_cpu_optimizer(param_dict) + + self.scheduler_name = get_scheduler_name(param_dict) + self.scheduler_params = get_scheduler_params(param_dict) + + self.flops_profiler_config = DeepSpeedFlopsProfilerConfig(param_dict) + self.wall_clock_breakdown = (get_wall_clock_breakdown(param_dict) | self.flops_profiler_config.enabled) + self.memory_breakdown = get_memory_breakdown(param_dict) + self.autotuning_config = DeepSpeedAutotuningConfig(param_dict) + + ( + self.eigenvalue_enabled, + self.eigenvalue_verbose, + 
self.eigenvalue_max_iter, + self.eigenvalue_tol, + self.eigenvalue_stability, + self.eigenvalue_gas_boundary_resolution, + self.eigenvalue_layer_name, + self.eigenvalue_layer_num, + ) = get_eigenvalue_config(param_dict) + + self.use_data_before_expert_parallel_ = get_expert_data_topo_config(param_dict) + self.hybrid_engine = get_hybrid_engine_config(param_dict) + + self.sparse_attention = get_sparse_attention(param_dict) + self.pipeline = get_pipeline_config(param_dict) + + self.pld_enabled = get_pld_enabled(param_dict) + self.pld_params = get_pld_params(param_dict) + + self.curriculum_enabled_legacy = get_curriculum_enabled_legacy(param_dict) + self.curriculum_params_legacy = get_curriculum_params_legacy(param_dict) + + self.data_efficiency_enabled = get_data_efficiency_enabled(param_dict) + self.data_efficiency_config = get_data_efficiency_config(param_dict) + + checkpoint_params = get_checkpoint_params(param_dict) + validation_mode = get_checkpoint_tag_validation_mode(checkpoint_params) + self.checkpoint_tag_validation_enabled = (validation_mode != ValidationMode.IGNORE) + self.checkpoint_tag_validation_fail = validation_mode == ValidationMode.FAIL + self.load_universal_checkpoint = checkpoint_params.get(LOAD_UNIVERSAL_CHECKPOINT, + LOAD_UNIVERSAL_CHECKPOINT_DEFAULT) + + self.use_node_local_storage = checkpoint_params.get(USE_NODE_LOCAL_STORAGE_CHECKPOINT, + USE_NODE_LOCAL_STORAGE_CHECKPOINT_DEFAULT) + + data_types_params = get_data_types_params(param_dict) + self.grad_accum_dtype = data_types_params.get(GRAD_ACCUM_DTYPE, GRAD_ACCUM_DTYPE_DEFAULT) + + par_write_pipe = get_checkpoint_parallel_write_pipeline(checkpoint_params) + self.checkpoint_parallel_write_pipeline = par_write_pipe + + self.aio_config = get_aio_config(param_dict) + + self.dataloader_drop_last = get_dataloader_drop_last(param_dict) + + self.nebula_config = DeepSpeedNebulaConfig(param_dict) + + self.weight_quantization_config = WeightQuantConfig( + **param_dict['weight_quantization']) if 'weight_quantization' in param_dict else None + + self.compile_config = get_compile_config(param_dict) + + def _batch_assertion(self): + + train_batch = self.train_batch_size + micro_batch = self.train_micro_batch_size_per_gpu + grad_acc = self.gradient_accumulation_steps + + assert (train_batch > 0), f"Train batch size: {train_batch} has to be greater than 0" + + assert (micro_batch > 0), f"Micro batch size per gpu: {micro_batch} has to be greater than 0" + + assert (grad_acc > 0), f"Gradient accumulation steps: {grad_acc} has to be greater than 0" + + assert train_batch == micro_batch * grad_acc * self.world_size, ( + f"Check batch related parameters. 
train_batch_size is not equal " + "to micro_batch_per_gpu * gradient_acc_step * world_size " + f"{train_batch} != {micro_batch} * {grad_acc} * {self.world_size}") + + def _set_batch_related_parameters(self): + + train_batch = self.train_batch_size + micro_batch = self.train_micro_batch_size_per_gpu + grad_acc = self.gradient_accumulation_steps + + #print(f"train_batch = {train_batch}, micro_batch={micro_batch}") + + # all values are provided nothing needs to be set + if train_batch is not None and micro_batch is not None and grad_acc is not None: + return + + # global_accumulation_steps needs to be set + elif train_batch is not None and micro_batch is not None: + grad_acc = train_batch // micro_batch + grad_acc //= self.world_size + self.gradient_accumulation_steps = grad_acc + + # micro_batch_per_gpu needs to be set + elif train_batch is not None and grad_acc is not None: + micro_batch = train_batch // self.world_size + micro_batch //= grad_acc + self.train_micro_batch_size_per_gpu = micro_batch + + # train_batch_size needs to be set + elif micro_batch is not None and grad_acc is not None: + train_batch_size = micro_batch * grad_acc + train_batch_size *= self.world_size + self.train_batch_size = train_batch_size + + # gradient_accumulation_steps and micro_batch_per_gpus is set + elif train_batch is not None: + self.gradient_accumulation_steps = 1 + self.train_micro_batch_size_per_gpu = train_batch // self.world_size + + # train_batch_size and gradient_accumulation_step is set + elif micro_batch is not None: + self.train_batch_size = micro_batch * self.world_size + self.gradient_accumulation_steps = 1 + + # either none of the three parameters are provided or just gradient_accumulation_step is provided + else: + assert False, \ + 'Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided' + + def _configure_train_batch_size(self): + self._set_batch_related_parameters() + self._batch_assertion() + + def _do_sanity_check(self): + self._do_error_check() + + self._do_warning_check() + + def print_user_config(self): + logger.info(" json = {}".format( + json.dumps( + self._param_dict, + sort_keys=True, + indent=4, + cls=ScientificNotationEncoder, + separators=(",", ":"), + ))) + + def print(self, name): + logger.info("{}:".format(name)) + for arg in sorted(vars(self)): + if arg != "_param_dict": + dots = "." * (29 - len(arg)) + logger.info(" {} {} {}".format(arg, dots, getattr(self, arg))) + + self.print_user_config() + + def _do_error_check(self): + assert (self.train_micro_batch_size_per_gpu + ), "DeepSpeedConfig: {} is not defined".format(TRAIN_MICRO_BATCH_SIZE_PER_GPU) + + assert ( + self.gradient_accumulation_steps), "DeepSpeedConfig: {} is not defined".format(GRADIENT_ACCUMULATION_STEPS) + + if self.zero_enabled: + assert (self.zero_optimization_stage <= + ZeroStageEnum.max_stage), "DeepSpeedConfig: Maximum supported ZeRO stage is {}".format( + ZeroStageEnum.max_stage) + + if self.fp16_master_weights_and_gradients: + assert self.zero_enabled and self.zero_optimization_stage == ZeroStageEnum.gradients, "Fp16_master_weights_and_grads is only supported with ZeRO Stage 2 for now." 
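+
+    # Illustrative worked example (values are assumed, not defaults; not part of the
+    # upstream file): _set_batch_related_parameters above enforces
+    #     train_batch_size == train_micro_batch_size_per_gpu * gradient_accumulation_steps * world_size
+    # With world_size = 8 and a ds_config providing
+    #     {"train_micro_batch_size_per_gpu": 4, "gradient_accumulation_steps": 2}
+    # the derived train_batch_size is 4 * 2 * 8 = 64. Conversely, providing only
+    #     {"train_batch_size": 64, "train_micro_batch_size_per_gpu": 4}
+    # resolves gradient_accumulation_steps = 64 // 4 // 8 = 2.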
+ + def _do_warning_check(self): + fp16_enabled = self.fp16_enabled + + vocabulary_size = self._param_dict.get(VOCABULARY_SIZE, VOCABULARY_SIZE_DEFAULT) + if vocabulary_size and vocabulary_size % TENSOR_CORE_ALIGN_SIZE != 0: + logger.warning( + "DeepSpeedConfig: vocabulary size {} is not aligned to {}, may import tensor core utilization.".format( + vocabulary_size, TENSOR_CORE_ALIGN_SIZE)) + + if (self.optimizer_params is not None and MAX_GRAD_NORM in self.optimizer_params.keys() + and self.optimizer_params[MAX_GRAD_NORM] > 0): + if fp16_enabled: + if self.global_rank == 0: + logger.warning("DeepSpeedConfig: In FP16 mode, DeepSpeed will pass {}:{} to FP16 wrapper".format( + MAX_GRAD_NORM, self.optimizer_params[MAX_GRAD_NORM])) + else: + if self.global_rank == 0: + logger.warning( + "DeepSpeedConfig: In FP32 mode, DeepSpeed does not permit MAX_GRAD_NORM ({}) > 0, setting to zero" + .format(self.optimizer_params[MAX_GRAD_NORM])) + self.optimizer_params[MAX_GRAD_NORM] = 0.0 diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/constants.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..679230ca7d4c3f08fb0e4c87d1f251e958081fb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/constants.py @@ -0,0 +1,457 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +############################################# +# Routes +############################################# +ROUTE_TRAIN = "train" +ROUTE_EVAL = "eval" +ROUTE_PREDICT = "predict" +ROUTE_ENCODE = "encode" + +############################################# +# Batch size +############################################# +TRAIN_BATCH_SIZE = "train_batch_size" +TRAIN_BATCH_SIZE_DEFAULT = None + +############################################# +# Sparse attention +############################################# +SPARSE_ATTENTION = "sparse_attention" +SPARSE_DENSE_MODE = "dense" +SPARSE_FIXED_MODE = "fixed" +SPARSE_VARIABLE_MODE = "variable" +SPARSE_BIGBIRD_MODE = "bigbird" +SPARSE_BSLONGFORMER_MODE = "bslongformer" +SPARSE_MODE = "mode" +SPARSE_MODE_DEFAULT = SPARSE_FIXED_MODE +SPARSE_BLOCK = "block" +SPARSE_BLOCK_DEFAULT = 16 +SPARSE_DIFFERENT_LAYOUT_PER_HEAD = "different_layout_per_head" +SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT = False +SPARSE_NUM_LOCAL_BLOCKS = "num_local_blocks" +SPARSE_NUM_LOCAL_BLOCKS_DEFAULT = 4 +SPARSE_NUM_GLOBAL_BLOCKS = "num_global_blocks" +SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT = 1 +SPARSE_ATTENTION_TYPE = "attention" +SPARSE_ATTENTION_TYPE_DEFAULT = "bidirectional" +SPARSE_HORIZONTAL_GLOBAL_ATTENTION = "horizontal_global_attention" +SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT = False +SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS = "num_different_global_patterns" +SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS_DEFAULT = 1 +SPARSE_NUM_RANDOM_BLOCKS = "num_random_blocks" +SPARSE_NUM_RANDOM_BLOCKS_DEFAULT = 0 +SPARSE_LOCAL_WINDOW_BLOCKS = "local_window_blocks" +SPARSE_LOCAL_WINDOW_BLOCKS_DEFAULT = [4] +SPARSE_GLOBAL_BLOCK_INDICES = "global_block_indices" +SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT = [0] +SPARSE_GLOBAL_BLOCK_END_INDICES = "global_block_end_indices" +SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT = None +SPARSE_NUM_SLIDING_WINDOW_BLOCKS = "num_sliding_window_blocks" +SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT = 3 + +############################################# +# Optimizer and lr scheduler +############################################# +OPTIMIZER = "optimizer" +OPTIMIZER_TYPE_DEFAULT = None 
+OPTIMIZER_PARAMS = "params" +TYPE = "type" +LEGACY_FUSION = "legacy_fusion" +LEGACY_FUSION_DEFAULT = False +SCHEDULER = "scheduler" +SCHEDULER_TYPE_DEFAULT = None +SCHEDULER_PARAMS = "params" +MAX_GRAD_NORM = 'max_grad_norm' + +############################################# +# Optimizer and lr scheduler +############################################# +ZERO_ALLOW_UNTESTED_OPTIMIZER = "zero_allow_untested_optimizer" +ZERO_ALLOW_UNTESTED_OPTIMIZER_DEFAULT = False +ZERO_FORCE_DS_CPU_OPTIMIZER = "zero_force_ds_cpu_optimizer" +ZERO_FORCE_DS_CPU_OPTIMIZER_DEFAULT = True + +# Steps +STEPS_PER_PRINT = "steps_per_print" +STEPS_PER_PRINT_DEFAULT = 10 + +######################################### +# Training micro batch size per GPU +######################################### +# Batch size for one training step. This is used when the +# TRAIN_BATCH_SIZE cannot fit in GPU memory to determine +# the number of gradient accumulation steps. By default, this +# is set to None. Users can configure in ds_config.json as below example: +TRAIN_MICRO_BATCH_SIZE_PER_GPU = ''' +TRAIN_MICRO_BATCH_SIZE_PER_GPU is defined in this format: +"train_micro_batch_size_per_gpu": 1 +''' +TRAIN_MICRO_BATCH_SIZE_PER_GPU = "train_micro_batch_size_per_gpu" +TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = None + +######################################### +# Gradient Accumulation +######################################### +# Gradient accumulation feature. By default, this feature is not enabled. +# Users can configure in ds_config.json as below example: +GRADIENT_ACCUMULATION_FORMAT = ''' +Gradient Accumulation should be of the format: +"gradient_accumulation_steps": 1 +''' +GRADIENT_ACCUMULATION_STEPS = "gradient_accumulation_steps" +GRADIENT_ACCUMULATION_STEPS_DEFAULT = None + +# DeepSpeed CSR gradient sparsity +SPARSE_GRADIENTS = "sparse_gradients" +SPARSE_GRADIENTS_DEFAULT = False + +######################################### +# BFLOAT16 support +######################################### +# BFLOAT16 feature. By default, this feature is not enabled. +# Users can configure in ds_config.json as below example: +BFLOAT16_FORMAT = ''' +BFLOAT16 parameters should be of the format: +"bf16": { + "enabled": true +} +''' +BFLOAT16 = "bf16" +BFLOAT16_OLD = "bfloat16" # keeping for backwards compatibility + +BFLOAT16_ENABLED = "enabled" +BFLOAT16_ENABLED_DEFAULT = False + +# BFLOAT16 optimizer immediate gradient update +BFLOAT16_IMMEDIATE_GRAD_UPDATE = "immediate_grad_update" +BFLOAT16_IMMEDIATE_GRAD_UPDATE_DEFAULT = False + +######################################### +# FP16 support +######################################### +# FP16 feature. By default, this feature is not enabled. 
+# Users can configure in ds_config.json as below example: +FP16_FORMAT = ''' +FP16 parameters should be of the format: +"fp16": { + "enabled": true, + "auto_cast": false, + "loss_scale": 0, + "initial_scale_power": 16, + "loss_scale_window": 1000, + "hysteresis": 2, + "consecutive_hysteresis": false, + "min_loss_scale": 1 +} +''' +FP16 = "fp16" + +FP16_ENABLED = "enabled" +FP16_ENABLED_DEFAULT = False + +# FP16 loss scale, zero means using dynamic scaling +FP16_LOSS_SCALE = "loss_scale" +FP16_LOSS_SCALE_DEFAULT = 0 + +FP16_AUTO_CAST = "auto_cast" +FP16_AUTO_CAST_DEFAULT = False + +# FP16 initial dynamic scale loss power +FP16_INITIAL_SCALE_POWER = "initial_scale_power" +FP16_INITIAL_SCALE_POWER_DEFAULT = 16 + +# FP16 loss scale window +FP16_LOSS_SCALE_WINDOW = "loss_scale_window" +FP16_LOSS_SCALE_WINDOW_DEFAULT = 1000 + +# FP16 hysteresis +FP16_HYSTERESIS = "hysteresis" +FP16_HYSTERESIS_DEFAULT = 2 + +# FP16 consecutive hysteresis +FP16_CONSECUTIVE_HYSTERESIS = "consecutive_hysteresis" +FP16_CONSECUTIVE_HYSTERESIS_DEFAULT = False + +# FP16 min loss scale +FP16_MIN_LOSS_SCALE = "min_loss_scale" +FP16_MIN_LOSS_SCALE_DEFAULT = 1 + +# FP16 master and grads +FP16_MASTER_WEIGHTS_AND_GRADS = "fp16_master_weights_and_grads" +FP16_MASTER_WEIGHTS_AND_GRADS_DEFAULT = False + +######################################### +# Apex AMP support +######################################### +# Use Apex AMP for mixed precision support, all parameters (other than 'enabled') will be passed to +# amp.initialize(model, optimizer, **amp_params) +# See apex documentation for supported parameters/features: https://nvidia.github.io/apex/amp.html#apex.amp.initialize +AMP_FORMAT = ''' +"amp" { + "enabled: true, + "opt_level": "O1", + ... +} +''' +AMP = "amp" + +AMP_ENABLED = "enabled" +AMP_ENABLED_DEFAULT = False + +######################################### +# Gradient clipping +######################################### +# Gradient clipping. By default, this feature is not enabled. +# Users can configure in ds_config.json as below example: +GRADIENT_CLIPPING_FORMAT = ''' +Gradient clipping should be enabled as: +"gradient_clipping": 1.0 +''' +GRADIENT_CLIPPING = 'gradient_clipping' +GRADIENT_CLIPPING_DEFAULT = 0. + +######################################### +# Capture graph for short kernels sequences +######################################### +# Graph harvesting. By default, this feature is not enabled. 
+# Users can configure in ds_config.json as below example:
+GRAPH_HARVESTING_FORMAT = '''
+Graph harvesting should be enabled as:
+"graph_harvesting": true
+'''
+GRAPH_HARVESTING = 'graph_harvesting'
+GRAPH_HARVESTING_DEFAULT = False
+
+#########################################
+# Communication data type
+#########################################
+# Supported types: ['none', 'fp16', 'fp32']
+# By default, this feature is not enabled ('none' value)
+# Users can configure in ds_config.json as below example:
+COMMUNICATION_DATA_TYPE_FORMAT = '''
+Communication data type should be set as:
+"communication_data_type": "fp32"
+'''
+COMMUNICATION_DATA_TYPE = "communication_data_type"
+COMMUNICATION_DATA_TYPE_DEFAULT = None
+
+###########################################################
+# Gradient communication data type for sequence parallelism
+###########################################################
+# Supported types: ['fp16', 'bf16', 'fp32']
+# Default value is fp32
+# Users can configure in ds_config.json as below example:
+SEQ_PARALLEL_COMMUNICATION_DATA_TYPE_FORMAT = '''
+Optional comm data type for sequence parallelism should be set as:
+"seq_parallel_communication_data_type": "fp32"
+'''
+SEQ_PARALLEL_COMMUNICATION_DATA_TYPE = "seq_parallel_comm_data_type"
+SEQ_PARALLEL_COMMUNICATION_DATA_TYPE_DEFAULT = "fp32"
+
+#########################################
+# Scale/predivide gradients before allreduce
+#########################################
+# Prescale gradients. By default, this feature is not enabled.
+# Users can configure in ds_config.json as below example:
+PRESCALE_GRADIENTS_FORMAT = '''
+Gradient prescaling should be enabled as:
+"prescale_gradients": true
+'''
+PRESCALE_GRADIENTS = "prescale_gradients"
+PRESCALE_GRADIENTS_DEFAULT = False
+
+GRADIENT_PREDIVIDE_FACTOR_FORMAT = '''
+Gradient predivide factor should be set as:
+"gradient_predivide_factor": 1.0
+'''
+GRADIENT_PREDIVIDE_FACTOR = "gradient_predivide_factor"
+GRADIENT_PREDIVIDE_FACTOR_DEFAULT = 1.0
+
+#########################################
+# Disable AllGather
+#########################################
+# Disable AllGather. By default, this feature is not enabled.
+# Users can configure in ds_config.json as below example:
+DISABLE_ALLGATHER_FORMAT = '''
+Disable AllGather should be enabled as:
+"disable_allgather": true
+'''
+DISABLE_ALLGATHER = "disable_allgather"
+DISABLE_ALLGATHER_DEFAULT = False
+
+#########################################
+# Dump DeepSpeed state
+#########################################
+# Dump State. By default, this feature is not enabled.
+# Users can configure in ds_config.json as below example:
+DUMP_STATE_FORMAT = '''
+Dump state should be enabled as:
+"dump_state": true
+'''
+DUMP_STATE = 'dump_state'
+DUMP_STATE_DEFAULT = False
+
+#########################################
+# Vocabulary size
+#########################################
+# Vocabulary size.
+# Users can configure in ds_config.json as below example:
+VOCABULARY_SIZE_FORMAT = '''
+Vocabulary size can be specified as:
+"vocabulary_size": 1024
+'''
+VOCABULARY_SIZE = 'vocabulary_size'
+VOCABULARY_SIZE_DEFAULT = None
+
+#########################################
+# Wall clock breakdown
+#########################################
+# Wall clock breakdown. By default, this feature is not enabled.
+# Users can configure in ds_config.json as below example:
+WALL_CLOCK_BREAKDOWN_FORMAT = '''
+Wall clock breakdown should be enabled as:
+"wall_clock_breakdown": true
+'''
+WALL_CLOCK_BREAKDOWN = 'wall_clock_breakdown'
+WALL_CLOCK_BREAKDOWN_DEFAULT = False
+
+MEMORY_BREAKDOWN = 'memory_breakdown'
+MEMORY_BREAKDOWN_DEFAULT = False
+
+#########################################
+# Eigenvalue
+#########################################
+# Eigenvalue computation. By default, this feature is not enabled.
+# Users can configure in ds_config.json as below example:
+EIGENVALUE_FORMAT = '''
+Eigenvalue computation can be specified as:
+"eigenvalue": {
+  "enabled": true,
+  "verbose": true,
+  "max_iter": 100,
+  "tol": 1e-2,
+  "stability": 1e-6
+}
+'''
+EIGENVALUE = "eigenvalue"
+
+# Eigenvalue enable signal
+EIGENVALUE_ENABLED = "enabled"
+EIGENVALUE_ENABLED_DEFAULT = False
+
+EIGENVALUE_VERBOSE = "verbose"
+EIGENVALUE_VERBOSE_DEFAULT = False
+
+EIGENVALUE_MAX_ITER = "max_iter"
+EIGENVALUE_MAX_ITER_DEFAULT = 100
+
+EIGENVALUE_TOL = "tol"
+EIGENVALUE_TOL_DEFAULT = 1e-2
+
+EIGENVALUE_STABILITY = "stability"
+EIGENVALUE_STABILITY_DEFAULT = 1e-6
+
+EIGENVALUE_GAS_BOUNDARY_RESOLUTION = "gas_boundary_resolution"
+EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT = 1
+
+EIGENVALUE_LAYER_NAME = "layer_name"
+EIGENVALUE_LAYER_NAME_DEFAULT = "bert.encoder.layer"
+
+EIGENVALUE_LAYER_NUM = "layer_num"
+EIGENVALUE_LAYER_NUM_DEFAULT = 0
+
+#########################################
+# Progressive Layer Drop (PLD)
+#########################################
+PROGRESSIVE_LAYER_DROP = "progressive_layer_drop"
+
+# PLD enable signal
+PLD_ENABLED = "enabled"
+PLD_ENABLED_DEFAULT = False
+
+PLD_THETA = "theta"
+PLD_THETA_DEFAULT = 1.0
+
+PLD_GAMMA = "gamma"
+PLD_GAMMA_DEFAULT = 0.001
+
+
+#########################################
+# Validation modes
+#########################################
+class ValidationMode:
+    WARN = "WARN"
+    IGNORE = "IGNORE"
+    FAIL = "FAIL"
+
+
+#########################################
+# Checkpoint config params
+#########################################
+# "checkpoint": {
+#   tag_validation=["Ignore"|"Warn"|"Fail"]
+#   load_universal=false
+#   use_node_local_storage=false
+#   parallel_write: {
+#     pipeline_stage: [True|False]
+#   }
+# }
+CHECKPOINT = "checkpoint"
+CHECKPOINT_TAG_VALIDATION = "tag_validation"
+CHECKPOINT_TAG_VALIDATION_DEFAULT = ValidationMode.WARN
+CHECKPOINT_TAG_VALIDATION_MODES = [ValidationMode.WARN, ValidationMode.IGNORE, ValidationMode.FAIL]
+
+LOAD_UNIVERSAL_CHECKPOINT = "load_universal"
+LOAD_UNIVERSAL_CHECKPOINT_DEFAULT = False
+
+USE_NODE_LOCAL_STORAGE_CHECKPOINT = "use_node_local_storage"
+USE_NODE_LOCAL_STORAGE_CHECKPOINT_DEFAULT = False
+
+CHECKPOINT_PARALLEL_WRITE = "parallel_write"
+CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE = "pipeline_stage"
+CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT = False
+
+#########################################
+# Data types config params
+#########################################
+# "data_types": {
+#   grad_accum_dtype=["bf16"|"fp16"|"fp32"]
+# }
+
+DATA_TYPES = "data_types"
+GRAD_ACCUM_DTYPE = "grad_accum_dtype"
+GRAD_ACCUM_DTYPE_DEFAULT = None
+
+#########################################
+# Drop the last incomplete Batch
+#########################################
+# dataloader_drop_last. By default, this feature is not enabled.
+# Users can configure in ds_config.json as below example: +DATALOADER_DROP_LAST_FORMAT = ''' +The last incomplete batch can be dropped by setting: +"dataloader_drop_last": True +''' +DATALOADER_DROP_LAST = "dataloader_drop_last" +DATALOADER_DROP_LAST_DEFAULT = False + +######################################### +# PIPELINE PARALLELISM +######################################### +PIPE_REPLICATED = 'ds_pipe_replicated' + +######################################### +# DATA PARALLELISM +######################################### +DATA_PARALLEL_GROUP = "data_parallel_group" +GLOBAL_RANK = "global_rank" + +######################################### +# EXPERT-DATA PARALLELISM TOPO Config +######################################### +USE_DATA_BEFORE_EXPERT_PARALLEL = "use_data_before_expert_parallelism" +USE_DATA_BEFORE_EXPERT_PARALLEL_DEFAULT = False diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/config.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/config.py new file mode 100644 index 0000000000000000000000000000000000000000..6234805189254ba4fe3d7d8aac8e7062de721c51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/config.py @@ -0,0 +1,168 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .constants import * +import copy +from ..config_utils import get_scalar_param + + +# TODO: Reducing config verbosity by returning None or {} when disabled. +# One challenge is that we still need to somehow include the default values, +# for example the *_ENABLED has default of false. 
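+
+# Illustrative sketch (values are assumptions; key names come from .constants in this
+# package): the "data_efficiency" section parsed by the helpers below might look like
+#   "data_efficiency": {
+#     "enabled": true,
+#     "seed": 1234,
+#     "data_sampling": {
+#       "enabled": true, "num_epochs": 1000, "num_workers": 0,
+#       "curriculum_learning": {"enabled": false}
+#     },
+#     "data_routing": {"enabled": false, "random_ltd": {"enabled": false}}
+#   }
+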
+def get_data_efficiency_config(param_dict): + output = {} + output[DATA_EFFICIENCY_ENABLED] = get_data_efficiency_enabled(param_dict) + output[DATA_EFFICIENCY_SEED] = get_data_efficiency_seed(param_dict) + if DATA_EFFICIENCY not in param_dict.keys(): + param_dict[DATA_EFFICIENCY] = {} + sub_param_dict = param_dict[DATA_EFFICIENCY] + output[DATA_SAMPLING] = get_data_sampling(sub_param_dict) + output[DATA_ROUTING] = get_data_routing(sub_param_dict) + + return output + + +def get_data_efficiency_enabled(param_dict): + if DATA_EFFICIENCY in param_dict.keys(): + return get_scalar_param(param_dict[DATA_EFFICIENCY], DATA_EFFICIENCY_ENABLED, DATA_EFFICIENCY_ENABLED_DEFAULT) + else: + return False + + +def get_data_efficiency_seed(param_dict): + if DATA_EFFICIENCY in param_dict.keys(): + return get_scalar_param(param_dict[DATA_EFFICIENCY], DATA_EFFICIENCY_SEED, DATA_EFFICIENCY_SEED_DEFAULT) + else: + return DATA_EFFICIENCY_SEED_DEFAULT + + +def get_data_sampling(param_dict): + output = {} + output[DATA_SAMPLING_ENABLED] = get_data_sampling_enabled(param_dict) + output[DATA_SAMPLING_NUM_EPOCHS] = get_data_sampling_num_epochs(param_dict) + output[DATA_SAMPLING_NUM_WORKERS] = get_data_sampling_num_workers(param_dict) + if DATA_SAMPLING not in param_dict.keys(): + param_dict[DATA_SAMPLING] = {} + sub_param_dict = param_dict[DATA_SAMPLING] + output[CURRICULUM_LEARNING] = get_curriculum_learning(sub_param_dict) + + return output + + +def get_data_sampling_enabled(param_dict): + if DATA_SAMPLING in param_dict.keys(): + return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_ENABLED, DATA_SAMPLING_ENABLED_DEFAULT) + else: + return False + + +def get_data_sampling_num_epochs(param_dict): + if DATA_SAMPLING in param_dict.keys(): + return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_NUM_EPOCHS, DATA_SAMPLING_NUM_EPOCHS_DEFAULT) + else: + return DATA_SAMPLING_NUM_EPOCHS_DEFAULT + + +def get_data_sampling_num_workers(param_dict): + if DATA_SAMPLING in param_dict.keys(): + return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_NUM_WORKERS, + DATA_SAMPLING_NUM_WORKERS_DEFAULT) + else: + return DATA_SAMPLING_NUM_WORKERS_DEFAULT + + +def get_curriculum_learning(param_dict): + output = {} + output[CURRICULUM_LEARNING_ENABLED] = get_curriculum_learning_enabled(param_dict) + if CURRICULUM_LEARNING not in param_dict.keys(): + param_dict[CURRICULUM_LEARNING] = {} + sub_param_dict = param_dict[CURRICULUM_LEARNING] + if output[CURRICULUM_LEARNING_ENABLED]: + assert CURRICULUM_LEARNING_METRICS in sub_param_dict.keys( + ), f"Curriculum learning is enabled, {CURRICULUM_LEARNING_METRICS} must be specified" + for key, val in get_curriculum_learning_params(param_dict).items(): + output[key] = val + return output + + +def get_curriculum_learning_enabled(param_dict): + if CURRICULUM_LEARNING in param_dict.keys(): + return get_scalar_param(param_dict[CURRICULUM_LEARNING], CURRICULUM_LEARNING_ENABLED, + CURRICULUM_LEARNING_ENABLED_DEFAULT) + else: + return False + + +def get_curriculum_learning_params(param_dict): + if CURRICULUM_LEARNING in param_dict.keys(): + curriculum_learning_params = copy.copy(param_dict[CURRICULUM_LEARNING]) + curriculum_learning_params.pop(CURRICULUM_LEARNING_ENABLED) + return curriculum_learning_params + else: + return {} + + +def get_curriculum_enabled_legacy(param_dict): + if CURRICULUM_LEARNING_LEGACY in param_dict.keys(): + return get_scalar_param(param_dict[CURRICULUM_LEARNING_LEGACY], CURRICULUM_ENABLED_LEGACY, + CURRICULUM_ENABLED_DEFAULT_LEGACY) + else: + return 
False + + +def get_curriculum_params_legacy(param_dict): + if CURRICULUM_LEARNING_LEGACY in param_dict.keys(): + curriculum_params = copy.copy(param_dict[CURRICULUM_LEARNING_LEGACY]) + curriculum_params.pop(CURRICULUM_ENABLED_LEGACY) + return curriculum_params + else: + return False + + +def get_data_routing(param_dict): + output = {} + output[DATA_ROUTING_ENABLED] = get_data_routing_enabled(param_dict) + if DATA_ROUTING not in param_dict.keys(): + param_dict[DATA_ROUTING] = {} + sub_param_dict = param_dict[DATA_ROUTING] + output[RANDOM_LTD] = get_random_ltd(sub_param_dict) + + return output + + +def get_data_routing_enabled(param_dict): + if DATA_ROUTING in param_dict.keys(): + return get_scalar_param(param_dict[DATA_ROUTING], DATA_ROUTING_ENABLED, DATA_ROUTING_ENABLED_DEFAULT) + else: + return False + + +def get_random_ltd(param_dict): + output = {} + output[RANDOM_LTD_ENABLED] = RANDOM_LTD_ENABLED_DEFAULT + output[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE] = {} + output[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][ + RANDOM_LTD_LAYER_TOKEN_LR_ENABLED] = RANDOM_LTD_LAYER_TOKEN_LR_ENABLED_DEFAULT + if get_random_ltd_enabled(param_dict): + output[RANDOM_LTD_ENABLED] = get_random_ltd_enabled(param_dict) + for key, val in get_random_ltd_params(param_dict).items(): + output[key] = val + return output + + +def get_random_ltd_enabled(param_dict): + if RANDOM_LTD in param_dict.keys(): + return get_scalar_param(param_dict[RANDOM_LTD], RANDOM_LTD_ENABLED, RANDOM_LTD_ENABLED_DEFAULT) + else: + return False + + +def get_random_ltd_params(param_dict): + if RANDOM_LTD in param_dict.keys(): + random_ltd_params = copy.copy(param_dict[RANDOM_LTD]) + random_ltd_params.pop(RANDOM_LTD_ENABLED) + return random_ltd_params + else: + return {} diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/constants.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..1ade640e38d949da8caa051ec40d9f096d83f064 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/constants.py @@ -0,0 +1,116 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Data efficiency library + See sample config at https://www.deepspeed.ai/docs/config-json/data-efficiency +""" +DATA_EFFICIENCY = "data_efficiency" +DATA_EFFICIENCY_ENABLED = "enabled" +DATA_EFFICIENCY_ENABLED_DEFAULT = False +DATA_EFFICIENCY_SEED = "seed" +DATA_EFFICIENCY_SEED_DEFAULT = 1234 + +######################################### +# Data efficiency - Data Sampling +######################################### +DATA_SAMPLING = "data_sampling" +DATA_SAMPLING_ENABLED = "enabled" +DATA_SAMPLING_ENABLED_DEFAULT = False +DATA_SAMPLING_NUM_EPOCHS = "num_epochs" +DATA_SAMPLING_NUM_EPOCHS_DEFAULT = 1000 +DATA_SAMPLING_NUM_WORKERS = "num_workers" +DATA_SAMPLING_NUM_WORKERS_DEFAULT = 0 + +######################################### +# Data efficiency - Data Sampling - Curriculum Learning +######################################### +CURRICULUM_LEARNING = "curriculum_learning" +CURRICULUM_LEARNING_ENABLED = "enabled" +CURRICULUM_LEARNING_ENABLED_DEFAULT = False +CURRICULUM_LEARNING_CLUSTER_PATH = "data_cluster_path" +CURRICULUM_LEARNING_METRICS = "curriculum_metrics" +CURRICULUM_LEARNING_SAMPLE_PATH = "index_to_sample_path" +CURRICULUM_LEARNING_METRIC_PATH = "index_to_metric_path" +CURRICULUM_LEARNING_CLUSTERING_TYPE = "clustering_type" +CURRICULUM_LEARNING_SINGLE_CLUSTER = "single_cluster" +CURRICULUM_LEARNING_CLUSTER_PREFIX = "cluster" +CURRICULUM_LEARNING_DIFFICULTY_TYPE = "difficulty_type" +CURRICULUM_LEARNING_VALUE_BASED = "value" +CURRICULUM_LEARNING_PERCENTILE_BASED = "percentile" +CURRICULUM_LEARNING_MIN_DIFFICULTY = "min_difficulty" +CURRICULUM_LEARNING_MAX_DIFFICULTY = "max_difficulty" +CURRICULUM_LEARNING_SCHEDULE_TYPE = "schedule_type" +CURRICULUM_LEARNING_SCHEDULE_CONFIG = "schedule_config" +CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY = "difficulty" +CURRICULUM_LEARNING_SCHEDULE_MAX_STEP = "max_step" +CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP = "total_curriculum_step" +CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP = "difficulty_step" +CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE = "root_degree" +CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE = "fixed_discrete" +CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT = "fixed_root" +CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR = "fixed_linear" +CURRICULUM_LEARNING_SCHEDULE_CUSTOM = "custom" +CURRICULUM_LEARNING_CURRENT_DIFFICULTY = "current_difficulty" + +CURRICULUM_LEARNING_BATCH = "batch" +CURRICULUM_LEARNING_CONSUMED_SAMPLES = "consumed_samples" +CURRICULUM_LEARNING_STEP = "curriculum_step" +CURRICULUM_LEARNING_CURRENT_DIFFICULTIES = "current_difficulties" +CURRICULUM_LEARNING_DATA_CLUSTER_PATHS = "data_cluster_paths" +CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION = "data_cluster_current_position" +CURRICULUM_LEARNING_NP_RNG_STATE = "np_rng_state" + +######################################### +# Curriculum Learning legacy implementation +######################################### +CURRICULUM_LEARNING_LEGACY = "curriculum_learning" + +CURRICULUM_ENABLED_LEGACY = "enabled" +CURRICULUM_ENABLED_DEFAULT_LEGACY = False + +######################################### +# Data efficiency - Data Routing +######################################### +DATA_ROUTING = "data_routing" +DATA_ROUTING_ENABLED = "enabled" +DATA_ROUTING_ENABLED_DEFAULT = False + +######################################### +# Data efficiency - Data Routing - Random LTD +######################################### +RANDOM_LTD = "random_ltd" +RANDOM_LTD_ENABLED = "enabled" +RANDOM_LTD_ENABLED_DEFAULT = False + +RANDOM_LTD_MODEL_MASK_NAME = 
"model_mask_name" +RANDOM_LTD_MODEL_TYPE = "model_type" +RANDOM_LTD_MICRO_BATCH_SIZE = "micro_batch_size" +RANDOM_LTD_GLOBAL_BATCH_SIZE = "global_batch_size" +RANDOM_LTD_SAMPLE_INDEX = "sample_idx" +RANDOM_LTD_ATTENTION_MASK = "attention_mask" +RANDOM_LTD_HIDDEN_STATE_ORDER = "hidden_state_order" +RANDOM_LTD_LAYER_NUM = "random_ltd_layer_num" +RANDOM_LTD_LAYER_ID = "random_ltd_layer_id" +RANDOM_LTD_TOTAL_LAYER_NUM = "total_layer_num" +RANDOM_LTD_CONSUMED_LAYER_TOKENS = "consumed_layer_tokens" + +# scheduler +RANDOM_LTD_SCHEDULER = "random_ltd_schedule" +RANDOM_LTD_MAX_VALUE = "max_value" +RANDOM_LTD_MIN_VALUE = "min_value" +RANDOM_LTD_CURRENT_VALUE = "current_value" +RANDOM_LTD_SCHEDULE_CONFIG = "schedule_config" +RANDOM_LTD_INCREASE_STEP = "seq_per_step" +RANDOM_LTD_REQUIRE_STEP = "require_steps" +RANDOM_LTD_SCHEDULER_TYPE = "schedule_type" +RANDOM_LTD_CURR_STEP = "current_steps" + +# learning rate schedulers +RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE = "layer_token_lr_schedule" +RANDOM_LTD_LAYER_TOKEN_LR_ENABLED = "enabled" +RANDOM_LTD_LAYER_TOKEN_LR_ENABLED_DEFAULT = False +RANDOM_LTD_TOTAL_LAYER_TOKENS = "total_layer_tokens" +RANDOM_LTD_WARMUP_TYPE = "warmup_type" +RANDOM_LTD_WARMUP_LAYER_TOKENS = "warmup_layer_tokens" diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/curriculum_scheduler.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/curriculum_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..23d747957dc4647e06fad0a94e5e4b071b6f6e23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/curriculum_scheduler.py @@ -0,0 +1,158 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import math +from deepspeed.utils import logger +from .constants import * + + +class CurriculumScheduler(object): + + def __init__(self, config): + super().__init__() + self.state = {} + assert CURRICULUM_LEARNING_MIN_DIFFICULTY in config, \ + f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MIN_DIFFICULTY}'" + assert CURRICULUM_LEARNING_MAX_DIFFICULTY in config, \ + f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MAX_DIFFICULTY}'" + assert CURRICULUM_LEARNING_SCHEDULE_TYPE in config, \ + f"Curriculum learning requires the config '{CURRICULUM_LEARNING_SCHEDULE_TYPE}'" + self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY] = config[CURRICULUM_LEARNING_MIN_DIFFICULTY] + self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] = config[CURRICULUM_LEARNING_MAX_DIFFICULTY] + self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = config[CURRICULUM_LEARNING_MIN_DIFFICULTY] + self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] = config[CURRICULUM_LEARNING_SCHEDULE_TYPE] + self.first_step = True + if config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE: + """ + The schedule_config is a list of difficulty and a list of max + step belonging to each difficulty. Example json config: + "schedule_config": { + "difficulty": [1,2,3], + "max_step": [5,10] + } + The "max_step" has one less element than "difficulty", because + the last difficulty will be used for all following steps. + The self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] is a dictionary of + difficulty : [max step for this difficulty, next difficulty]. 
+ """ + assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY}'" + assert CURRICULUM_LEARNING_SCHEDULE_MAX_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_MAX_STEP}'" + assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) > 0 + assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) > 0 + assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) == len( + config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) + 1 + self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG] + elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT: + """ + The schedule_config includes: + total_curriculum_step: how many steps the curriculum learning takes to go + from min difficulty to max difficulty. + difficulty_step: the difficulty level determined every time must + be a multiple of this difficulty_step. This is used to determine + the step of difficulty increase, and to ensure the use of NVIDIA + Tensor Core acceleration (requires multiple of 8 (FP16) or + 16 (INT8)). + root_degree: the degree of the root function. Degree of 2 means + square root and degree of 3 means cube root. Degree of 1 is + equivalent to linear. + "schedule_config": { + "total_curriculum_step": 30000, + "difficulty_step": 8, + "root_degree": 2 + } + """ + assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'" + assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'" + assert CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE}'" + if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0: + logger.warning( + f'When using seqlen metric, the difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your metric/hardware.' + ) + self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG] + elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR: + """ + The schedule_config is the same as CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT but without the + root_degree. 
+ "schedule_config": { + "total_curriculum_step": 30000, + "difficulty_step": 8 + } + """ + assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'" + assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'" + if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0: + logger.warning( + f'When using seqlen metric, the difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your metric/hardware.' + ) + self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG] + elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM: + """ + Fully customized schedule. User need to provide a custom schedule + function by using the set_custom_curriculum_learning_schedule API + in deepspeed/runtime/engine.py + """ + self.custom_get_difficulty = None + else: + raise RuntimeError('Unsupported curriculum schedule type') + + def get_current_difficulty(self): + return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] + + def set_current_difficulty(self, difficulty): + self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = difficulty + + def set_custom_get_difficulty(self, schedule_function): + self.custom_get_difficulty = schedule_function + + def get_state(self): + return self.state + + def set_state(self, state): + self.state = state + + def __fixed_discrete_get_difficulty(self, global_steps): + s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] + if global_steps > s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][-1]: + return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][-1] + for i in range(len(s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP])): + if global_steps <= s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][i]: + return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][i] + + def __fixed_root_get_difficulty(self, global_steps, root_degree=None): + s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] + if root_degree is None: + root_degree = s_state[CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE] + next_difficulty = (float(global_steps) / s_state[CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP])**(1.0 / root_degree) + next_difficulty = math.floor( + next_difficulty * + (self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] - self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY]) + + self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY]) + next_difficulty -= (next_difficulty % s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP]) + next_difficulty = min(next_difficulty, self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY]) + return next_difficulty + + def get_difficulty(self, global_steps): + if self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE: + return self.__fixed_discrete_get_difficulty(global_steps) + elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR: + return self.__fixed_root_get_difficulty(global_steps, 1) + elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT: + return self.__fixed_root_get_difficulty(global_steps) + elif 
self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM: + return self.custom_get_difficulty(global_steps) + else: + raise RuntimeError('Unsupported curriculum schedule type') + + def update_difficulty(self, global_steps): + if self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] < self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY]: + self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = self.get_difficulty(global_steps) + return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py new file mode 100644 index 0000000000000000000000000000000000000000..93d351169834d68d6430d9f36d31e0fba5f5c51b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py @@ -0,0 +1,880 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import sys +from collections import defaultdict +import csv +import time +from multiprocessing import Process, Manager +import numpy as np +import torch +from torch.utils.data import BatchSampler, SequentialSampler, DataLoader, Subset + +import deepspeed.comm as dist +from deepspeed.utils import logger +from deepspeed.runtime.data_pipeline.data_sampling.indexed_dataset import MMapIndexedDataset, valid_dtypes +from deepspeed.runtime.data_pipeline.data_sampling.utils import split_dataset, split_index, create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype + + +class DataAnalyzer(object): + + def __init__(self, + dataset, + num_workers=1, + worker_id=0, + num_threads=1, + num_threads_reduce=1, + specific_threads=[], + batch_size=1, + metric_names=[], + metric_functions=[], + metric_types=[], + metric_dtypes=[], + save_path="./", + collate_fn=None, + custom_map_init=None, + custom_map_update=None, + custom_map_finalize=None, + custom_reduce=None, + sample_indices=None): + super().__init__() + self.dataset = dataset + self.num_workers = num_workers + self.worker_id = worker_id + self.num_threads = num_threads + self.num_threads_reduce = num_threads_reduce + self.specific_threads = specific_threads + self.batch_size = batch_size + self.metric_names = metric_names + self.metric_functions = metric_functions + self.metric_types = metric_types + self.metric_dtypes = metric_dtypes + self.save_path = save_path + self.collate_fn = collate_fn + self.custom_map_init = custom_map_init + self.custom_map_update = custom_map_update + self.custom_map_finalize = custom_map_finalize + self.custom_reduce = custom_reduce + self.sample_indices = sample_indices + + def init_metric_results(self, thread_id, metric_names, metric_types, metric_dtypes, save_path, worker_id): + metric_results = [] + for m_idx in range(len(metric_names)): + metric_name, metric_type, 
metric_dtype = metric_names[m_idx], \ + metric_types[m_idx], metric_dtypes[m_idx] + assert metric_dtype in valid_dtypes, f"metric_dtype {metric_dtype} not supported. Supported dtypes {valid_dtypes}" + metric_save_path = f"{save_path}/{metric_name}/worker{worker_id}_thread{thread_id}/" + os.makedirs(metric_save_path, exist_ok=True) + if metric_type == 'single_value_per_sample': + sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric" + sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_dtype) + metric_to_sample_fname = f"{metric_save_path}/{metric_name}_metric_to_sample" + os.system(f"rm -rf {metric_to_sample_fname}*") + metric_to_sample_dict = defaultdict(list) + metric_results.append({ + "sample_to_metric_fname": sample_to_metric_fname, + "sample_to_metric_builder": sample_to_metric_builder, + "metric_to_sample_fname": metric_to_sample_fname, + "metric_to_sample_dict": metric_to_sample_dict + }) + elif metric_type == 'accumulate_value_over_samples': + metric_value = None + metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value" + metric_results.append({"metric_value": metric_value, "metric_value_fname": metric_value_fname}) + return metric_results + + def update_metric_results(self, + data, + metric_types, + metric_dtypes, + metric_functions, + metric_results, + batch_start_idx=0): + for m_idx in range(len(metric_types)): + metric_type, metric_dtype, metric_function, metric_result = metric_types[m_idx], \ + metric_dtypes[m_idx], metric_functions[m_idx], metric_results[m_idx] + metric_values = metric_function(data) + + assert torch.is_tensor(metric_values) or isinstance(metric_values, np.ndarray), \ + "metric_function must return a tensor or array" + assert metric_values.dtype == metric_dtype, \ + f"metric_function result dtype {metric_values.dtype} does not match metric_dtype {metric_dtype}" + if isinstance(metric_values, np.ndarray): + metric_values = torch.from_numpy(metric_values) + + if metric_type == 'single_value_per_sample': + for row in range(metric_values.size()[0]): + sample_idx = batch_start_idx + row # sample idx following dataset iteration order + if isinstance(data, dict) and 'index' in data: # Megatron use case, idx provided in 'index' field + sample_idx = data['index'][row][0].item() + elif self.sample_indices is not None: # user defined shuffling of indices + sample_idx = self.sample_indices[sample_idx] + metric_result["sample_to_metric_builder"].add_item(metric_values[row].reshape(-1)) + metric_result["metric_to_sample_dict"][metric_values[row].item()].append(sample_idx) + for m_value in metric_result["metric_to_sample_dict"]: + if len(metric_result["metric_to_sample_dict"][m_value]) > 100: + metric_fname = metric_result["metric_to_sample_fname"] + with open(f"{metric_fname}_{m_value}.csv", 'a') as f: + writer = csv.writer(f) + writer.writerows([metric_result["metric_to_sample_dict"][m_value]]) + metric_result["metric_to_sample_dict"][m_value] = [] + elif metric_type == 'accumulate_value_over_samples': + if metric_result["metric_value"] is None: + metric_result["metric_value"] = metric_values + else: + metric_result["metric_value"].add_(metric_values) + + def finalize_metric_results(self, metric_types, metric_dtypes, metric_results): + for m_idx in range(len(metric_types)): + metric_type, metric_dtype, metric_result = metric_types[m_idx], \ + metric_dtypes[m_idx], metric_results[m_idx] + if metric_type == 'single_value_per_sample': + metric_fname = metric_result["sample_to_metric_fname"] + 
close_mmap_dataset_builder(metric_result["sample_to_metric_builder"], metric_fname) + for m_value in metric_result["metric_to_sample_dict"]: + if len(metric_result["metric_to_sample_dict"][m_value]) > 0: + metric_fname = metric_result["metric_to_sample_fname"] + with open(f"{metric_fname}_{m_value}.csv", 'a') as f: + writer = csv.writer(f) + writer.writerows([metric_result["metric_to_sample_dict"][m_value]]) + metric_result["metric_to_sample_dict"][m_value] = [] + elif metric_type == 'accumulate_value_over_samples': + if metric_result["metric_value"] is not None: + metric_value_builder = create_mmap_dataset_builder(metric_result["metric_value_fname"], + metric_dtype) + metric_value_builder.add_item(metric_result["metric_value"].reshape(-1)) + close_mmap_dataset_builder(metric_value_builder, metric_result["metric_value_fname"]) + + def run_map_helper(self, thread_id): + start_idx, end_idx = self.thread_splits[thread_id][0], \ + self.thread_splits[thread_id][1] + logger.info(f"worker {self.worker_id} thread {thread_id}: start working " \ + f"on data subset {start_idx} to {end_idx}") + thread_dataset = Subset(self.dataset, list(range(start_idx, end_idx))) + sampler = BatchSampler(SequentialSampler(thread_dataset), batch_size=self.batch_size, drop_last=False) + iterator = iter( + DataLoader(thread_dataset, + batch_sampler=sampler, + num_workers=0, + collate_fn=self.collate_fn, + pin_memory=False)) + if self.custom_map_init is None: + metric_results = self.init_metric_results(thread_id, self.metric_names, self.metric_types, + self.metric_dtypes, self.save_path, self.worker_id) + else: + metric_results = self.custom_map_init(thread_id, self.metric_names, self.metric_types, self.metric_dtypes, + self.save_path, self.worker_id) + total_sample = len(thread_dataset) + processed_sample = 0 + start = time.time() + while True: + try: + data = next(iterator) + batch_start_idx = start_idx + processed_sample + if self.custom_map_update is None: + self.update_metric_results(data, self.metric_types, self.metric_dtypes, self.metric_functions, + metric_results, batch_start_idx) + else: + self.custom_map_update(data, self.metric_types, self.metric_dtypes, self.metric_functions, + metric_results, batch_start_idx) + processed_sample += len(data) + duration = (time.time() - start) / 3600.0 + remain_duration = duration * total_sample / processed_sample - duration + logger.info( + f"worker {self.worker_id} thread {thread_id}: {processed_sample} " \ + f"out of {total_sample} processed in {duration:.2f} hr, " \ + f"estimated to finish in {remain_duration:.2f} hr") + except StopIteration: + logger.info(f"worker {self.worker_id} thread {thread_id}: reach end of file") + break + if self.custom_map_finalize is None: + self.finalize_metric_results(self.metric_types, self.metric_dtypes, metric_results) + else: + self.custom_map_finalize(self.metric_types, self.metric_dtypes, metric_results) + logger.info(f"worker {self.worker_id} thread {thread_id}: finished") + + def run_map(self): + self.worker_splits, self.thread_splits = split_dataset(self.dataset, self.num_workers, self.worker_id, + self.num_threads) + if len(self.specific_threads) > 0: + threads_to_run = self.specific_threads + else: + threads_to_run = list(range(self.num_threads)) + if self.num_threads > 1: + p = [] + for thread in threads_to_run: + p.append(Process(target=self.run_map_helper, args=(thread, ))) + p[thread].start() + + for thread in threads_to_run: + p[thread].join() + else: + assert self.num_threads == 1 + self.run_map_helper(0) + + def 
get_metric_value_percentiles(self, metric_name, num_sample_per_value, total_num_samples): + logger.info(f"Checking the value percentiles of metric {metric_name}...") + processed_samples = 0 + current_percentile = 5 + for key in sorted(num_sample_per_value.keys()): + processed_samples += num_sample_per_value[key] + if processed_samples >= total_num_samples * current_percentile / 100.0: + logger.info(f"Metric {metric_name} {current_percentile}th percentile: {key}") + current_percentile += 5 + + def merge_gather_map_stats(self, num_workers, num_threads, num_threads_reduce, t_idx_reduce, metric_save_path, + metric_name, return_dict): + results = [] + for w_idx in range(num_workers): + for t_idx in range(num_threads): + if (w_idx * num_threads + t_idx) % num_threads_reduce == t_idx_reduce: + w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/" + w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric" + w_sample_to_metric = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True) + unique_v = list(np.unique(w_sample_to_metric)) + sample_to_metric_count = len(w_sample_to_metric) + logger.info(f"Finished gathering map stats from worker {w_idx} thread {t_idx}.") + results.append([unique_v, sample_to_metric_count]) + return_dict[t_idx_reduce] = results + + def merge_sample_to_metric(self, t_idx_reduce, metric_save_path, metric_name, metric_value_dtype, + map_worker_thread): + sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}" + sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype) + for w_t in map_worker_thread: + w_metric_save_path = f"{metric_save_path}/worker{w_t[0]}_thread{w_t[1]}/" + w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric" + w_data = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True) + for row in range(len(w_data)): + sample_to_metric_builder.add_item(torch.tensor(w_data[row].astype(np.int64), dtype=torch.long)) + logger.info(f"Finished merge_sample_to_metric from worker {w_t[0]} thread {w_t[1]}.") + close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname) + + def merge_metric_to_sample(self, t_idx_reduce, metric_save_path, metric_name, sample_idx_dtype, metric_value_dtype, + unique_metric_values, num_workers, num_threads): + index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}" + index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype) + index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}" + index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype) + for unique_v in unique_metric_values: + samples = [] + for w_idx in range(num_workers): + for t_idx in range(num_threads): + w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/" + w_metric_to_sample_fname = f"{w_metric_save_path}/{metric_name}_metric_to_sample_{unique_v}.csv" + if os.path.isfile(w_metric_to_sample_fname): + with open(w_metric_to_sample_fname, 'r') as f: + datareader = csv.reader(f) + for row in datareader: + samples += [int(x) for x in row] + index_to_sample_builder.add_item(torch.tensor(samples, dtype=torch.long)) + index_to_metric_builder.add_item(torch.tensor([unique_v], dtype=torch.long)) + logger.info(f"Finished reducing metric {metric_name} value {unique_v}.") + close_mmap_dataset_builder(index_to_sample_builder, 
index_to_sample_fname) + close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname) + + def merge_map_results(self, dataset, metric_names, metric_types, save_path, num_workers, num_threads, + num_threads_reduce): + total_num_samples = len(dataset) + sample_idx_dtype = find_fit_int_dtype(0, total_num_samples - 1) + logger.info( + f"Total number of data samples: {total_num_samples}. Will use {sample_idx_dtype} to store the sample indexes." + ) + for m_idx in range(len(metric_names)): + metric_name, metric_type = metric_names[m_idx], metric_types[m_idx] + if metric_type == 'single_value_per_sample': + metric_save_path = f"{save_path}/{metric_name}/" + sample_to_metric_count = 0 + unique_metric_values = set([]) + manager = Manager() + return_dict = manager.dict() + p = [] + for t_idx_reduce in range(num_threads_reduce): + p.append( + Process(target=self.merge_gather_map_stats, + args=( + num_workers, + num_threads, + num_threads_reduce, + t_idx_reduce, + metric_save_path, + metric_name, + return_dict, + ))) + p[t_idx_reduce].start() + for t_idx_reduce in range(num_threads_reduce): + p[t_idx_reduce].join() + for t_idx_reduce in range(num_threads_reduce): + results = return_dict[t_idx_reduce] + for res in results: + unique_metric_values = unique_metric_values.union(set(res[0])) + sample_to_metric_count += res[1] + value_max = max(unique_metric_values) + value_min = min(unique_metric_values) + assert sample_to_metric_count == total_num_samples, "The number of samples in map result files are not correct. It's possible that some map worker didn't finish successfully." + metric_value_dtype = find_fit_int_dtype(value_min, value_max) + logger.info( + f"Metric {metric_name} has values between {value_min} and {value_max}. Will use {metric_value_dtype} to store the metric values." 
+ ) + + # sample_to_metric + map_worker_thread = [] + for w_idx in range(num_workers): + for t_idx in range(num_threads): + map_worker_thread.append([w_idx, t_idx]) + thread_splits = split_index(0, len(map_worker_thread), num_threads_reduce) + p = [] + for t_idx_reduce in range(num_threads_reduce): + start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1] + p.append( + Process(target=self.merge_sample_to_metric, + args=( + t_idx_reduce, + metric_save_path, + metric_name, + metric_value_dtype, + map_worker_thread[start_idx:end_idx], + ))) + p[t_idx_reduce].start() + for t_idx_reduce in range(num_threads_reduce): + p[t_idx_reduce].join() + + sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric" + sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype) + for t_idx_reduce in range(num_threads_reduce): + chunk_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}" + logger.info(f"Merging file {chunk_fname}") + sample_to_metric_builder.merge_file_(chunk_fname) + close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname) + sample_to_metric = MMapIndexedDataset(sample_to_metric_fname, skip_warmup=True) + assert len(sample_to_metric) == total_num_samples + + # metric_to_sample + unique_metric_values = list(sorted(unique_metric_values)) + thread_splits = split_index(0, len(unique_metric_values), num_threads_reduce) + p = [] + for t_idx_reduce in range(num_threads_reduce): + start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1] + p.append( + Process(target=self.merge_metric_to_sample, + args=( + t_idx_reduce, + metric_save_path, + metric_name, + sample_idx_dtype, + metric_value_dtype, + unique_metric_values[start_idx:end_idx], + num_workers, + num_threads, + ))) + p[t_idx_reduce].start() + for t_idx_reduce in range(num_threads_reduce): + p[t_idx_reduce].join() + index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample" + index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype) + index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric" + index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype) + for t_idx_reduce in range(num_threads_reduce): + chunk_is_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}" + logger.info(f"Merging file {chunk_is_fname}") + index_to_sample_builder.merge_file_(chunk_is_fname) + chunk_im_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}" + logger.info(f"Merging file {chunk_im_fname}") + index_to_metric_builder.merge_file_(chunk_im_fname) + close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname) + close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname) + + num_sample_per_value = DataAnalyzer.output_index_to_sample_percentile( + index_to_sample_fname, index_to_metric_fname, metric_name, metric_save_path, total_num_samples, + sample_idx_dtype) + self.get_metric_value_percentiles(metric_name, num_sample_per_value, total_num_samples) + elif metric_type == 'accumulate_value_over_samples': + metric_save_path = f"{save_path}/{metric_name}/" + metric_value = None + for w_idx in range(num_workers): + for t_idx in range(num_threads): + w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/" + w_metric_value_fname = f"{w_metric_save_path}/{metric_name}_metric_value" + w_metric_value = 
MMapIndexedDataset(w_metric_value_fname, skip_warmup=True) + if metric_value is None: + metric_value = np.copy(w_metric_value[0]) + else: + metric_value += np.copy(w_metric_value[0]) + value_max = int(max(metric_value)) + value_min = int(min(metric_value)) + metric_value_dtype = find_fit_int_dtype(value_min, value_max) + metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value" + metric_value_builder = create_mmap_dataset_builder(metric_value_fname, metric_value_dtype) + metric_value_builder.add_item(torch.tensor(metric_value.astype(np.int64), dtype=torch.long)) + close_mmap_dataset_builder(metric_value_builder, metric_value_fname) + + @staticmethod + def output_index_to_sample_percentile(index_to_sample_fname, index_to_metric_fname, metric_name, metric_save_path, + total_num_samples, sample_idx_dtype): + """ read index_to_metric and index_to_sample files and write distribution to index_to_sample_percentage_merged """ + num_sample_per_value = {} + index_to_sample = MMapIndexedDataset(index_to_sample_fname, skip_warmup=True) + index_to_metric = MMapIndexedDataset(index_to_metric_fname, skip_warmup=True) + index_to_sample_merged_fname = f"{metric_save_path}/{metric_name}_index_to_sample_percentile_merged" + index_to_sample_merged_builder = create_mmap_dataset_builder(index_to_sample_merged_fname, sample_idx_dtype) + for v_idx in range(len(index_to_sample)): + if v_idx > 0: + assert index_to_metric[v_idx] > index_to_metric[v_idx - 1] + num_sample_per_value[index_to_metric[v_idx][0]] = len(index_to_sample[v_idx]) + assert sum(list(num_sample_per_value.values())) == total_num_samples + merge_step = max(1, len(index_to_sample) // 100) + for v_idx in range(0, len(index_to_sample), merge_step): + merged_samples = np.copy( + np.concatenate(index_to_sample[v_idx:min(len(index_to_sample), (v_idx + merge_step))], axis=None)) + index_to_sample_merged_builder.add_item(torch.tensor(merged_samples.astype(np.int64), dtype=torch.long)) + logger.info(f"Finished merging index_to_sample {v_idx} to {v_idx+merge_step}.") + close_mmap_dataset_builder(index_to_sample_merged_builder, index_to_sample_merged_fname) + return num_sample_per_value + + def run_reduce(self): + if self.custom_reduce is None: + self.merge_map_results(self.dataset, self.metric_names, self.metric_types, self.save_path, + self.num_workers, self.num_threads, self.num_threads_reduce) + else: + self.custom_reduce(self.dataset, self.metric_names, self.metric_types, self.save_path, self.num_workers, + self.num_threads, self.num_threads_reduce) + + def run_map_reduce(self, comm_group=None): + self.run_map() + # wait for the mapping operation, where all nodes outputs their own (partial) result files + dist.barrier(group=comm_group) + if self.worker_id == 0: + self.run_reduce() + # wait for the reduce, where rank 0 merges all (partial) files. Dataset can then be used by all nodes. 
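# Note: run_map leaves per-worker/per-thread partial files under save_path and
# run_reduce (rank 0 only) merges them into the final index files, so the two
# barriers keep the other ranks from reading output that is not fully written yet.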
+ dist.barrier(group=comm_group) + + +class DistributedDataAnalyzer(object): + + def __init__( + self, + dataset, + num_workers=1, + num_threads=1, + worker_id=0, + batch_size=1, + metric_names=[], + metric_functions=[], + metric_types=[], + save_path="./", + collate_fn=None, + device='cuda', + comm_group=None, + sample_indices=None, + ) -> None: + self.dataset = dataset + self.batch_size = batch_size + self.metric_names = metric_names + self.metric_functions = metric_functions + self.metric_types = metric_types + self.save_path = save_path + self.collate_fn = collate_fn + self.device = device + self.sample_indices = sample_indices + self.num_threads = num_threads + self.worker_id = worker_id + + if not dist.is_initialized(): + dist.init_distributed() + + # comm_group and worker_id+num_workers are mutually exclusive + self.comm_group = comm_group + if self.comm_group is None: + # self.comm_group = deepspeed.utils.groups._clone_world_group() + self.num_workers = num_workers + self.worker_id = worker_id + else: + self.num_workers = self.comm_group.size() + self.worker_id = self.comm_group.rank() + + if self.worker_id == 0: + logger.info(f"Distributed data analyzer initialized with {self.num_workers} workers.") + + def run_map_helper(self, thread_id=0, metric_queues=None): + thread_start_idx, thread_end_idx = self.thread_splits[thread_id][0], self.thread_splits[thread_id][1] + worker_dataset = Subset(self.dataset, list(range(thread_start_idx, thread_end_idx))) + sampler = BatchSampler(SequentialSampler(worker_dataset), batch_size=self.batch_size, drop_last=False) + dataloader = DataLoader(dataset=worker_dataset, + batch_sampler=sampler, + num_workers=0, + collate_fn=self.collate_fn, + pin_memory=False) + + # set initial results list + metric_results = [] + for metric_type in self.metric_types: + assert metric_type in ['single_value_per_sample', 'accumulate_value_over_samples'], \ + f"metric_type {metric_type} not implemented." + metric_results.append([] if metric_type == 'single_value_per_sample' else None) + + # iterate dataloader and store metric results + batch_start_idx = thread_start_idx + for data in dataloader: + for m_idx in range(len(self.metric_names)): + metric_type, metric_function = self.metric_types[m_idx], self.metric_functions[m_idx] + metric_values = metric_function(data) + assert torch.is_tensor(metric_values) or isinstance(metric_values, np.ndarray), \ + "metric_function must return a tensor or array" + if isinstance(metric_values, np.ndarray): + metric_values = torch.from_numpy(metric_values) + assert metric_values.dtype in valid_dtypes, \ + f"metric_function result dtype {metric_values.dtype} not supported. 
Supported dtypes {valid_dtypes}" + + if metric_type == 'single_value_per_sample': + for row in range(metric_values.size()[0]): + value = metric_values[row].item() + sample_idx = batch_start_idx + row # sample idx following dataset iteration order + if isinstance(data, dict) and 'index' in data: # Megatron use case + sample_idx = data['index'][row][0].item() + elif self.sample_indices is not None: # user defined shuffling of indices + sample_idx = self.sample_indices[sample_idx] + metric_results[m_idx].append((value, sample_idx)) + elif metric_type == 'accumulate_value_over_samples': + if metric_results[m_idx] is None: + metric_results[m_idx] = metric_values + else: + metric_results[m_idx].add_(metric_values) + batch_start_idx += len(data) + + if self.num_threads == 1: + return metric_results + + # copy metric_results to the shared queue + assert metric_queues + for m_idx in range(len(self.metric_names)): + results = metric_results[m_idx] + if torch.is_tensor(results): + results = results.item() if results.dim() == 0 else results.tolist() + try: + metric_queues[m_idx].put((thread_id, results)) + except Exception as e: + logger.error(f"Error putting metric results to queue: {e}") + sys.exit(1) + + def run_map_reduce(self): + + # setup individual dataloaders + self.worker_splits, self.thread_splits = split_dataset(self.dataset, + self.num_workers, + self.worker_id, + num_threads=self.num_threads) + node_start_idx, node_end_idx = self.worker_splits[self.worker_id] + logger.info(f"worker {self.worker_id} working on data subset {node_start_idx} to {node_end_idx}.") + + if self.num_threads in [0, 1, None]: + metric_results = self.run_map_helper() + metric_results = [torch.tensor(m).to(self.device) for m in metric_results] + else: + + # create a shared queue of results per metric to be populated by individual threads + with Manager() as manager: + metric_queues = [manager.Queue() for _ in self.metric_names] + threads = [ + Process(target=self.run_map_helper, args=(t, metric_queues)) for t in range(self.num_threads) + ] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + + # gather results from shared queues into metric_results + metric_results = [None for _ in self.metric_names] + for m_idx, (queue, metric_type) in enumerate(zip(metric_queues, self.metric_types)): + while not queue.empty(): + t_idx, t_results = queue.get() + t_start_idx, t_end_idx = self.thread_splits[t_idx] + if t_start_idx >= t_end_idx: # no results from this thread + continue #corner case for small datasets and high thread count + t_results = torch.tensor(t_results) + if metric_type == 'single_value_per_sample': + # add thread results to the metric_results list, ordered by thread idx + if metric_results[m_idx] is None: # initialize if needed + metric_results[m_idx] = torch.zeros(node_end_idx - node_start_idx, + t_results.size(1)).to(self.device) + metric_results[m_idx][t_start_idx - node_start_idx:t_end_idx - node_start_idx] = t_results + else: + if metric_results[m_idx] is None: # initialize if needed + metric_results[m_idx] = torch.zeros(t_results.size()).to(self.device) + metric_results[m_idx].add_(t_results) + + # compute dtype for sample ids + total_num_samples = len(self.dataset) + sample_idx_dtype = find_fit_int_dtype(0, total_num_samples - 1) + logger.info(f"Total number of data samples: {total_num_samples}.") + logger.info(f"Will use {sample_idx_dtype} to store the sample indexes.") + + for m_idx in range(len(self.metric_names)): + metric_values, metric_name, metric_type = \ + 
metric_results[m_idx], self.metric_names[m_idx], self.metric_types[m_idx] + metric_save_path = f"{self.save_path}/{metric_name}/" + os.makedirs(metric_save_path, exist_ok=True) + + if metric_type == 'single_value_per_sample': + + # Compute sample and metric value dtypes based on range + values, samples = metric_values[:, 0], metric_values[:, 1] + value_min, value_max = Dist.min_max(values, self.comm_group) + sample_min, sample_max = Dist.min_max(samples, self.comm_group) + metric_value_dtype = find_fit_int_dtype(value_min, value_max) + sample_value_dtype = find_fit_int_dtype(sample_min, sample_max) + + # sample_to_metric maps sample ids to metric values, as a list of metric values + sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric" + values = [torch.tensor([x]) for x in metric_values[:, 0]] + self.file_write_ordered(values, sample_to_metric_fname, metric_value_dtype) + + # distributed sorting by values, gives an ordered disjoint subset of keys on nodes + metric_values = Dist.sample_sort(metric_values, self.comm_group, self.num_workers) + metric_to_samples_dict = {} + if len(metric_values) > 0: + for value, sample in metric_values: + if value.item() not in metric_to_samples_dict: + metric_to_samples_dict[value.item()] = [] + metric_to_samples_dict[value.item()].append(sample.item()) + + # index_to_metric and index_to_sample serialize a dicitonary from metric to samples + # index_to_metric stores a key per row, index_to_sample stores the values per row + values = [torch.tensor([x]) for x in metric_to_samples_dict.keys()] + samples = [torch.tensor(metric_to_samples_dict[x]) for x in metric_to_samples_dict.keys()] + index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric" #dict keys + index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample" #dict values + self.file_write_ordered(values, index_to_metric_fname, metric_value_dtype) + self.file_write_ordered(samples, index_to_sample_fname, sample_value_dtype) + + if self.worker_id == 0: + DataAnalyzer.output_index_to_sample_percentile(index_to_sample_fname, index_to_metric_fname, + metric_name, metric_save_path, total_num_samples, + sample_idx_dtype) + dist.barrier(self.comm_group) + + elif metric_type == 'accumulate_value_over_samples': + metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value" + dist.reduce(metric_values, dst=0, op=dist.ReduceOp.SUM, group=self.comm_group) + metric_value_dtype = find_fit_int_dtype(metric_values.min(), metric_values.max()) + + if self.worker_id == 0: + builder = create_mmap_dataset_builder(metric_value_fname, metric_value_dtype) + builder.add_item(metric_values.cpu()) + close_mmap_dataset_builder(builder, metric_value_fname) + dist.barrier(self.comm_group) + + def file_write_ordered(self, tensor_list, fname, numpy_dtype): + """ MPI_file_write_ordered extended to write a list of tensors, by one rank, iteratively """ + + # each node has a list of rows (tensors) to be written to the file. + # we will serialize it in order to communicate it in one comm step. + + tkwargs = dict(dtype=torch.int64, device=self.device) + + # 1. gather on rank 0 the number of rows to be sent/recv + row_count = torch.tensor([len(tensor_list)], **tkwargs) + row_counts = torch.zeros(self.num_workers, **tkwargs) + dist.all_gather_into_tensor(row_counts, row_count, group=self.comm_group) + assert row_counts[self.worker_id] == row_count == len(tensor_list), "all_gather failed" + + # 2. 
gather on rank 0 the sizes of the rows to be sent/recv + row_len = torch.tensor([len(l) for l in tensor_list], **tkwargs) + row_lens = Dist.gather_v(row_len, 0, self.comm_group, self.num_workers, self.worker_id) + + # 4. gather on rank 0 of the total size (sum of all row lengths) to be received + size = torch.tensor([sum(row_len).item()], **tkwargs) + sizes = torch.zeros(self.num_workers, **tkwargs) + dist.all_gather_into_tensor(sizes, size, group=self.comm_group) + assert sizes[self.worker_id] == size.item(), "all_gather did not return the same sizes" #sanity check + + # method to deserializes a buffer into rows of different lengths and write them to file + def write_buffer_to_file(buff, src, builder): + assert self.worker_id == 0, "only rank 0 can write to file" + + # collect all buffers and write them at once + buff = buff.cpu().detach().numpy() + row_offsets = np.cumsum([0] + row_lens[src].tolist()) + arr_list = [] + for i in range(len(row_lens[src])): + arr_list.append(buff[row_offsets[i]:row_offsets[i + 1]]) + builder.add_items(arr_list) + + # 5. rank 0 prepares output folder and file + if self.worker_id == 0: + os.makedirs(os.path.dirname(fname), exist_ok=True) + builder = create_mmap_dataset_builder(fname, numpy_dtype) + + # iterate through ranks that have data to be sent/recv/written + for src in [rank for rank, count in enumerate(row_counts) if count > 0]: + + dist.barrier(group=self.comm_group) + if self.worker_id == 0 and src == 0: # rank 0's write its own data + buffer = torch.cat(tensor_list, dim=0).to(self.device) + write_buffer_to_file(buffer, 0, builder) + elif self.worker_id == 0 and src > 0: # rank 0 receives other rank's data and writes it + buffer = torch.empty(sizes[src].item(), dtype=buffer.dtype, device=buffer.device) + err = dist.recv(buffer, src=src, group=self.comm_group, tag=src) + assert err == src and len(buffer) > 0, "recv failed" + write_buffer_to_file(buffer, src, builder) + elif self.worker_id == src: # current rank sends data to rank 0 + buffer = torch.cat(tensor_list, dim=0).to(self.device) + dist.send(buffer, 0, group=self.comm_group, tag=src) + + # rank 0 closes the file + if self.worker_id == 0: + close_mmap_dataset_builder(builder, fname) # close file + dist.barrier(self.comm_group) + + +class Dist: + """ auxiliary class to perform distributed operations on tensors""" + + @staticmethod + def min_max(tensor, comm_group): + """ given a distributed tensor, return the min/max values across all ranks""" + + value_min, value_max = tensor.min(), tensor.max() + dist.reduce(value_min, 0, op=dist.ReduceOp.MIN, group=comm_group) + dist.reduce(value_max, 0, op=dist.ReduceOp.MAX, group=comm_group) + return value_min.item(), value_max.item() + + @staticmethod + def gather_v(tensor, dst, comm_group, num_workers, worker_id): + """ MPI_Gatherv. 
gather tensors of variable sizes in a single rank """ + + # gather the number of rows to be sent/recv + size = torch.tensor([len(tensor)], dtype=torch.int64, device=tensor.device) + sizes = torch.zeros(num_workers, dtype=torch.int64, device=tensor.device) + dist.all_gather_into_tensor(sizes, size, group=comm_group) + assert sizes[worker_id] == size, "all_gather failed" + + # all_gather requires all tensors to be of same size so we need to pad them + max_size = max(sizes).item() + buffer = torch.empty(max_size, dtype=tensor.dtype, device=tensor.device) + buffer[0:size] = tensor.data + buffer_list = None + if worker_id == 0: # create padded recv buffers + buffer_list = [torch.empty(max_size, dtype=tensor.dtype, device=tensor.device) for _ in range(num_workers)] + dist.gather(buffer, buffer_list, dst=dst, group=comm_group) + + # revert padding and return value + if worker_id == 0: + buffer_list = [r[:s.item()] for r, s in zip(buffer_list, sizes)] + return buffer_list + + @staticmethod + def sample_sort(tensor, comm_group, num_workers, n_samples=100): + """ perform a distributed random sort of a tensor, and returns the sorted partial tensor""" + device, dims = tensor.device, tensor.size()[1] + + # 1 - sort rows by first column, then second column, then third, etc... + tensor = torch.tensor(sorted(tensor.tolist()), dtype=tensor.dtype, device=tensor.device) + + # 2 - collect few samples per rank + idx = torch.round(torch.linspace(0, len(tensor) - 1, n_samples)).to(int) + samples = tensor[idx][:, 0].contiguous().to(device) #only first column, all but last row + + # 2 - Allgather samples + all_samples = [torch.zeros(n_samples, dtype=samples.dtype, device=device) for _ in range(num_workers)] + dist.all_gather(all_samples, samples, group=comm_group) + all_samples = torch.cat(all_samples, dim=0).to(device) + + # 3 - Sort all samples and collect the ranges of each rank as equidistant + all_samples = all_samples.sort()[0] + idx = torch.round(torch.linspace(0, len(all_samples) - 1, num_workers + 1)).to(int) + ranges = all_samples[idx] # range of each rank r as ranges[r] <= x < ranges[r+1] + ranges[-1] += 1 # increase upper limit of last rank so that x < ranges[r+1]. + + # 4 - collect elements to send to each rank, based on the rank ranges + send = [] + for rank in range(num_workers): + mask = (tensor[:, 0] >= ranges[rank]) & (tensor[:, 0] < ranges[rank + 1]) + send.append(tensor[mask]) + + # 5. all to all to communicate the sizes to be sent/recv + send_count = [torch.tensor([len(s) * dims], dtype=torch.int64, device=device) for s in send] + recv_count = list(torch.empty([num_workers], dtype=torch.int64, device=device).chunk(num_workers)) + dist.all_to_all(recv_count, send_count, group=comm_group) + + # 6. all-to-all-v to communicate the elements to be sent/recv as a single tensor + send = torch.cat(send, dim=0).flatten().to(device) + recv = torch.zeros(sum(recv_count), dtype=send.dtype).to(device) + send_count = [s.item() for s in send_count] # convert to list of ints + recv_count = [r.item() for r in recv_count] + dist.all_to_all_single(recv, send, recv_count, send_count, group=comm_group) + del send + + # 7. the received tensor is the 1D disjoint subset of the distributed tensor. + # We will recover the original dimensionality and sort it by columns again. 
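# Note: after the exchange above, every rank holds a disjoint key range (first-column
# keys on rank r are strictly smaller than those on rank r+1), so the caller can write
# the per-rank index_to_metric / index_to_sample chunks in rank order and still obtain
# a globally sorted result.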
+ recv = recv.view(-1, dims) + recv = torch.tensor(sorted(recv.tolist()), dtype=recv.dtype, device=recv.device) + return recv + + +def test_compare_both_data_analyzers(dataset): + """ given a dataset, compare file and memory based data analyser""" + + id = lambda t: t.to(torch.int64) # identity + batch_sum = lambda t: id(t).sum() #sum batch + num_threads = 4 + kwargs = dict( + dataset=dataset, + batch_size=2**10, + worker_id=int(os.environ['RANK']), + num_workers=int(os.environ['WORLD_SIZE']), + metric_names=["mod", "batch_sum"], + metric_functions=[id, batch_sum], + metric_types=['single_value_per_sample', 'accumulate_value_over_samples'], + num_threads=num_threads, + ) + + dda = DistributedDataAnalyzer( + save_path="./output_dist", + device=f"cuda:{int(os.environ['LOCAL_RANK'])}", + **kwargs, + ) + start_time = time.time() + dda.run_map_reduce() + if dda.worker_id == 0: + print("DistributedDataAnalyzer runtime: %s seconds " % (time.time() - start_time)) + + da = DataAnalyzer(num_threads_reduce=num_threads, + save_path="./output_disk", + metric_dtypes=[torch.int64, torch.int64], + **kwargs) + start_time = time.time() + da.run_map_reduce() + if da.worker_id == 0: + print("DataAnalyzer runtime: %s seconds " % (time.time() - start_time)) + + output_paths = [ + "batch_sum/batch_sum_metric_value.bin", "batch_sum/batch_sum_metric_value.idx", \ + "mod/mod_index_to_metric.bin", "mod/mod_index_to_metric.idx", \ + "mod/mod_index_to_sample.bin", "mod/mod_index_to_sample.idx", \ + "mod/mod_index_to_sample_percentile_merged.bin", "mod/mod_index_to_sample_percentile_merged.idx", \ + "mod/mod_sample_to_metric.bin", "mod/mod_sample_to_metric.idx" + ] + + if dda.worker_id == 0: + for path in output_paths: + with open(os.path.join(da.save_path, path), 'rb') as f1, \ + open(os.path.join(dda.save_path, path), 'rb') as f2: + if f1.read() != f2.read(): + print(f"files {path} are not identical.") + + +if __name__ == "__main__": + + class TestDataset(torch.utils.data.Dataset): + + def __init__(self, size=10_000_000): + self.values = [(x + 7) % 10_000 for x in range(size)] + self.size = size + + __len__ = lambda self: self.size + __getitem__ = lambda self, idx: self.values[idx] + + test_compare_both_data_analyzers(TestDataset()) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_sampler.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..100bef3f7946c8d2b7280e234abc5e625b6c68f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/data_sampler.py @@ -0,0 +1,349 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +coding=utf-8 + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/data_samplers.py +""" + +import torch +import os +import numpy as np + +import deepspeed.comm as dist +from deepspeed.utils import logger +from deepspeed.accelerator import get_accelerator +from ..constants import * +from ..curriculum_scheduler import CurriculumScheduler +from .indexed_dataset import MMapIndexedDataset +from .utils import create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype + + +class DeepSpeedDataSampler(object): + + def __init__(self, + data_efficiency_config, + one_epoch_total_samples, + micro_batch_size, + data_parallel_rank, + data_parallel_size, + data_parallel_group, + gradient_accumulation_steps, + global_rank, + drop_last=True): + # Keep a copy of input params for later use. + self.data_efficiency_config = data_efficiency_config + self.one_epoch_total_samples = one_epoch_total_samples + self.index_dtype = find_fit_int_dtype(0, one_epoch_total_samples) + self.total_samples = one_epoch_total_samples * self.data_efficiency_config[DATA_SAMPLING][ + DATA_SAMPLING_NUM_EPOCHS] + self.micro_batch_size = micro_batch_size + self.data_parallel_rank = data_parallel_rank + self.data_parallel_group = data_parallel_group + self.micro_batch_times_data_parallel_size = \ + self.micro_batch_size * data_parallel_size + self.gradient_accumulation_steps = gradient_accumulation_steps + self.global_batch_size = self.micro_batch_times_data_parallel_size * \ + self.gradient_accumulation_steps + self.global_rank = global_rank + self.drop_last = drop_last + self.np_rng = np.random.default_rng(self.data_efficiency_config[DATA_EFFICIENCY_SEED]) + self.state = {} + self.batch = [] + self.consumed_samples = 0 + if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]: + self.curriculum_step = 0 + self.current_difficulties = {} + self.data_cluster_paths = [] + self.data_cluster_current_position = [] + self.curriculum_schedulers = {} + self.curriculum_index_to_sample = {} + self.curriculum_index_to_metric = {} + self.difficulty_type = {} + self.clustering_type = {} + self.data_1epoch_size = None + if self.global_rank == 0: + self.data_clusters = [] + self.data_cluster_sizes = [] + cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_CLUSTER_PATH] + if not os.path.exists(cluster_path): + os.makedirs(cluster_path) + for metric in self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]: + self.curriculum_schedulers[metric] = CurriculumScheduler( + data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][metric]) + self.difficulty_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_DIFFICULTY_TYPE] + self.clustering_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_CLUSTERING_TYPE] + if self.global_rank == 0: + if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER: + self.curriculum_index_to_sample[metric] = MMapIndexedDataset( + data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS] + [metric][CURRICULUM_LEARNING_SAMPLE_PATH], + skip_warmup=True) + if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: + self.curriculum_index_to_metric[metric] = MMapIndexedDataset( + 
data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS] + [metric][CURRICULUM_LEARNING_METRIC_PATH], + skip_warmup=True) + + # Sanity checks. + assert self.total_samples > 0, \ + 'no sample to consume: {}'.format(self.total_samples) + assert self.micro_batch_size > 0 + assert data_parallel_size > 0 + assert self.data_parallel_rank < data_parallel_size, \ + 'data_parallel_rank should be smaller than data size: {}, ' \ + '{}'.format(self.data_parallel_rank, data_parallel_size) + + def __len__(self): + return self.total_samples + + def set_custom_curriculum_learning_schedule(self, schedule_func_dict): + for metric in self.curriculum_schedulers: + if metric in schedule_func_dict: + self.curriculum_schedulers[metric].set_custom_get_difficulty(schedule_func_dict[metric]) + + def get_start_end_idx(self, batch_len=None): + """ + given the length of a minibatch (defaults to micro-batch size * data_parallel_size), + return the start and end indices of the current data parallel rank + """ + batch_len = batch_len or self.micro_batch_times_data_parallel_size + start_idx_fn = lambda r: round(r * batch_len / self.data_parallel_group.size()) + start_idx = start_idx_fn(self.data_parallel_rank) + end_idx = start_idx_fn(self.data_parallel_rank + 1) + return start_idx, end_idx + + def get_sample_based_on_metric_value(self, metric, value_start, value_end): + new_samples = None + for row in range(len(self.curriculum_index_to_sample[metric])): + if self.curriculum_index_to_metric[metric][row] <= value_end and self.curriculum_index_to_metric[metric][ + row] > value_start: + row_samples = np.copy(self.curriculum_index_to_sample[metric][row]) + new_samples = row_samples if new_samples is None else np.concatenate( + (new_samples, row_samples), axis=None) + return new_samples + + def get_sample_based_on_metric_percentile(self, metric, percentile_start, percentile_end): + new_samples = None + if self.data_1epoch_size is None: + self.data_1epoch_size = sum(len(x) for x in self.curriculum_index_to_sample[metric]) + max_percentile = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][ + metric][CURRICULUM_LEARNING_MAX_DIFFICULTY] + sample_per_percentile = self.data_1epoch_size // max_percentile + start_count = sample_per_percentile * percentile_start + end_count = sample_per_percentile * percentile_end + if percentile_end == max_percentile: + end_count = self.data_1epoch_size + current_count = 0 + for row in range(len(self.curriculum_index_to_sample[metric])): + row_size = len(self.curriculum_index_to_sample[metric][row]) + if current_count + row_size > start_count: + row_start = max(0, start_count - current_count) + if current_count + row_size <= end_count: + row_end = row_size + else: + row_end = end_count - current_count + row_samples = np.copy(self.curriculum_index_to_sample[metric][row][row_start:row_end]) + new_samples = row_samples if new_samples is None else np.concatenate( + (new_samples, row_samples), axis=None) + current_count += row_size + if current_count >= end_count: + break + return new_samples + + def get_new_cluster(self, previous_difficulties): + cluster_fname = CURRICULUM_LEARNING_CLUSTER_PREFIX + for metric in self.curriculum_schedulers: + cluster_fname = f"{cluster_fname}_{metric}{self.current_difficulties[metric]}" + cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_CLUSTER_PATH] + cluster_path = f"{cluster_path}/{cluster_fname}" + if self.global_rank == 0: + new_cluster = 
None + need_clustering = 0 + for metric in self.clustering_type: + if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER: + need_clustering += 1 + if need_clustering > 1: + for metric in self.curriculum_schedulers: + if self.clustering_type[metric] == CURRICULUM_LEARNING_SINGLE_CLUSTER: + metric_cluster = np.arange(start=0, + stop=self.one_epoch_total_samples, + step=1, + dtype=self.index_dtype) + else: + if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: + metric_cluster = self.get_sample_based_on_metric_value(metric, float('-inf'), + self.current_difficulties[metric]) + elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED: + metric_cluster = self.get_sample_based_on_metric_percentile( + metric, 0, self.current_difficulties[metric]) + new_cluster = metric_cluster if new_cluster is None else \ + np.intersect1d(new_cluster, metric_cluster, assume_unique=True) + for cluster in self.data_clusters: + new_cluster = np.setdiff1d(new_cluster, cluster[0], assume_unique=True) + else: + if len(self.data_clusters) == 0: + new_cluster = np.arange(start=0, stop=self.one_epoch_total_samples, step=1, dtype=self.index_dtype) + for metric in self.curriculum_schedulers: + if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER: + if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: + new_cluster = self.get_sample_based_on_metric_value(metric, previous_difficulties[metric], + self.current_difficulties[metric]) + elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED: + new_cluster = self.get_sample_based_on_metric_percentile( + metric, previous_difficulties[metric], self.current_difficulties[metric]) + if new_cluster is not None and len(new_cluster) > 0: + logger.info( + f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) with size {len(new_cluster)} generated." + ) + self.np_rng.shuffle(new_cluster) + cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype) + cluster_builder.add_item_numpy(new_cluster) + close_mmap_dataset_builder(cluster_builder, cluster_path) + self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True)) + self.data_cluster_sizes.append(len(self.data_clusters[-1][0])) + else: + logger.info( + f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) has no matched data thus skipped." 
+ ) + dist.barrier(group=self.data_parallel_group) + if os.path.isfile(f"{cluster_path}.bin"): + self.data_cluster_paths.append(cluster_fname) + self.data_cluster_current_position.append(0) + + def sample_from_clusters(self): + num_clusters = len(self.data_clusters) + weight_sum = sum(self.data_cluster_sizes) + weights = [x / weight_sum for x in self.data_cluster_sizes] + samples = self.np_rng.choice(num_clusters, self.global_batch_size, replace=True, p=weights) + samples = np.bincount(samples, minlength=num_clusters) + return samples + + def reshuffle_clusters(self, cidx): + cluster_fname = self.data_cluster_paths[cidx] + cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_CLUSTER_PATH] + cluster_path = f"{cluster_path}/{cluster_fname}" + cluster = np.copy(self.data_clusters[cidx][0]) + self.np_rng.shuffle(cluster) + cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype) + cluster_builder.add_item_numpy(cluster) + close_mmap_dataset_builder(cluster_builder, cluster_path) + self.data_clusters[cidx] = MMapIndexedDataset(cluster_path, skip_warmup=True) + + def get_sample_from_cluster(self, cidx, num_samples): + start_idx = self.data_cluster_current_position[cidx] + samples = list(np.copy(self.data_clusters[cidx][0][start_idx:(start_idx + num_samples)])) + self.data_cluster_current_position[cidx] += num_samples + if len(samples) < num_samples: + num_samples_remained = num_samples - len(samples) + logger.info(f"reshuffling cluster {cidx}.") + self.reshuffle_clusters(cidx) + samples += list(np.copy(self.data_clusters[cidx][0][:num_samples_remained])) + self.data_cluster_current_position[cidx] = num_samples_remained + return samples + + def get_next_global_batch(self): + if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]: + self.curriculum_step += 1 + new_cluster = False + previous_difficulties = {} + for metric in self.curriculum_schedulers: + next_difficulty = self.curriculum_schedulers[metric].update_difficulty(self.curriculum_step) + if metric not in self.current_difficulties or \ + next_difficulty != self.current_difficulties[metric]: + new_cluster = True + if metric in self.current_difficulties: + previous_difficulties[metric] = self.current_difficulties[metric] + else: + if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: + previous_difficulties[metric] = float('-inf') + elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED: + previous_difficulties[metric] = 0 + self.current_difficulties[metric] = next_difficulty + if new_cluster: + self.get_new_cluster(previous_difficulties) + if self.global_rank == 0: + samples_per_cluster = self.sample_from_clusters() + batch = [] + for cidx in range(len(samples_per_cluster)): + batch += self.get_sample_from_cluster(cidx, samples_per_cluster[cidx]) + self.np_rng.shuffle(batch) + + # broadcast tensor must have same shape across participants. 
So we fill batch with -1s when not full + assert len(batch) <= self.global_batch_size + batch += [-1] * (self.global_batch_size - len(batch)) + batch = torch.tensor(batch, device=get_accelerator().current_device_name(), dtype=torch.long).view(-1) + else: + batch = torch.empty(self.global_batch_size, + device=get_accelerator().current_device_name(), + dtype=torch.long) + dist.broadcast(batch, 0, group=self.data_parallel_group) + batch = batch[batch != -1] # remove trailing -1s used to fill incomplete batch tensor + self.batch = batch.tolist() + + def __iter__(self): + while self.consumed_samples <= self.total_samples: + if len(self.batch) == 0: + self.get_next_global_batch() + current_batch = self.batch[:self.micro_batch_times_data_parallel_size] + self.batch = self.batch[self.micro_batch_times_data_parallel_size:] + if len(current_batch) == self.micro_batch_times_data_parallel_size or \ + (len(current_batch) > 0 and not self.drop_last): + start_idx, end_idx = self.get_start_end_idx(len(current_batch)) + yield current_batch[start_idx:end_idx] + self.consumed_samples += len(current_batch) + current_batch = [] + + def state_dict(self): + return { + CURRICULUM_LEARNING_BATCH: self.batch, + CURRICULUM_LEARNING_CONSUMED_SAMPLES: self.consumed_samples, + CURRICULUM_LEARNING_STEP: self.curriculum_step, + CURRICULUM_LEARNING_CURRENT_DIFFICULTIES: self.current_difficulties, + CURRICULUM_LEARNING_DATA_CLUSTER_PATHS: self.data_cluster_paths, + CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION: self.data_cluster_current_position, + CURRICULUM_LEARNING_NP_RNG_STATE: np.random.get_state() + } + + def load_state_dict(self, state_dict): + self.batch = state_dict[CURRICULUM_LEARNING_BATCH] + self.consumed_samples = state_dict[CURRICULUM_LEARNING_CONSUMED_SAMPLES] + self.curriculum_step = state_dict[CURRICULUM_LEARNING_STEP] + self.current_difficulties = state_dict[CURRICULUM_LEARNING_CURRENT_DIFFICULTIES] + self.data_cluster_paths = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_PATHS] + self.data_cluster_current_position = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION] + np.random.set_state(state_dict[CURRICULUM_LEARNING_NP_RNG_STATE]) + cluster_root_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_CLUSTER_PATH] + # Backward compatibility: previously data_cluster_paths were stored as + # absolute paths. Now we changed it to just the file name so that even + # if user moved the cluster files, the checkpoint loading still works + # as long as user set the correct new CURRICULUM_LEARNING_CLUSTER_PATH + # in deepspeed json config. 
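+ # Illustrative example: a legacy checkpoint entry such as "/some/old/run/cluster_file"
+ # (a hypothetical absolute path) is reduced below to its basename "cluster_file",
+ # which rank 0 then re-opens under the current cluster_root_path.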
+ for idx in range(len(self.data_cluster_paths)): + if '/' in self.data_cluster_paths[idx]: + self.data_cluster_paths[idx] = self.data_cluster_paths[idx].split('/')[-1] + if self.global_rank == 0: + for cluster_fname in self.data_cluster_paths: + cluster_path = f"{cluster_root_path}/{cluster_fname}" + self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True)) + self.data_cluster_sizes.append(len(self.data_clusters[-1][0])) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/indexed_dataset.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/indexed_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..872d05de01453aea76fc850de77da3acbbcea76c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/indexed_dataset.py @@ -0,0 +1,627 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/indexed_dataset.py +""" + +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +# copied from fairseq/fairseq/data/indexed_dataset.py +# Removed IndexedRawTextDataset since it relied on Fairseq dictionary +# other slight modifications to remove fairseq dependencies +# Added document index to index file and made it accessible. +# An empty sentence no longer separates documents. + +# Some of the fixes/improvements are adopted from +# https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/main/megatron/data/indexed_dataset.py + +from functools import lru_cache +import os +import shutil +import struct +from itertools import accumulate + +import numpy as np +import torch + + +def __best_fitting_dtype(vocab_size=None): + if vocab_size is not None and vocab_size < 65500: + return np.uint16 + else: + return np.int32 + + +def get_available_dataset_impl(): + return ['lazy', 'cached', 'mmap'] + + +def infer_dataset_impl(path): + if IndexedDataset.exists(path): + with open(index_file_path(path), 'rb') as f: + magic = f.read(8) + if magic == IndexedDataset._HDR_MAGIC: + return 'cached' + elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]: + return 'mmap' + else: + return None + else: + print(f"Dataset does not exist: {path}") + print("Path should be a basename that both .idx and .bin can be appended to get full filenames.") + return None + + +def make_builder(out_file, impl, vocab_size=None): + if impl == 'mmap': + return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size)) + else: + return IndexedDatasetBuilder(out_file) + + +def make_dataset(path, impl, skip_warmup=False): + if not IndexedDataset.exists(path): + print(f"Dataset does not exist: {path}") + print("Path should be a basename that both .idx and .bin can be appended to get full filenames.") + return None + if impl == 'infer': + impl = infer_dataset_impl(path) + if impl == 'lazy' and IndexedDataset.exists(path): + return IndexedDataset(path) + elif impl == 'cached' and IndexedDataset.exists(path): + return IndexedCachedDataset(path) + elif impl == 'mmap' and MMapIndexedDataset.exists(path): + return MMapIndexedDataset(path, skip_warmup) + print(f"Unknown dataset implementation: {impl}") + return None + + +def dataset_exists(path, impl): + if impl == 'mmap': + return 
MMapIndexedDataset.exists(path) + else: + return IndexedDataset.exists(path) + + +def read_longs(f, n): + a = np.empty(n, dtype=np.int64) + f.readinto(a) + return a + + +def write_longs(f, a): + f.write(np.array(a, dtype=np.int64)) + + +# valid metric_dtypes as numpy and torch types +dtypes = { + 1: (np.uint8, torch.uint8), + 2: (np.int8, torch.int8), + 3: (np.int16, torch.int16), + 4: (np.int32, torch.int32), + 5: (np.int64, torch.int64), + 6: (np.uint16, None), + 7: (np.uint32, None), + 8: (np.uint64, None), +} + +valid_dtypes = set([dt[0] for dt in dtypes.values()] + [dt[1] for dt in dtypes.values() if dt[1] is not None]) + + +def code(dtype): + for c, (np_dt, torch_dt) in dtypes.items(): + if dtype in [np_dt, torch_dt]: + return c + raise ValueError(f"{dtype} not supported. Supported types: {valid_dtypes}") + + +def index_file_path(prefix_path): + return prefix_path + '.idx' + + +def data_file_path(prefix_path): + return prefix_path + '.bin' + + +def create_doc_idx(sizes): + doc_idx = [0] + for i, s in enumerate(sizes): + if s == 0: + doc_idx.append(i + 1) + return doc_idx + + +class IndexedDataset(torch.utils.data.Dataset): + """Loader for IndexedDataset""" + _HDR_MAGIC = b'TNTIDX\x00\x00' + + def __init__(self, path): + super().__init__() + self.path = path + self.data_file = None + self.read_index(path) + + def read_index(self, path): + with open(index_file_path(path), 'rb') as f: + magic = f.read(8) + assert magic == self._HDR_MAGIC, ('Index file doesn\'t match expected format. ' + 'Make sure that --dataset-impl is configured properly.') + version = f.read(8) + assert struct.unpack('= self._len: + raise IndexError('index out of range') + + def __del__(self): + if self.data_file: + self.data_file.close() + + # @lru_cache(maxsize=8) + def __getitem__(self, idx): + if not self.data_file: + self.read_data(self.path) + if isinstance(idx, int): + i = idx + self.check_index(i) + tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]] + a = np.empty(tensor_size, dtype=self.dtype) + self.data_file.seek(self.data_offsets[i] * self.element_size) + self.data_file.readinto(a) + return a + elif isinstance(idx, slice): + start, stop, step = idx.indices(len(self)) + if step != 1: + raise ValueError("Slices into indexed_dataset must be contiguous") + sizes = self.sizes[self.dim_offsets[start]:self.dim_offsets[stop]] + size = sum(sizes) + a = np.empty(size, dtype=self.dtype) + self.data_file.seek(self.data_offsets[start] * self.element_size) + self.data_file.readinto(a) + offsets = list(accumulate(sizes)) + sents = np.split(a, offsets[:-1]) + return sents + + def __len__(self): + return self._len + + def num_tokens(self, index): + return self.sizes[index] + + def size(self, index): + return self.sizes[index] + + @staticmethod + def exists(path): + return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))) + + @property + def supports_prefetch(self): + return False # avoid prefetching to save memory + + +class IndexedCachedDataset(IndexedDataset): + + def __init__(self, path): + super().__init__(path) + self.cache = None + self.cache_index = {} + + @property + def supports_prefetch(self): + return True + + def prefetch(self, indices): + if all(i in self.cache_index for i in indices): + return + if not self.data_file: + self.read_data(self.path) + indices = sorted(set(indices)) + total_size = 0 + for i in indices: + total_size += self.data_offsets[i + 1] - self.data_offsets[i] + self.cache = np.empty(total_size, dtype=self.dtype) + ptx = 0 + 
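+ # The loop below packs the requested samples back-to-back into the single
+ # preallocated cache buffer; cache_index records each sample's start offset
+ # (ptx) so that __getitem__ can later slice the sample back out via
+ # self.cache[ptx:ptx + size].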
self.cache_index.clear() + for i in indices: + self.cache_index[i] = ptx + size = self.data_offsets[i + 1] - self.data_offsets[i] + a = self.cache[ptx:ptx + size] + self.data_file.seek(self.data_offsets[i] * self.element_size) + self.data_file.readinto(a) + ptx += size + if self.data_file: + # close and delete data file after prefetch so we can pickle + self.data_file.close() + self.data_file = None + + # @lru_cache(maxsize=8) + def __getitem__(self, idx): + if isinstance(idx, int): + i = idx + self.check_index(i) + tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]] + a = np.empty(tensor_size, dtype=self.dtype) + ptx = self.cache_index[i] + np.copyto(a, self.cache[ptx:ptx + a.size]) + return a + elif isinstance(idx, slice): + # Hack just to make this work, can optimizer later if necessary + sents = [] + for i in range(*idx.indices(len(self))): + sents.append(self[i]) + return sents + + +class IndexedDatasetBuilder(object): + + def __init__(self, out_file, dtype=np.int32): + self.out_file = open(out_file, 'wb') + self.dtype = dtype + self.data_offsets = [0] + self.dim_offsets = [0] + self.sizes = [] + self.element_size = self.dtype().itemsize + self.doc_idx = [0] + + def add_item(self, tensor): + bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype)) + self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size) + for s in tensor.size(): + self.sizes.append(s) + self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size())) + + def end_document(self): + self.doc_idx.append(len(self.sizes)) + + def merge_file_(self, another_file): + index = IndexedDataset(another_file) + assert index.dtype == self.dtype + + doc_offset = len(self.sizes) + + begin = self.data_offsets[-1] + for data_offset in index.data_offsets[1:]: + self.data_offsets.append(begin + data_offset) + self.sizes.extend(index.sizes) + begin = self.dim_offsets[-1] + for dim_offset in index.dim_offsets[1:]: + self.dim_offsets.append(begin + dim_offset) + self.doc_idx.extend((doc_offset + index.doc_idx)[1:]) + + with open(data_file_path(another_file), 'rb') as f: + while True: + data = f.read(1024) + if data: + self.out_file.write(data) + else: + break + + def finalize(self, index_file): + self.out_file.close() + index = open(index_file, 'wb') + index.write(b'TNTIDX\x00\x00') + index.write(struct.pack(' [0, 10, 30, 35] + if arr.size > 1: + arr[1:] = arr[:-1] + if arr.size > 0: + arr[0] = 0 + + +def get_pointers_with_total(sizes, elemsize, dtype): + """Return a numpy array of type np.dtype giving the byte offsets. + + Multiplies values in the sizes array by elemsize (bytes), + and then computes an exclusive scan to get byte offsets. + Returns the total number of bytes as second item in a tuple. 
+ """ + + # scale values in sizes array by elemsize to get sizes in bytes + pointers = np.array(sizes, dtype=dtype) + pointers *= elemsize + np.cumsum(pointers, axis=0, out=pointers) + + # get total number of bytes from all sizes (last element) + bytes_last = pointers[-1] if len(sizes) > 0 else 0 + + # convert to byte offsets + exscan_from_cumsum_(pointers) + + return pointers, bytes_last + + +class MMapIndexedDataset(torch.utils.data.Dataset): + + class Index(object): + _HDR_MAGIC = b'MMIDIDX\x00\x00' + + @classmethod + def writer(cls, path, dtype): + + class _Writer(object): + + def __enter__(self): + self._file = open(path, 'wb') + + self._file.write(cls._HDR_MAGIC) + self._file.write(struct.pack('= 0: + if max_value <= 255: + return np.uint8 + elif max_value <= 65535: + return np.uint16 + elif max_value <= 4294967295: + return np.uint32 + else: + return np.uint64 + else: + if max_value <= 127 and min_value >= -128: + return np.int8 + elif max_value <= 32767 and min_value >= -32768: + return np.int16 + elif max_value <= 2147483647 and min_value >= -2147483648: + return np.int32 + else: + return np.int64 + + +def split_index(start_idx, end_idx, num_partitions): + partition_boundaries = np.linspace(start_idx, end_idx, dtype=int, num=num_partitions + 1) + return [(partition_boundaries[i], partition_boundaries[i + 1]) for i in range(num_partitions)] + + +def split_dataset(dataset, num_workers, worker_id, num_threads): + worker_splits = split_index(0, len(dataset), num_workers) + thread_splits = split_index(worker_splits[worker_id][0], worker_splits[worker_id][1], num_threads) + return worker_splits, thread_splits + + +def create_mmap_dataset_builder(fname, dtype): + logger.info(f"Creating mmap dataset builder at {fname}.") + return MMapIndexedDatasetBuilder(f"{fname}.bin", dtype=dtype) + + +def close_mmap_dataset_builder(builder, fname): + builder.end_document() + builder.finalize(f"{fname}.idx") + logger.info(f"Finalized mmap dataset builder at {fname}.") diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/engine.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/engine.py new file mode 100644 index 0000000000000000000000000000000000000000..9a2b943b0992da45e0881b16043780813e02439d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/engine.py @@ -0,0 +1,3610 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import re +import stat +import torch +import hashlib +from collections import defaultdict, OrderedDict, deque +from shutil import copyfile +import gc + +from torch.nn.modules import Module +from torch.nn.parameter import Parameter +from torch.optim import Optimizer +from torch.optim.lr_scheduler import _LRScheduler +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors + +from typing import Callable, Dict, Union, Iterable + +import deepspeed + +from deepspeed import comm as dist +from deepspeed.runtime.utils import see_memory_usage, DummyOptim +from .zero.offload_config import OffloadDeviceEnum +from deepspeed.runtime.zero.stage_1_and_2 import DeepSpeedZeroOptimizer +from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus +from deepspeed.runtime.zero.utils import is_zero_supported_optimizer, ZeRORuntimeException +from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload +from deepspeed.runtime.zero.config import ZERO_OPTIMIZATION + +from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer +from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer +from deepspeed.runtime.bf16_optimizer import BF16_Optimizer + +from deepspeed.runtime.config import DEEPSPEED_OPTIMIZERS, \ + ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER, \ + TORCH_ADAM_PARAM, ADAM_W_MODE, ADAM_W_MODE_DEFAULT, ZERO_ONE_ADAM_OPTIMIZER, MUADAM_OPTIMIZER, MUADAMW_OPTIMIZER, \ + MUSGD_OPTIMIZER, LION_OPTIMIZER + +from deepspeed.runtime.dataloader import DeepSpeedDataLoader +from deepspeed.runtime.constants import \ + ROUTE_TRAIN, ROUTE_PREDICT, ROUTE_EVAL, \ + PLD_THETA, PLD_GAMMA, BFLOAT16, FP16, AMP, GRADIENT_ACCUMULATION_STEPS, \ + DATA_PARALLEL_GROUP, GLOBAL_RANK +from deepspeed.runtime.zero.config import ZeroStageEnum +from deepspeed.compression import compression_scheduler +from deepspeed.compression.constants import \ + WEIGHT_QUANTIZE_IN_FORWARD_ENABLED, \ + WEIGHT_QUANTIZATION, SHARED_PARAMETERS, \ + WEIGHT_QUANTIZE_ENABLED, \ + WEIGHT_QUANTIZE_GROUPS, \ + WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE, \ + WEIGHT_QUANTIZE_CHANGE_RATIO, \ + WEIGHT_QUANTIZE_TYPE, \ + WEIGHT_QUANTIZE_ROUNDING, \ + WEIGHT_QUANTIZE_VERBOSE, \ + WEIGHT_QUANTIZE_KERNEL +from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, FROZEN_PARAM_FRAGMENTS +from deepspeed.runtime.sparse_tensor import SparseTensor + +from deepspeed.runtime import lr_schedules +from deepspeed.utils import groups +from deepspeed.utils import logger, log_dist, instrument_w_nvtx +from deepspeed.utils.timer import NoopTimer, ThroughputTimer, SynchronizedWallClockTimer, \ + FORWARD_MICRO_TIMER, BACKWARD_MICRO_TIMER, BACKWARD_INNER_MICRO_TIMER, BACKWARD_REDUCE_MICRO_TIMER, \ + STEP_MICRO_TIMER, \ + FORWARD_GLOBAL_TIMER, BACKWARD_GLOBAL_TIMER, BACKWARD_INNER_GLOBAL_TIMER, BACKWARD_REDUCE_GLOBAL_TIMER, \ + STEP_GLOBAL_TIMER +from deepspeed.utils.debug import debug_extract_module_and_param_names, debug_clear_module_and_param_names +from deepspeed.monitor.monitor import MonitorMaster +from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop +from deepspeed.runtime.utils import clip_grad_norm_ +from deepspeed.runtime.eigenvalue import Eigenvalue +from deepspeed.runtime.data_pipeline.constants import DATA_SAMPLING, \ + DATA_ROUTING, DATA_SAMPLING_ENABLED, CURRICULUM_LEARNING, \ + CURRICULUM_LEARNING_ENABLED, DATA_SAMPLING_NUM_WORKERS, RANDOM_LTD, \ + 
RANDOM_LTD_ENABLED, RANDOM_LTD_LAYER_ID, RANDOM_LTD_LAYER_NUM, \ + RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE, RANDOM_LTD_LAYER_TOKEN_LR_ENABLED, \ + RANDOM_LTD_GLOBAL_BATCH_SIZE, RANDOM_LTD_MICRO_BATCH_SIZE, DATA_EFFICIENCY +from deepspeed.runtime.data_pipeline.curriculum_scheduler import CurriculumScheduler +from deepspeed.runtime.data_pipeline.data_routing.scheduler import RandomLTDScheduler +from deepspeed.runtime.data_pipeline.data_routing.helper import remove_random_ltd_state_dict +from deepspeed.runtime.data_pipeline.data_routing.basic_layer import RandomLayerTokenDrop + +from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine +from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + +from .pipe.module import PipelineModule +from .utils import get_ma_status +from .compiler import CompiledModuleWrapper +from ..ops.adam import FusedAdam +from ..moe.sharded_moe import TopKGate, MOELayer +from ..moe.layer import MoE +from ..moe.utils import is_moe_param, configure_moe_param_groups +from ..git_version_info import version + +from deepspeed.profiling.flops_profiler.profiler import FlopsProfiler +from deepspeed.utils.logging import print_json_dist, print_configuration + +from deepspeed.accelerator import get_accelerator + +from deepspeed.runtime.config import DtypeEnum + +MEMORY_OPT_ALLREDUCE_SIZE = 500000000 + +DeepSpeedOptimizerCallable = \ + Callable[[Union[Iterable[Parameter], Dict[str, Iterable]]], Optimizer] +DeepSpeedSchedulerCallable = Callable[[Optimizer], _LRScheduler] + +try: + import apex + from apex import amp + APEX_INSTALLED = True +except ImportError: + # Fail silently so we don't spam logs unnecessarily if user isn't using amp + APEX_INSTALLED = False + + +def split_half_float_double_sparse(tensors): + device_type = get_accelerator().device_name() + supported_types = get_accelerator().supported_dtypes() + + for t in tensors: + assert t.dtype in supported_types, f"attempting to reduce an unsupported grad type: {t.dtype}" + + sparse_tensor_buckets, dense_tensor_buckets = [], [] + for i, dtype in enumerate(supported_types): + sparse_bucket, dense_bucket = [], [] + for t in tensors: + if t.dtype == dtype: + if isinstance(t, SparseTensor): + sparse_bucket.append(t) + else: + dense_bucket.append(t) + if sparse_bucket: + sparse_tensor_buckets.append((dtype, sparse_bucket)) + if dense_bucket: + dense_tensor_buckets.append((dtype, dense_bucket)) + return sparse_tensor_buckets, dense_tensor_buckets + + +class EngineTimers(object): + r"""Wallclock timers for DeepSpeedEngine""" + + def __init__(self, enable_micro_timers, enable_global_timers): + self.forward_timers = [] + self.backward_timers = [] + self.backward_inner_timers = [] + self.backward_reduce_timers = [] + self.step_timers = [] + self.global_timers = [] + self.micro_timers = [] + + if enable_micro_timers: + self.forward_timers += [FORWARD_MICRO_TIMER] + self.backward_timers += [BACKWARD_MICRO_TIMER] + self.backward_inner_timers += [BACKWARD_INNER_MICRO_TIMER] + self.backward_reduce_timers += [BACKWARD_REDUCE_MICRO_TIMER] + self.step_timers += [STEP_MICRO_TIMER] + self.micro_timers += [ + FORWARD_MICRO_TIMER, BACKWARD_MICRO_TIMER, BACKWARD_INNER_MICRO_TIMER, BACKWARD_REDUCE_MICRO_TIMER, + STEP_MICRO_TIMER + ] + + if enable_global_timers: + self.forward_timers += [FORWARD_GLOBAL_TIMER] + self.backward_timers += [BACKWARD_GLOBAL_TIMER] + self.backward_inner_timers += [BACKWARD_INNER_GLOBAL_TIMER] + self.backward_reduce_timers += [BACKWARD_REDUCE_GLOBAL_TIMER] + 
self.step_timers += [STEP_GLOBAL_TIMER] + self.global_timers += [ + FORWARD_GLOBAL_TIMER, BACKWARD_GLOBAL_TIMER, BACKWARD_INNER_GLOBAL_TIMER, BACKWARD_REDUCE_GLOBAL_TIMER, + STEP_GLOBAL_TIMER + ] + + +class DeepSpeedEngine(Module): + r"""DeepSpeed engine for training.""" + + def __init__(self, + args, + model, + optimizer=None, + model_parameters=None, + training_data=None, + lr_scheduler=None, + mpu=None, + dist_init_required=None, + collate_fn=None, + config=None, + config_class=None, + dont_change_device=False): + super(DeepSpeedEngine, self).__init__() + self.dont_change_device = dont_change_device + self.client_optimizer = optimizer + self.client_lr_scheduler = lr_scheduler + self.training_data = training_data + self.collate_fn = collate_fn + self.mpu = mpu + self.all_to_all_group = None + self.data_parallel_group = None + self.global_steps = 0 + self.global_samples = 0 + self.micro_steps = 0 + self.skipped_steps = 0 + self.gradient_average = True + self.warn_unscaled_loss = True + self.config = config + self._config = config_class + self.loaded_checkpoint_mp_world_size = None + self.loaded_checkpoint_dp_world_size = None + self.enable_backward_allreduce = True + self.progressive_layer_drop = None + self.eigenvalue = None + self.block_eigenvalue = None + self.gas_boundary_ctr = 0 + self.dist_backend = get_accelerator().communication_backend_name() + self.has_moe_layers = False + self.num_experts = [] + self.gate_modules = [] + self.moe_layers = [] + self._step_applied = False + self._global_grad_norm = None + self.use_ds_comm = False # False --> Use torch.dist, True --> Use ds.comm backend. + + self.checkpoint_engine = None + + self._is_gradient_accumulation_boundary = None + self.scale_wrt_gas = None + self.losses = None + + # for debug purposes - can then debug print: debug_get_module_name(module) + debug_extract_module_and_param_names(model) + + self._do_args_sanity_check(args) + self._configure_with_arguments(args, mpu) + self._do_sanity_check() + see_memory_usage(f"DeepSpeed Engine: After args sanity test", force=self.memory_breakdown()) + if mpu is not None: + if self.elasticity_enabled(): + if not self.is_elastic_model_parallel_supported(): + assert not self.elasticity_enabled(), ("Elasticity is not currently supported" + " with model parallelism.") + + self._set_distributed_vars(args) + + dist.configure(self._config) + + self.monitor = MonitorMaster(self._config.monitor_config) + + see_memory_usage( + f"DeepSpeed Engine: Before configure distributed model", + force=self.memory_breakdown(), + ) + + self.pipeline_parallelism = isinstance(model, PipelineModule) + + # Configure distributed model + self._configure_distributed_model(model) + + # needed for zero_to_fp32 weights reconstruction to remap nameless data to state_dict + self.param_names = {param: name for name, param in model.named_parameters()} + + self._get_model_parameters() + + see_memory_usage(f"DeepSpeed Engine: After configure distributed model") + + # Configure wall clock timers + self.timers = SynchronizedWallClockTimer() + # Throughput timer + self.tput_timer = ThroughputTimer( + batch_size=self.train_batch_size(), + steps_per_output=self.steps_per_print(), + monitor_memory=False, + ) + + log_dist(f"DeepSpeed Flops Profiler Enabled: {self.flops_profiler_enabled()}", ranks=[0]) + + if self.flops_profiler_enabled(): + self.flops_profiler = FlopsProfiler(self.module, self, self.flops_profiler_recompute_fwd_factor()) + + if training_data: + self.training_dataloader = self.deepspeed_io(training_data) + else: + 
self.training_dataloader = None + + # Configure optimizer and scheduler + self.optimizer = None + self.basic_optimizer = None + self.lr_scheduler = None + has_optimizer = False + + if optimizer or self.optimizer_name(): + has_optimizer = True + # If no parameters given by init default to module parameters + if model_parameters is None: + model_parameters = self.module.parameters() + + # Convert model parameters from generator to list + if not isinstance(model_parameters, list): + model_parameters = list(model_parameters) + + if has_optimizer: + self._configure_optimizer(optimizer, model_parameters) + self._configure_lr_scheduler(lr_scheduler) + self._report_progress(0) + elif self.zero_optimization(): + # no optim selected but zero is enabled + self.optimizer = self._configure_zero_optimizer(optimizer=None) + elif self.bfloat16_enabled(): + self.optimizer = self._configure_bf16_optimizer(optimizer=None) + + # Hook optimizer for snip_momentum pruning + if hasattr(model, 'pruners'): + from ..compression.helper import rewrite_optimizer_step + self.optimizer.pruners = model.pruners + rewrite_optimizer_step(self.optimizer) + + # Bookkeeping for sparse support + self.sparse_tensor_module_names = set() + # if self.sparse_gradients_enabled(): + for name, module in self.module.named_modules(): + if isinstance(module, (torch.nn.Embedding, torch.nn.EmbeddingBag)) and self.sparse_gradients_enabled(): + self.sparse_tensor_module_names.add(name + ".weight") + logger.info("Will convert {} to sparse tensor during training".format(name)) + + self.save_non_zero_checkpoint = False + self.save_zero_checkpoint = False + if not isinstance(self.optimizer, DeepSpeedZeRoOffload): + self._configure_checkpointing(dist_init_required) + + if self.eigenvalue_enabled(): + self.eigenvalue = self._configure_eigenvalue() + + if self.pld_enabled(): + self.progressive_layer_drop = self._configure_progressive_layer_drop() + + if self.curriculum_enabled_legacy(): + self.curriculum_scheduler_legacy = self._configure_curriculum_scheduler_legacy() + + if self.random_ltd_enabled(): + random_ltd_config = self.random_ltd_config() + random_ltd_config[RANDOM_LTD_GLOBAL_BATCH_SIZE] = self.train_batch_size() + random_ltd_config[RANDOM_LTD_MICRO_BATCH_SIZE] = self.train_micro_batch_size_per_gpu() + self.random_ltd_scheduler = self._configure_random_ltd_scheduler(random_ltd_config) + + # Engine timers + + self.engine_timers = EngineTimers(enable_micro_timers=self.wall_clock_breakdown(), + enable_global_timers=self.wall_clock_breakdown() + or self.flops_profiler_enabled()) + + if self.global_rank == 0: + self._config.print("DeepSpeedEngine configuration") + if self.dump_state(): + print_configuration(self, "DeepSpeedEngine") + + # Use torch (un)flatten ops + self.flatten = _flatten_dense_tensors + self.unflatten = _unflatten_dense_tensors + + if self._config.compile_config.enabled: + self._set_client_model(CompiledModuleWrapper(self.module, self._config.compile_config)) + + def destroy(self): + if self.optimizer is not None and hasattr(self.optimizer, 'destroy'): + self.optimizer.destroy() + debug_clear_module_and_param_names() + + def _get_model_parameters(self): + if self.autotuning_profile_model_info(): + self.autotuning_model_info = {} + num_params = 0 + trainable_num_params = 0 + + for p in self.module.parameters(): + # since user code might call deepspeed.zero.Init() before deepspeed.initialize(), need to check the attribute to check if the parameter is partitioned in zero 3 already or not + n = 0 + if hasattr(p, "ds_tensor"): # if 
the parameter is partitioned in zero 3 + n += p.ds_numel + else: # if the parameter is not partitioned in zero 3 yet + n += p.numel() + num_params += n + if p.requires_grad: + trainable_num_params += n + if self.global_rank == 0: + self.autotuning_model_info["num_params"] = num_params * self.mp_world_size + self.autotuning_model_info["trainable_num_params"] = trainable_num_params * self.mp_world_size + + logger.info(f"model parameter = {num_params}") + + def get_batch_info(self): + """Get all training batch related settings. + Returns: + train_batch_size (int): The effective training batch size. This is the amount of data + samples that leads to one step of model update. + train_micro_batch_size_per_gpu (int): Batch size to be processed by one GPU in one + step (without gradient accumulation). + gradient_accumulation_steps (int): Number of training steps to accumulate gradients + before averaging and applying them. + """ + return ( + self.train_batch_size, + self.train_micro_batch_size_per_gpu, + self.gradient_accumulation_steps, + ) + + def set_train_batch_size(self, train_batch_size): + """Adjust the global batch size by increasing or decreasing the number of + micro-batches (i.e., gradient accumulation steps). The size of each micro-batch + (i.e., ``train_micro_batch_size_per_gpu``) is not changed. + Args: + train_batch_size (int): The new global batch size for training. + Raises: + ValueError: if ``train_batch_size`` is not divisible by the + configured micro-batch size and data parallelism. + """ + if train_batch_size % (self.train_micro_batch_size_per_gpu() * self.dp_world_size) != 0: + #print(f'{train_batch_size=} {self.train_micro_batch_size_per_gpu()=} {self.dp_world_size=}') + raise ValueError(f'Train batch size must be divisible by micro-batch data parallelism') + new_gas = train_batch_size // (self.train_micro_batch_size_per_gpu() * self.dp_world_size) + # overwrite config + self._config.train_batch_size = train_batch_size + self._config.gradient_accumulation_steps = new_gas + + def set_train_micro_batch_size(self, micro_batch_size): + """Adjust the micro batch size(i.e., the micro batch size in every data parallel group), + while keep the gradient accumulation steps the same. + Args: + micro_batch_size (int): The new micro batch size for training. + """ + # overwrite config + new_global_batch_size = micro_batch_size * self._config.gradient_accumulation_steps * self.dp_world_size + self._config.train_batch_size = new_global_batch_size + self._config.train_micro_batch_size_per_gpu = micro_batch_size + + def set_data_post_process_func(self, post_process_func): + if self.training_dataloader is not None: + self.training_dataloader.post_process_func = post_process_func + + def set_custom_curriculum_learning_schedule(self, schedule_func_dict): + if self.training_dataloader is not None and self.curriculum_learning_enabled(): + self.training_dataloader.data_sampler.set_custom_curriculum_learning_schedule(schedule_func_dict) + + def get_global_grad_norm(self) -> float: + """Return the 2-norm of all gradients. If there is model parallelism, + the norm will be global. + The computed norm will be cached and reused until the next step() pass. + .. note:: + In the presence of model parallelism, this is a collective call + and acts as a barrier among ``mpu.get_model_parallel_group()``. + Returns: + float: norm + """ + return self._global_grad_norm + + def __getattr__(self, name): + """ + Pass through attributes defined in the model if they are not overridden by ds-engine. 
+ """ + + _module = {} + if "module" in self.__dict__: + _module = self.__dict__['module'] + if name in dir(self): + return getattr(self, name) + elif name in dir(_module): + return getattr(_module, name) + elif isinstance(_module, CompiledModuleWrapper): + try: + return getattr(_module, name) + except AttributeError: + raise AttributeError( + f"None of {type(self).__name__}, CompiledModuleWrapper, or the wrapped model has the attribute '{name}'" + ) + else: + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") + + def checkpoint_tag_validation_enabled(self): + return self._config.checkpoint_tag_validation_enabled + + def checkpoint_tag_validation_fail(self): + return self._config.checkpoint_tag_validation_fail + + def elasticity_enabled(self): + return self._config.elasticity_enabled + + def is_elastic_model_parallel_supported(self): + if self.elasticity_enabled(): + # Add code for finding number of GPUs per node automatically + if self._config.num_gpus_per_node % self._config.elastic_model_parallel_size == 0: + return True + else: + return False + + def pld_enabled(self): + return self._config.pld_enabled + + def pld_params(self): + return self._config.pld_params + + def pld_theta(self): + return self.pld_params()[PLD_THETA] + + def pld_gamma(self): + return self.pld_params()[PLD_GAMMA] + + def eigenvalue_enabled(self): + return self._config.eigenvalue_enabled + + def eigenvalue_verbose(self): + return self._config.eigenvalue_verbose + + def eigenvalue_max_iter(self): + return self._config.eigenvalue_max_iter + + def eigenvalue_tol(self): + return self._config.eigenvalue_tol + + def eigenvalue_stability(self): + return self._config.eigenvalue_stability + + def eigenvalue_gas_boundary_resolution(self): + return self._config.eigenvalue_gas_boundary_resolution + + def eigenvalue_layer_name(self): + return self._config.eigenvalue_layer_name + + def eigenvalue_layer_num(self): + return self._config.eigenvalue_layer_num + + def curriculum_enabled_legacy(self): + return self._config.curriculum_enabled_legacy + + def curriculum_params_legacy(self): + return self._config.curriculum_params_legacy + + def data_efficiency_enabled(self): + return self._config.data_efficiency_enabled + + def data_efficiency_config(self): + return self._config.data_efficiency_config + + def data_sampling_enabled(self): + return self._config.data_efficiency_config[DATA_SAMPLING][DATA_SAMPLING_ENABLED] + + def data_sampling_config(self): + return self._config.data_efficiency_config[DATA_SAMPLING] + + def curriculum_learning_enabled(self): + return self._config.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED] + + def curriculum_learning_config(self): + return self._config.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING] + + def random_ltd_enabled(self): + return self._config.data_efficiency_config[DATA_ROUTING][RANDOM_LTD][RANDOM_LTD_ENABLED] + + def random_ltd_config(self): + return self._config.data_efficiency_config[DATA_ROUTING][RANDOM_LTD] + + def random_ltd_initialize(self): + assert self.random_ltd_enabled() + random_ltd_config = self.random_ltd_config() + random_ltd_queue = deque([x for x in sorted(random_ltd_config[RANDOM_LTD_LAYER_ID])]) + count = 0 + for name, layer in self.module.named_modules(): + if isinstance(layer, RandomLayerTokenDrop): + if len(random_ltd_queue) != 0 and str(random_ltd_queue[0]) in name: ###[1,2,3] + layer.init_config(random_ltd_config, self.random_ltd_scheduler, count) + random_ltd_queue.popleft() + count += 
1 + + if random_ltd_config[RANDOM_LTD_LAYER_NUM] != count: + raise ValueError(f'random_ltd_layer_num {random_ltd_config[RANDOM_LTD_LAYER_NUM]} must be \ + equivalent to the len of random_ltd_layer_id {count}') + + if random_ltd_config[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][RANDOM_LTD_LAYER_TOKEN_LR_ENABLED]: + assert self.client_lr_scheduler is None + raise ValueError(f'not yet support') + #self.lr_scheduler = lr_schedules.WarmupLayerTokenDecayLR(self.optimizer, self.random_ltd_scheduler) + + def wall_clock_breakdown(self): + return self._config.wall_clock_breakdown + + def flops_profiler_enabled(self): + return self._config.flops_profiler_config.enabled or self.autotuning_enabled() + + def flops_profiler_recompute_fwd_factor(self): + return self._config.flops_profiler_config.recompute_fwd_factor + + def flops_profiler_profile_step(self): + step = self._config.flops_profiler_config.profile_step + if self._config.autotuning_config.enabled: + step = self.autotuning_start_profile_step() + return step + + def flops_profiler_module_depth(self): + return self._config.flops_profiler_config.module_depth + + def flops_profiler_top_modules(self): + return self._config.flops_profiler_config.top_modules + + def flops_profiler_detailed(self): + if self._config.autotuning_config.enabled: + return False + return self._config.flops_profiler_config.detailed + + def flops_profiler_output_file(self): + return self._config.flops_profiler_config.output_file + + def memory_breakdown(self): + return self._config.memory_breakdown + + def autotuning_enabled(self): + return self._config.autotuning_config.enabled + + def autotuning_start_profile_step(self): + return self._config.autotuning_config.start_profile_step + + def autotuning_end_profile_step(self): + return self._config.autotuning_config.end_profile_step + + def autotuning_metric_path(self): + path = self._config.autotuning_config.metric_path + if not path: + path = os.path.join(os.getcwd(), "autotuning_metric.json") + return path + + def autotuning_model_info_path(self): + path = self._config.autotuning_config.model_info_path + if not path: + path = os.path.join(os.getcwd(), "autotuning_model_info.json") + return path + + def autotuning_metric(self): + return self._config.autotuning_config.metric + + def autotuning_profile_model_info(self): + return self.autotuning_enabled( + ) and self._config.autotuning_config.model_info and self._config.autotuning_config.model_info.get( + "profile", False) + + def sparse_gradients_enabled(self): + return self._config.sparse_gradients_enabled + + def train_batch_size(self): + return self._config.train_batch_size + + def train_micro_batch_size_per_gpu(self): + return self._config.train_micro_batch_size_per_gpu + + def optimizer_name(self): + return (self.client_optimizer.__class__.__name__ if self.client_optimizer else self._config.optimizer_name) + + def optimizer_params(self): + return self._config.optimizer_params + + def optimizer_legacy_fusion(self): + return self._config.optimizer_legacy_fusion + + def scheduler_name(self): + return self._config.scheduler_name + + def scheduler_params(self): + return self._config.scheduler_params + + def quantize_training(self): + return ( + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] + [WEIGHT_QUANTIZE_IN_FORWARD_ENABLED], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_ENABLED], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_GROUPS], + 
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] + [WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_CHANGE_RATIO], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_TYPE], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_ROUNDING], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_VERBOSE], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_KERNEL], + ) + + def zero_optimization(self): + return self._config.zero_enabled + + def zero_allow_untested_optimizer(self): + return self._config.zero_allow_untested_optimizer + + def zero_force_ds_cpu_optimizer(self): + return self._config.zero_force_ds_cpu_optimizer + + def zero_reduce_scatter(self): + return self._config.zero_config.reduce_scatter + + def zero_overlap_comm(self): + return self._config.zero_config.overlap_comm + + def zero_offload_optimizer(self): + return self._config.zero_config.offload_optimizer + + def zero_offload_param(self): + return self._config.zero_config.offload_param + + def zero_use_cpu_optimizer(self): + if self._config.zero_config.offload_optimizer is not None: + return self._config.zero_config.offload_optimizer.device in [OffloadDeviceEnum.cpu, OffloadDeviceEnum.nvme] + return False + + def zero_cpu_offload(self): + if self._config.zero_config.offload_optimizer is not None: + return self._config.zero_config.offload_optimizer.device == OffloadDeviceEnum.cpu + return False + + def zero_partial_offload(self): + return getattr(self._config.zero_config.offload_optimizer, "ratio", 1.0) + + def zero_sub_group_size(self): + return self._config.zero_config.sub_group_size + + def zero_optimization_stage(self): + return self._config.zero_optimization_stage + + def mics_shard_size(self): + return self._config.mics_shard_size + + def zero_reduce_bucket_size(self): + return self._config.zero_config.reduce_bucket_size + + def zero_multi_rank_bucket_allreduce(self): + return self._config.zero_config.use_multi_rank_bucket_allreduce + + def zero_allgather_bucket_size(self): + return self._config.zero_config.allgather_bucket_size + + def zero_optimization_partition_gradients(self): + return self.zero_optimization_stage() >= ZeroStageEnum.gradients + + def zero_optimization_partition_weights(self): + return self.zero_optimization_stage() >= ZeroStageEnum.weights + + def is_first_weights_partition_group(self): + ret = True if self.mics_shard_size() < 0 \ + and self.zero_optimization_partition_weights() else False + if self.mics_shard_size() > 0 and self.global_rank < self.mics_shard_size(): + ret = True + return ret + + def zero_contiguous_gradients(self): + return self._config.zero_config.contiguous_gradients + + def zero_load_from_fp32_weights(self): + return self._config.zero_config.load_from_fp32_weights + + def zero_elastic_checkpoint(self): + return self._config.zero_config.elastic_checkpoint + + def zero_has_nvme_offload(self): + if not hasattr(self.optimizer, "swap_optimizer"): + return False + return self.optimizer.swap_optimizer or self.optimizer.params_in_nvme_and_cpu + + def zero_max_live_parameters(self): + return self._config.zero_config.max_live_parameters + + def zero_max_reuse_distance(self): + return self._config.zero_config.max_reuse_distance + + def zero_prefetch_bucket_size(self): + return self._config.zero_config.prefetch_bucket_size + + def 
zero_param_persistence_threshold(self): + return self._config.zero_config.param_persistence_threshold + + def zero_model_persistence_threshold(self): + return self._config.zero_config.model_persistence_threshold + + def zero_gather_16bit_weights_on_model_save(self): + return self._config.zero_config.gather_16bit_weights_on_model_save + + def zero_grad_hooks(self): + return self._config.zero_config.grad_hooks + + def zero_legacy_stage1(self): + return self._config.zero_config.legacy_stage1 + + def zero_ignore_unused_parameters(self): + return self._config.zero_config.ignore_unused_parameters + + def graph_harvesting(self): + return self._config.graph_harvesting + + def fp16_enabled(self): + return self._config.fp16_enabled + + def bfloat16_enabled(self): + return self._config.bfloat16_enabled + + def fp16_master_weights_and_gradients(self): + return self._config.fp16_master_weights_and_gradients + + def amp_enabled(self): + return self._config.amp_enabled + + def amp_params(self): + return self._config.amp_params + + def fp16_auto_cast(self): + return self._config.fp16_auto_cast + + def loss_scale(self): + return self._config.loss_scale + + def gradient_accumulation_steps(self): + return self._config.gradient_accumulation_steps + + def use_node_local_storage(self): + return self._config.use_node_local_storage + + def load_universal_checkpoint(self): + return self._config.load_universal_checkpoint + + @property + def communication_data_type(self): + res = self._config.communication_data_type + if res is not None: + return res + + if self.fp16_enabled(): + return torch.float16 + + if self.bfloat16_enabled(): + return torch.bfloat16 + + return torch.float32 + + @communication_data_type.setter + def communication_data_type(self, value): + self._config.communication_data_type = value + + def postscale_gradients(self): + return not self._config.prescale_gradients + + def gradient_predivide_factor(self): + return self._config.gradient_predivide_factor + + def steps_per_print(self): + return self._config.steps_per_print + + def zero_allgather_partitions(self): + return self._config.zero_config.allgather_partitions + + def zero_round_robin_gradients(self): + return self._config.zero_config.round_robin_gradients + + def zero_hpz_partition_size(self): + return self._config.zero_config.zero_hpz_partition_size + + def zero_quantized_weights(self): + return self._config.zero_config.zero_quantized_weights + + def zero_quantized_nontrainable_weights(self): + return self._config.zero_config.zero_quantized_nontrainable_weights + + def zero_quantized_gradients(self): + return self._config.zero_config.zero_quantized_gradients + + def dump_state(self): + return self._config.dump_state + + def gradient_clipping(self): + return self._config.gradient_clipping + + def dynamic_loss_scale(self): + return self._config.loss_scale == 0 + + def initial_dynamic_scale(self): + return self._config.initial_dynamic_scale + + def dynamic_loss_scale_args(self): + return self._config.dynamic_loss_scale_args + + def swap_tensor_config(self): + return self._config.swap_tensor_config + + def aio_config(self): + return self._config.aio_config + + def get_data_types(self): + model_dtype = torch.float32 + if self.fp16_enabled(): + model_dtype = torch.float16 + elif self.bfloat16_enabled(): + model_dtype = torch.bfloat16 + + if self._config.grad_accum_dtype is None: + if model_dtype == torch.bfloat16 and not self.zero_optimization(): + grad_accum_dtype = torch.float32 + else: + grad_accum_dtype = model_dtype + else: + grad_accum_dtype 
= DtypeEnum(self._config.grad_accum_dtype).value + + return (model_dtype, grad_accum_dtype) + + def _optimizer_has_ckpt_event_prologue(self): + return self.optimizer is not None and hasattr(self.optimizer, 'checkpoint_event_prologue') + + def _optimizer_has_ckpt_event_epilogue(self): + return self.optimizer is not None and hasattr(self.optimizer, 'checkpoint_event_epilogue') + + def _configure_lr_scheduler(self, client_lr_scheduler): + # First check for scheduler in json configuration + lr_scheduler = self._scheduler_from_config(self.optimizer) + if lr_scheduler: + log_dist(f"DeepSpeed using configured LR scheduler = {self.scheduler_name()}", ranks=[0]) + self.lr_scheduler = lr_scheduler + else: + if isinstance(client_lr_scheduler, Callable): + log_dist('DeepSpeed using client callable to create LR scheduler', ranks=[0]) + self.lr_scheduler = client_lr_scheduler(self.basic_optimizer) + else: + log_dist('DeepSpeed using client LR scheduler', ranks=[0]) + self.lr_scheduler = client_lr_scheduler + + log_dist(f'DeepSpeed LR Scheduler = {self.lr_scheduler}', ranks=[0]) + + def _configure_checkpointing(self, dist_init_required): + self.checkpoint_engine = TorchCheckpointEngine() + + if self._config is not None and self._config.nebula_config.enabled: + try: + from deepspeed.runtime.checkpoint_engine.nebula_checkpoint_engine import \ + NebulaCheckpointEngine + self.checkpoint_engine = NebulaCheckpointEngine(config_params=self._config.nebula_config) + except ImportError as err: + logger.error(f"No torch_nebula was found! Will fall back to torch.save. Details: {err}") + self.checkpoint_engine = TorchCheckpointEngine() + + dp_rank = groups._get_sequence_data_parallel_rank() + + rank = self.local_rank if self.use_node_local_storage() else dp_rank + + # only the first data parallel process needs to store the model checkpoint + # if you want to use node local storage this must be done by rank 0 on each + # node + self.save_non_zero_checkpoint = (rank == 0) or (self.zero_optimization_partition_weights() + and self.is_first_weights_partition_group()) + + if self.zero_optimization() or self.bfloat16_enabled(): + param_rank = dist.get_rank(group=self.optimizer.dp_process_group) + + # Only the first parameter parallel process needs to store the + # optimizer state checkpoints for zero + self.save_zero_checkpoint = param_rank == dp_rank + + def _scheduler_from_config(self, optimizer): + scheduler_name = self.scheduler_name() + if scheduler_name is not None: + if hasattr(lr_schedules, scheduler_name): + scheduler = getattr(lr_schedules, scheduler_name) + else: + assert hasattr(torch.optim.lr_scheduler, + scheduler_name), f"DeepSpeed does not recognize LR scheduler {scheduler_name}" + + scheduler = getattr(torch.optim.lr_scheduler, scheduler_name) + + scheduler_params = self.scheduler_params() + instantiated_scheduler = scheduler(optimizer, **scheduler_params) + return instantiated_scheduler + else: + return None + + def _set_distributed_vars(self, args): + device_rank = args.device_rank if args is not None and hasattr(args, 'device_rank') else self.local_rank + if device_rank >= 0: + get_accelerator().set_device(device_rank) + self.device = torch.device(get_accelerator().device_name(), device_rank) + self.world_size = dist.get_world_size() + self.global_rank = dist.get_rank() + else: + self.world_size = 1 + self.global_rank = 0 + self.device = torch.device(get_accelerator().device_name()) + + # Configure based on command line arguments + def _configure_with_arguments(self, args, mpu): + # After the distributed 
backend is initialized we are guaranteed the LOCAL_RANK + # environment variable is set. We must align args.local_rank to this value for + # backwards compatibility with scripts relying on [args|self].local_rank containing + # the correct local rank info. _do_args_sanity_check will ensure this is the case. + + if "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ: + ompi_local_rank = os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK") + local_rank = os.environ.get('LOCAL_RANK', ompi_local_rank) + assert ompi_local_rank == local_rank, f"LOCAL_RANK ({local_rank}) != OMPI_COMM_WORLD_LOCAL_RANK ({ompi_local_rank}), " \ + "not sure how to proceed as we're seeing conflicting local rank info." + os.environ['LOCAL_RANK'] = local_rank + + self.local_rank = int(os.environ['LOCAL_RANK']) + if hasattr(args, 'local_rank'): + args.local_rank = self.local_rank + + # Validate command line arguments + def _do_args_sanity_check(self, args): + assert "LOCAL_RANK" in os.environ or "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ, "DeepSpeed requires the LOCAL_RANK environment " \ + "variable, it is set by the deepspeed launcher, deepspeed.init_distributed, or the torch's launcher. If using a " \ + "different launcher please ensure LOCAL_RANK is set prior to initializing deepspeed." + + if hasattr(args, 'local_rank') and args.local_rank is not None: + assert isinstance(args.local_rank, + int), f"args.local_rank of {args.local_rank} is an unknown type {type(args.local_rank)}" + if args.local_rank >= 0: + env_local_rank = int(os.environ.get("LOCAL_RANK")) + assert ( + env_local_rank == args.local_rank + ), f"Mismatch in local rank setting, args.local_rank={args.local_rank} but env['LOCAL_RANK']={env_local_rank}." + + def _is_supported_optimizer(self, optimizer_name): + return (optimizer_name in DEEPSPEED_OPTIMIZERS or getattr(torch.optim, optimizer_name, None) is not None) + + def _supported_optims(self): + FairseqOptimizer = None + try: + from fairseq.optim.fairseq_optimizer import FairseqOptimizer + except ImportError: + pass + + expected_optim_types = [Optimizer] + if FairseqOptimizer: + # fairseq optims are not torch.optim objects + expected_optim_types.append(FairseqOptimizer) + return expected_optim_types + + # Validate configuration based on command line arguments + def _do_sanity_check(self): + if self.fp16_enabled() and not get_accelerator().is_fp16_supported(): + raise ValueError("Type fp16 is not supported.") + + expected_optim_types = self._supported_optims() + expected_optim_types += [type(None), Callable] + assert isinstance(self.client_optimizer, tuple(expected_optim_types)), \ + f'Client Optimizer is of unexpected type {type(self.client_optimizer)}' + + if not self.client_optimizer: + if self.optimizer_name() is not None: + assert self._is_supported_optimizer( + self.optimizer_name()), "{} is not a supported DeepSpeed Optimizer".format(self.optimizer_name()) + + if (self.optimizer_name() == LAMB_OPTIMIZER or self.optimizer_name() == ONEBIT_LAMB_OPTIMIZER): + assert (self.dynamic_loss_scale()), "DeepSpeed {} optimizer requires dynamic loss scaling".format( + self.optimizer_name()) + + # Detect invalid combinations of client optimizer and client scheduler + if isinstance(self.client_lr_scheduler, _LRScheduler): + assert isinstance(self.client_optimizer, Optimizer), \ + f'Client Optimizer (type = {type(self.client_optimizer)} is not instantiated but Client LR Scheduler is instantiated' + + def _broadcast_model(self): + + def is_replicated(p): + if hasattr(p, "ds_status") and p.ds_status is not ZeroParamStatus.AVAILABLE: 
+ return False + return True + + for p in self.module.parameters(): + # Broadcast the model for different parameters + if is_moe_param(p): + if torch.is_tensor(p) and is_replicated(p): + dist.broadcast(p.data, + groups._get_expert_broadcast_src_rank(p.group_name), + group=self.expert_data_parallel_group[p.group_name]) + else: + if torch.is_tensor(p) and is_replicated(p): + dist.broadcast(p.data, groups._get_broadcast_src_rank(), group=self.seq_data_parallel_group) + + @staticmethod + def __check_params(model: Module, dtype: torch.dtype) -> None: + return + if not all(param.dtype == dtype for param in model.parameters()) and dist.get_rank() == 0: + raise ValueError(f"{dtype} is enabled but the following parameters have dtype that is " + f"not {dtype}: " + f"{[(n, p.dtype) for n, p in model.named_parameters() if p.dtype != dtype]}") + + def _set_client_model(self, model): + # register client model in _modules so that nn.module methods work correctly + modules = self.__dict__.get('_modules') + modules['module'] = model + # register module attribute in engine but avoid getattr + self.__dict__['module'] = model + + def _configure_distributed_model(self, model): + self._set_client_model(model) + is_zero_init_model = self.zero_optimization_partition_weights() and any( + [hasattr(param, "ds_id") for param in self.module.parameters()]) + + if self.fp16_enabled(): + if is_zero_init_model: + self.__check_params(self.module, torch.half) + self.module.half() + elif self.bfloat16_enabled(): + if is_zero_init_model: + self.__check_params(self.module, torch.bfloat16) + self.module.bfloat16() + else: + self.__check_params(self.module, torch.float) + + # zero.Init() handles device placement of model + if not (self.dont_change_device or is_zero_init_model): + self.module.to(self.device) + + # MoE related initialization + for _, module in self.module.named_modules(): + if isinstance(module, MoE): + self.has_moe_layers = True + self.num_experts.append(module.num_experts) + + if self.has_moe_layers: + for _, module in self.module.named_modules(): + if isinstance(module, TopKGate): + self.gate_modules.append(module) + if self.wall_clock_breakdown(): + module.wall_clock_breakdown = True + if isinstance(module, MOELayer): + self.moe_layers.append(module) + if self.wall_clock_breakdown(): + module.wall_clock_breakdown = True + + # Pass the mpu from here to groups. For subsequent use, just query groups + if self.mpu is not None: + groups.mpu = self.mpu + + # Set deepspeed parallelism spec. 
for the model including expert parallelism + for _, module in self.module.named_modules(): + if hasattr(module, 'set_deepspeed_parallelism'): + module.set_deepspeed_parallelism(self._config.use_data_before_expert_parallel_) + + # Query the groups module to get information about various parallel groups + self.local_all_to_all_group = None + if self.zero_quantized_gradients(): + log_dist("Using quantized gradients", ranks=[0]) + self.local_all_to_all_group = groups._get_local_all_to_all_group() + self.data_parallel_group = groups._get_data_parallel_group() + self.dp_world_size = groups._get_data_parallel_world_size() + self.seq_data_parallel_group = groups._get_sequence_data_parallel_group() + self.seq_dp_world_size = groups._get_sequence_data_parallel_world_size() + self.mp_world_size = groups._get_model_parallel_world_size() + self.expert_parallel_group = groups._get_expert_parallel_group_dict() + self.expert_data_parallel_group = groups._get_expert_data_parallel_group_dict() + self.sequence_parallel_size = groups._get_sequence_parallel_world_size() + if self.sequence_parallel_size > 1: + self.communication_data_type = self._config.seq_parallel_communication_data_type + + if not (self.amp_enabled() or is_zero_init_model): + self._broadcast_model() + + # check if parameters are duplicated in optimizer param_groups + def _check_for_duplicates(self, optimizer): + for name, param in self.module.named_parameters(): + param_id = id(param) + + def ids_list(group): + return [id(param) for param in group] + + occurrence = sum([ + ids_list(group['params']).count(param_id) if param_id in ids_list(group['params']) else 0 + for group in optimizer.param_groups + ]) + assert occurrence <= 1, f"Parameter with name: {name} occurs multiple times in optimizer.param_groups. Make sure it only appears once to prevent undefined behavior." + + def _do_optimizer_sanity_check(self, basic_optimizer): + model_dtype, grad_accum_dtype = self.get_data_types() + zero_enabled = self.zero_optimization() + amp_enabled = self.amp_enabled() + # config based assertions + assert ( + not (amp_enabled and zero_enabled) + ), "Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which performs similar to amp opt_mode=O2" + if zero_enabled: + if not is_zero_supported_optimizer(basic_optimizer): + assert ( + self.zero_allow_untested_optimizer() + ), 'You are using an untested ZeRO Optimizer. Please add <"zero_allow_untested_optimizer": true> in the configuration file to use it.' 
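+ # For illustration (assumed example, not part of this file): the opt-in lives at the top
+ # level of the ds_config, e.g. {"zero_allow_untested_optimizer": true} next to the usual
+ # "zero_optimization" block.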
+ + if self.global_rank == 0: + logger.warning("**** You are using ZeRO with an untested optimizer, proceed with caution *****") + if model_dtype == torch.bfloat16 and grad_accum_dtype == torch.float32 and self.zero_optimization_stage( + ) == 1 and not self.zero_cpu_offload(): + return BFLOAT16 + return ZERO_OPTIMIZATION + elif amp_enabled: + if model_dtype != grad_accum_dtype: + raise NotImplementedError( + "Model data type and gradient accumulation data type must be equal to use Amp") + if model_dtype == torch.bfloat16 or model_dtype == torch.float16: + raise NotImplementedError("Cannot enable both amp with (legacy) fp16 or bfloat16 mode") + try: + logger.info("Initializing Apex amp from: {}".format(amp.__path__)) + except NameError: + # If apex/amp is available it will be imported above + raise RuntimeError("Unable to import apex/amp, please make sure it is installed") + return AMP + # data type checks + elif model_dtype == grad_accum_dtype: + if model_dtype == torch.bfloat16: + if self.pipeline_parallelism: + logger.warning( + "**** BF16 gradient accumulation is not safe numerically with large number of accumulation steps, proceed with caution *****" + ) + return BFLOAT16 + else: + raise NotImplementedError( + "Bfloat16 wrapper must use a gradient accumulation type of fp32, enable ZeRO to use Bfloat16 gradient accumulation" + ) + if model_dtype == torch.float16: + return FP16 + # else optimizer_wrapper = None + elif model_dtype == torch.bfloat16 and grad_accum_dtype == torch.float32: + return BFLOAT16 + else: + raise NotImplementedError("unsupported mix of model dtype and gradient accumulation type") + + return None + + # Configure optimizer + def _configure_optimizer(self, client_optimizer, model_parameters): + if client_optimizer is None: + if self.has_moe_layers: + model_parameters = configure_moe_param_groups(model_parameters) + basic_optimizer = self._configure_basic_optimizer(model_parameters) + log_dist(f"Using DeepSpeed Optimizer param name {self.optimizer_name()} as basic optimizer", ranks=[0]) + else: + if isinstance(client_optimizer, tuple(self._supported_optims())): + basic_optimizer = client_optimizer + log_dist('Using client Optimizer as basic optimizer', ranks=[0]) + else: + basic_optimizer = client_optimizer(model_parameters) + log_dist('Using client callable to create basic optimizer', ranks=[0]) + + if self.zero_use_cpu_optimizer() and not isinstance(basic_optimizer, deepspeed.ops.adam.DeepSpeedCPUAdam): + if self.zero_force_ds_cpu_optimizer(): + msg = f'You are using ZeRO-Offload with a client provided optimizer ({type(basic_optimizer)}) which in most cases will yield poor performance. Please either use deepspeed.ops.adam.DeepSpeedCPUAdam or set an optimizer in your ds-config (https://www.deepspeed.ai/docs/config-json/#optimizer-parameters). If you really want to use a custom optimizer w. ZeRO-Offload and understand the performance impacts you can also set <"zero_force_ds_cpu_optimizer": false> in your configuration file.' 
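+ # For illustration (assumed example values): declaring the optimizer in the ds_config,
+ # e.g. {"optimizer": {"type": "Adam", "params": {"lr": 1e-4}}}, lets
+ # _configure_basic_optimizer below substitute DeepSpeedCPUAdam automatically whenever
+ # zero_use_cpu_optimizer() is true.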
+ raise ZeRORuntimeException(msg) + + basic_optimizer.param_groups[:] = [pg for pg in basic_optimizer.param_groups if len(pg["params"]) != 0] + log_dist("Removing param_group that has no 'params' in the basic Optimizer", ranks=[0]) + + self._check_for_duplicates(basic_optimizer) + + self.basic_optimizer = basic_optimizer + log_dist("DeepSpeed Basic Optimizer = {}".format(basic_optimizer.__class__.__name__), ranks=[0]) + + optimizer_wrapper = self._do_optimizer_sanity_check(basic_optimizer) + + if optimizer_wrapper == ZERO_OPTIMIZATION: + self.optimizer = self._configure_zero_optimizer(basic_optimizer) + elif optimizer_wrapper == AMP: + amp_params = self.amp_params() + log_dist(f"Initializing AMP with these params: {amp_params}", ranks=[0]) + model, self.optimizer = amp.initialize(self.module, basic_optimizer, **amp_params) + self._set_client_model(model) + self._broadcast_model() + # TODO: maybe need to broadcast experts differently? + elif optimizer_wrapper == FP16: + self.optimizer = self._configure_fp16_optimizer(basic_optimizer) + elif optimizer_wrapper == BFLOAT16: + self.optimizer = self._configure_bf16_optimizer(basic_optimizer) + else: + self.optimizer = basic_optimizer + + log_dist("DeepSpeed Final Optimizer = {}".format(self.optimizer_name()), ranks=[0]) + + self.compression_scheduler = self._configure_compression_scheduler() + self.quantizer = self._configure_quantization() + + def _configure_basic_optimizer(self, model_parameters): + optimizer_parameters = self.optimizer_params() + if optimizer_parameters is None: + optimizer_parameters = {} + # print(optimizer_parameters.keys()) + if "max_grad_norm" in optimizer_parameters.keys(): + raise ValueError( + "'max_grad_norm' is not supported as an optimizer parameter, please switch to using the deepspeed parameter 'gradient_clipping' see: https://www.deepspeed.ai/docs/config-json/#gradient-clipping for more details" + ) + + if self.optimizer_name() in [ADAM_OPTIMIZER, ADAMW_OPTIMIZER]: + torch_adam = optimizer_parameters.pop(TORCH_ADAM_PARAM, False) + adam_w_mode = optimizer_parameters.pop(ADAM_W_MODE, ADAM_W_MODE_DEFAULT) + + # Optimizer name of Adam forces AdamW logic unless adam_w_mode is explicitly set + effective_adam_w_mode = self.optimizer_name() == ADAMW_OPTIMIZER or adam_w_mode + + if torch_adam: + if not effective_adam_w_mode: + optimizer = torch.optim.Adam(model_parameters, **optimizer_parameters) + else: + optimizer = torch.optim.AdamW(model_parameters, **optimizer_parameters) + else: + if self.zero_use_cpu_optimizer(): + from deepspeed.ops.adam import DeepSpeedCPUAdam + optimizer = DeepSpeedCPUAdam(model_parameters, + **optimizer_parameters, + adamw_mode=effective_adam_w_mode) + else: + from deepspeed.ops.adam import FusedAdam + + optimizer = FusedAdam( + model_parameters, + **optimizer_parameters, + adam_w_mode=effective_adam_w_mode, + ) + + elif self.optimizer_name() == ADAGRAD_OPTIMIZER: + if self.zero_use_cpu_optimizer(): + from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad + optimizer = DeepSpeedCPUAdagrad(model_parameters, **optimizer_parameters) + else: + optimizer = torch.optim.Adagrad(model_parameters, **optimizer_parameters) + elif self.optimizer_name() == LAMB_OPTIMIZER: + from deepspeed.ops.lamb import FusedLamb + + optimizer = FusedLamb(model_parameters, **optimizer_parameters) + elif self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER: + assert not self.zero_optimization(), "1bit-Adam is not compatible with ZeRO" + from deepspeed.runtime.fp16.onebit.adam import OnebitAdam + + optimizer = 
OnebitAdam(model_parameters, self, **optimizer_parameters) + if not self.fp16_enabled(): + logger.warning(f"Currently the convergence of 1-bit Adam is only verified under FP16") + elif self.optimizer_name() == ZERO_ONE_ADAM_OPTIMIZER: + assert not self.zero_optimization(), "0/1 Adam is not compatible with ZeRO" + from deepspeed.runtime.fp16.onebit.zoadam import ZeroOneAdam + + optimizer = ZeroOneAdam(model_parameters, self, **optimizer_parameters) + if not self.fp16_enabled(): + logger.warning(f'Currently the convergence of 0/1 Adam is only verified under FP16') + elif self.optimizer_name() == ONEBIT_LAMB_OPTIMIZER: + assert not self.zero_optimization(), "1bit-Lamb is not compatible with ZeRO" + from deepspeed.runtime.fp16.onebit.lamb import OnebitLamb + + optimizer = OnebitLamb(model_parameters, self, **optimizer_parameters) + if not self.fp16_enabled(): + logger.warning(f"Currently the convergence of 1-bit Lamb is only verified under FP16") + elif self.optimizer_name() == LION_OPTIMIZER: + if self.zero_use_cpu_optimizer(): + from deepspeed.ops.lion import DeepSpeedCPULion + optimizer = DeepSpeedCPULion(model_parameters, **optimizer_parameters) + else: + from deepspeed.ops.lion import FusedLion + optimizer = FusedLion(model_parameters, **optimizer_parameters) + elif self.optimizer_name() == MUADAM_OPTIMIZER: + try: + from mup import MuAdam + except ImportError: + logger.error(f"Install mup to use MuAdam optimizer") + optimizer = MuAdam(model_parameters, **optimizer_parameters) + elif self.optimizer_name() == MUADAMW_OPTIMIZER: + try: + from mup import MuAdamW + except ImportError: + logger.error(f"Install mup to use MuAdamW optimizer") + optimizer = MuAdamW(model_parameters, **optimizer_parameters) + elif self.optimizer_name() == MUSGD_OPTIMIZER: + try: + from mup import MuSGD + except ImportError: + logger.error(f"Install mup to use MuSGD optimizer") + optimizer = MuSGD(model_parameters, **optimizer_parameters) + else: + torch_optimizer = getattr(torch.optim, self.optimizer_name()) + optimizer = torch_optimizer(model_parameters, **optimizer_parameters) + return optimizer + + def _configure_compression_scheduler(self): + return compression_scheduler(self.module, self._config.compression_config) + + def _configure_random_ltd_scheduler(self, configs): + return RandomLTDScheduler(configs) + + def _configure_quantization(self): + ( + quantize_weight_in_forward, + quantize_enabled, + q_groups, + q_mixed_fp16, + q_change_ratio, + q_type, + q_rounding, + q_verbose, + use_quantizer_kernel, + ) = self.quantize_training() + if quantize_enabled and not quantize_weight_in_forward: + assert self.fp16_enabled( + ), "MoQ (quantize in optimization step) weight quantization is only supported for FP16" + quantizer = None + if quantize_enabled and not quantize_weight_in_forward: + from deepspeed.runtime.quantize import Quantizer + + quantizer = Quantizer( + q_groups, + q_mixed_fp16, + q_change_ratio, + q_type, + q_rounding, + q_verbose, + self.eigenvalue_enabled(), + use_quantizer_kernel, + self.eigenvalue_layer_num() if self.eigenvalue_enabled() else 0, + ) + return quantizer + + def _configure_fp16_optimizer(self, optimizer): + initial_dynamic_scale = self.initial_dynamic_scale() + dynamic_loss_args = self.dynamic_loss_scale_args() + clip_grad = self.gradient_clipping() + if APEX_INSTALLED: + fused_opts = (apex.optimizers.FusedAdam, FusedAdam) + else: + fused_opts = FusedAdam + if isinstance(optimizer, fused_opts) \ + or self.optimizer_name() in [ONEBIT_ADAM_OPTIMIZER, ZERO_ONE_ADAM_OPTIMIZER]: + if 
self.dynamic_loss_scale(): + log_dist(f'Creating fp16 optimizer with dynamic loss scale', ranks=[0]) + timers = self.timers if self.wall_clock_breakdown() else NoopTimer() + optimizer = FP16_Optimizer( + optimizer, + deepspeed=self, + dynamic_loss_scale=True, + initial_dynamic_scale=initial_dynamic_scale, + dynamic_loss_args=dynamic_loss_args, + mpu=self.mpu, + clip_grad=clip_grad, + fused_adam_legacy=self.optimizer_legacy_fusion(), + timers=timers, + has_moe_layers=self.has_moe_layers, + ) + else: + log_dist(f'Creating fp16 optimizer with static loss scale: {self.loss_scale()}', ranks=[0]) + optimizer = FP16_Optimizer( + optimizer, + deepspeed=self, + static_loss_scale=self.loss_scale(), + mpu=self.mpu, + clip_grad=clip_grad, + fused_adam_legacy=self.optimizer_legacy_fusion(), + has_moe_layers=self.has_moe_layers, + ) + else: + log_dist(f'Creating fp16 unfused optimizer with dynamic loss scale', ranks=[0]) + optimizer = FP16_UnfusedOptimizer( + optimizer, + deepspeed=self, + static_loss_scale=self.loss_scale(), + dynamic_loss_scale=self.dynamic_loss_scale(), + dynamic_loss_args=dynamic_loss_args, + mpu=self.mpu, + clip_grad=clip_grad, + fused_lamb_legacy=self.optimizer_name() == LAMB_OPTIMIZER, + ) + + return optimizer + + def _configure_bf16_optimizer(self, optimizer): + clip_grad = self.gradient_clipping() + + if optimizer is None: + optimizer = DummyOptim(list(self.module.parameters())) + + log_dist('Creating BF16 optimizer', ranks=[0]) + + timers = self.timers if self.wall_clock_breakdown() else NoopTimer() + optimizer = BF16_Optimizer(optimizer, + self.param_names, + mpu=self.mpu, + clip_grad=clip_grad, + allgather_bucket_size=self.zero_allgather_bucket_size(), + dp_process_group=self.seq_data_parallel_group, + timers=timers, + grad_acc_dtype=self.get_data_types()[1], + graph_harvesting=self.graph_harvesting(), + immediate_grad_update=self._config.bfloat16_immediate_grad_update, + has_moe_layers=self.has_moe_layers) + + return optimizer + + def _configure_zero_optimizer(self, optimizer): + zero_stage = self.zero_optimization_stage() + + mics_shard_size = self.mics_shard_size() + model_dtype, gradient_accumulation_dtype = self.get_data_types() + + timers = self.timers if self.wall_clock_breakdown() else NoopTimer() + + if optimizer is None: + optimizer = DummyOptim(list(self.module.parameters())) + + if self.zero_legacy_stage1(): + raise Exception( + "The deprecated version of ZeRO Stage 1 is not supported in deepspeed >= 0.5.9. Please downgrade to a version less than 0.5.9 if you need to use this deprecated version of ZeRO." 
+ ) + + if zero_stage <= ZeroStageEnum.gradients: + overlap_comm = self.zero_overlap_comm() + contiguous_gradients = self.zero_contiguous_gradients() + round_robin_gradients = self.zero_round_robin_gradients() + assert not isinstance(optimizer, DummyOptim), "zero stage {} requires an optimizer".format(zero_stage) + + log_dist(f'Creating {model_dtype} ZeRO stage {zero_stage} optimizer', ranks=[0]) + + if isinstance(self.module, PipelineModule): + if overlap_comm: + logger.warning("Pipeline parallelism does not support overlapped communication, will be disabled.") + overlap_comm = False + optimizer = DeepSpeedZeroOptimizer( + optimizer, + self.param_names, + timers=timers, + static_loss_scale=self.loss_scale(), + dynamic_loss_scale=self.dynamic_loss_scale(), + dynamic_loss_args=self.dynamic_loss_scale_args(), + clip_grad=self.gradient_clipping(), + contiguous_gradients=contiguous_gradients, + reduce_bucket_size=self.zero_reduce_bucket_size(), + use_multi_rank_bucket_allreduce=self.zero_multi_rank_bucket_allreduce(), + allgather_bucket_size=self.zero_allgather_bucket_size(), + dp_process_group=self.seq_data_parallel_group, + expert_parallel_group=self.expert_parallel_group if self.has_moe_layers else None, + expert_data_parallel_group=self.expert_data_parallel_group if self.has_moe_layers else None, + reduce_scatter=self.zero_reduce_scatter(), + overlap_comm=overlap_comm, + offload_optimizer_config=self.zero_offload_optimizer(), + mpu=self.mpu, + postscale_gradients=self.postscale_gradients(), + gradient_predivide_factor=self.gradient_predivide_factor(), + gradient_accumulation_steps=self.gradient_accumulation_steps(), + ignore_unused_parameters=self.zero_ignore_unused_parameters(), + partition_grads=zero_stage == ZeroStageEnum.gradients, + round_robin_gradients=round_robin_gradients, + has_moe_layers=self.has_moe_layers, + fp16_master_weights_and_gradients=self.fp16_master_weights_and_gradients(), + gradient_accumulation_dtype=gradient_accumulation_dtype, + communication_data_type=self.communication_data_type, + elastic_checkpoint=self.zero_elastic_checkpoint()) + + elif zero_stage == ZeroStageEnum.weights: + assert not self.has_moe_layers, "MoE not supported with Stage 3" + if isinstance(optimizer, DummyOptim): + log_dist("Creating ZeRO Offload", ranks=[0]) + zero_param_parallel_group = groups._get_zero_param_intra_parallel_group() + if self.zero_hpz_partition_size() > 1 and zero_param_parallel_group is None: + self._set_zero_group_parallelism() + zero_param_parallel_group = groups._get_zero_param_intra_parallel_group() + optimizer = DeepSpeedZeRoOffload( + self.module, + timers=timers, + ds_config=self.config, + overlap_comm=self.zero_overlap_comm(), + prefetch_bucket_size=self.zero_prefetch_bucket_size(), + max_reuse_distance=self.zero_max_reuse_distance(), + max_live_parameters=self.zero_max_live_parameters(), + param_persistence_threshold=self.zero_param_persistence_threshold(), + model_persistence_threshold=self.zero_model_persistence_threshold(), + offload_param_config=self.zero_offload_param(), + mpu=self.mpu, + zero_param_parallel_group=zero_param_parallel_group, + zero_quantized_weights=self.zero_quantized_weights(), + zero_quantized_nontrainable_weights=self.zero_quantized_nontrainable_weights(), + ) + else: + log_dist( + f'Creating fp16 ZeRO stage {zero_stage} optimizer,' + f' MiCS is enabled {mics_shard_size>0},' + f' Hierarchical params gather {self._config.mics_hierarchial_params_gather}', + ranks=[0]) + if mics_shard_size > 0: + return self._return_mics_optimizer(optimizer, 
timers) + + log_dist(f'Creating {model_dtype} ZeRO stage {zero_stage} optimizer', ranks=[0]) + from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 + optimizer = DeepSpeedZeroOptimizer_Stage3( + self.module, + optimizer, + timers=timers, + ds_config=self.config, + static_loss_scale=self.loss_scale(), + dynamic_loss_scale=self.dynamic_loss_scale(), + dynamic_loss_args=self.dynamic_loss_scale_args(), + clip_grad=self.gradient_clipping(), + contiguous_gradients=self.zero_contiguous_gradients(), + reduce_bucket_size=self.zero_reduce_bucket_size(), + prefetch_bucket_size=self.zero_prefetch_bucket_size(), + max_reuse_distance=self.zero_max_reuse_distance(), + max_live_parameters=self.zero_max_live_parameters(), + param_persistence_threshold=self.zero_param_persistence_threshold(), + model_persistence_threshold=self.zero_model_persistence_threshold(), + dp_process_group=self.seq_data_parallel_group, + all2all_process_group=self.local_all_to_all_group, + reduce_scatter=self.zero_reduce_scatter(), + overlap_comm=self.zero_overlap_comm(), + offload_optimizer_config=self.zero_offload_optimizer(), + offload_param_config=self.zero_offload_param(), + sub_group_size=self.zero_sub_group_size(), + offload_ratio=self.zero_partial_offload(), + mpu=self.mpu, + postscale_gradients=self.postscale_gradients(), + gradient_predivide_factor=self.gradient_predivide_factor(), + gradient_accumulation_steps=self.gradient_accumulation_steps(), + aio_config=self.aio_config(), + gradient_accumulation_dtype=gradient_accumulation_dtype, + communication_data_type=self.communication_data_type, + zero_hpz_partition_size=self.zero_hpz_partition_size(), + zero_quantized_weights=self.zero_quantized_weights(), + zero_quantized_nontrainable_weights=self.zero_quantized_nontrainable_weights(), + ) + + else: + raise NotImplementedError("ZeRO stage {} not implemented".format(zero_stage)) + + return optimizer + + def _return_mics_optimizer(self, basic_optimizer, timers): + from deepspeed.runtime.zero.mics import MiCS_Optimizer + model_dtype, gradient_accumulation_dtype = self.get_data_types() + optimizer = MiCS_Optimizer(self.module, + basic_optimizer, + timers=timers, + ds_config=self.config, + static_loss_scale=self.loss_scale(), + dynamic_loss_scale=self.dynamic_loss_scale(), + dynamic_loss_args=self.dynamic_loss_scale_args(), + clip_grad=self.gradient_clipping(), + contiguous_gradients=self.zero_contiguous_gradients(), + reduce_bucket_size=self.zero_reduce_bucket_size(), + prefetch_bucket_size=self.zero_prefetch_bucket_size(), + max_reuse_distance=self.zero_max_reuse_distance(), + max_live_parameters=self.zero_max_live_parameters(), + param_persistence_threshold=self.zero_param_persistence_threshold(), + model_persistence_threshold=self.zero_model_persistence_threshold(), + dp_process_group=self.seq_data_parallel_group, + reduce_scatter=self.zero_reduce_scatter(), + overlap_comm=self.zero_overlap_comm(), + offload_optimizer_config=self.zero_offload_optimizer(), + offload_param_config=self.zero_offload_param(), + sub_group_size=self.zero_sub_group_size(), + mpu=self.mpu, + postscale_gradients=self.postscale_gradients(), + gradient_predivide_factor=self.gradient_predivide_factor(), + gradient_accumulation_steps=self.gradient_accumulation_steps(), + aio_config=self.aio_config(), + gradient_accumulation_dtype=gradient_accumulation_dtype, + communication_data_type=self.communication_data_type) + return optimizer + + def _configure_eigenvalue(self): + eigenvalue = Eigenvalue( + verbose=self.eigenvalue_verbose(), + 
max_iter=self.eigenvalue_max_iter(), + tol=self.eigenvalue_tol(), + stability=self.eigenvalue_stability(), + gas_boundary_resolution=self.eigenvalue_gas_boundary_resolution(), + layer_name=self.eigenvalue_layer_name(), + layer_num=self.eigenvalue_layer_num(), + ) + + return eigenvalue + + def _configure_progressive_layer_drop(self): + pld = ProgressiveLayerDrop(theta=self.pld_theta(), gamma=self.pld_gamma()) + + return pld + + def _configure_curriculum_scheduler_legacy(self): + scheduler = CurriculumScheduler(self.curriculum_params_legacy()) + return scheduler + + @staticmethod + def is_map_style_dataset(obj): + return hasattr(obj, "__getitem__") and hasattr(obj, "__len__") + + @staticmethod + def is_iterable_style_dataset(obj): + return isinstance(obj, torch.utils.data.IterableDataset) # hasattr(obj, "__iter__") should work as well + + def dataloader_drop_last(self): + return self._config.dataloader_drop_last + + def was_step_applied(self) -> bool: + """Returns True if the latest ``step()`` produced in parameter updates. + Note that a ``False`` return is not an error condition. Steps are frequently + no-ops, such as between gradient accumulation boundaries or when overflows + occur. + Returns: + bool: Whether the latest ``step()`` modified model parameters. + """ + return self._step_applied + + def deepspeed_io(self, + dataset, + batch_size=None, + route=ROUTE_TRAIN, + pin_memory=True, + data_sampler=None, + collate_fn=None, + num_local_io_workers=None): + if not (self.is_map_style_dataset(dataset) or self.is_iterable_style_dataset(dataset)): + raise ValueError("Training data must be a torch Dataset") + + if batch_size is None: + batch_size = self.train_micro_batch_size_per_gpu() + + if collate_fn is None: + collate_fn = self.collate_fn + + # Currently we only use timer in train route + deepspeed_io_timer = None + if route == ROUTE_TRAIN: + deepspeed_io_timer = self.tput_timer + + # If mpu is provided, forward world size and parallel rank to sampler. 
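+ # Otherwise the engine's own dp_world_size / global_rank are used below. For illustration
+ # (assuming an already-initialized engine): loader = engine.deepspeed_io(train_dataset)
+ # is sufficient, with batch_size defaulting to train_micro_batch_size_per_gpu().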
+ data_parallel_world_size = self.dp_world_size + data_parallel_rank = self.global_rank + if self.mpu is not None: + data_parallel_world_size = self.mpu.get_data_parallel_world_size() + data_parallel_rank = self.mpu.get_data_parallel_rank() + + if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL): + data_sampler = torch.utils.data.DistributedSampler( + dataset, + num_replicas=data_parallel_world_size, + rank=data_parallel_rank, + shuffle=False, + ) + + deepspeed_dataloader_config = {} + if self.curriculum_learning_enabled(): + deepspeed_dataloader_config = { + CURRICULUM_LEARNING: self.curriculum_learning_enabled(), + DATA_EFFICIENCY: self.data_efficiency_config(), + DATA_PARALLEL_GROUP: self.data_parallel_group, + GRADIENT_ACCUMULATION_STEPS: self.gradient_accumulation_steps(), + GLOBAL_RANK: self.global_rank, + DATA_SAMPLING_NUM_WORKERS: self.data_sampling_config()[DATA_SAMPLING_NUM_WORKERS] + } + + return DeepSpeedDataLoader(dataset=dataset, + batch_size=batch_size, + pin_memory=pin_memory, + collate_fn=collate_fn, + local_rank=self.local_rank, + tput_timer=deepspeed_io_timer, + num_local_io_workers=num_local_io_workers, + data_sampler=data_sampler, + data_parallel_world_size=data_parallel_world_size, + data_parallel_rank=data_parallel_rank, + dataloader_drop_last=self.dataloader_drop_last(), + deepspeed_dataloader_config=deepspeed_dataloader_config) + + def train(self, mode=True): + r"""""" + + self.warn_unscaled_loss = True + self.module.train(mode) + + def eval(self): + r"""""" + + self.warn_unscaled_loss = True + self.module.train(False) + + def _scale_loss_by_gas(self, prescaled_loss, eval_micro_batches=None): + # In pipeline evaluation, there is an option to use different micro-bs, which creates different number of + # micro batches, thus the training gas, is not valid in this case. need to use the number of eval_micro_batches + scaling_factor = self.gradient_accumulation_steps() if eval_micro_batches is None else eval_micro_batches + if isinstance(prescaled_loss, torch.Tensor): + scaled_loss = prescaled_loss / scaling_factor + elif isinstance(prescaled_loss, tuple) or isinstance(prescaled_loss, list): + scaled_loss = [] + for l in prescaled_loss: + if isinstance(l, torch.Tensor): + scaled_loss.append(l / scaling_factor) + else: + scaled_loss.append(l) + else: + scaled_loss = prescaled_loss + if self.warn_unscaled_loss: + logger.warning(f"DeepSpeed unable to scale loss because of type: {type(prescaled_loss)}") + self.warn_unscaled_loss = False + + return scaled_loss + + @instrument_w_nvtx + def forward(self, *inputs, **kwargs): + r"""Execute forward propagation + Arguments: + *inputs: Variable length input list + **kwargs: variable length keyword arguments + """ + + if self.autotuning_profile_model_info(): + ma = get_ma_status() + else: + see_memory_usage("Engine before forward", force=self.memory_breakdown()) + + flops_profiler_active = (self.flops_profiler_enabled() + and self.global_steps == self.flops_profiler_profile_step() and self.global_rank == 0) + + # used to check quantization happens at step 0! 
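+ # Concretely: on global step 0 the compression scheduler runs its step-zero check and,
+ # if MoQ weight quantization is enabled, the optimizer's bit16/fp16 parameter groups are
+ # quantized before the first forward pass (see the block below).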
+ if self.global_steps == 0 and hasattr(self, "compression_scheduler"): + self.compression_scheduler.step(step_zero_check=True) + if self.quantizer: + tensor_to_quantize = self.optimizer.bit16_groups if self.zero_optimization_stage( + ) == 2 else self.optimizer.fp16_groups + if self.compression_scheduler.weight_quantization_enabled: + self.quantizer.quantize( + tensor_to_quantize, + (self.optimizer.overflow if self.fp16_enabled() else False), + self.eigenvalue_enabled(), + None, + ) + + if flops_profiler_active: + self.flops_profiler.start_profile(ignore_list=None) + + if self.module.training: + if self.progressive_layer_drop: + kwargs.update(self.progressive_layer_drop.get_state()) + + if self.__class__.__name__ != "PipelineEngine": + # TODO: The above if condition is a HACK since for PipelineEngine + # it's difficult to inject argument in forward pass. + if self.module.training and self.curriculum_enabled_legacy(): + self.curriculum_scheduler_legacy.update_difficulty(self.global_steps + 1) + if self.curriculum_params_legacy()["curriculum_type"] == "seqlen": + kwargs.update({"curriculum_seqlen": self.curriculum_scheduler_legacy.get_current_difficulty()}) + + if self.module.training and self.random_ltd_enabled(): + self.random_ltd_scheduler.update_seq(self.global_steps) + + if self.zero_optimization_partition_weights(): + # Enable automated discovery of external parameters by indicating that + # we are in a forward pass. + for module in self.module.modules(): + module._parameters._in_forward = True + + self._start_timers(self.engine_timers.forward_timers) + + if self.training_dataloader is None: + self.tput_timer.start() + + if self.fp16_auto_cast(): + inputs = self._cast_inputs_half(inputs) + + loss = self.module(*inputs, **kwargs) + + if self.zero_optimization_partition_weights(): + # Disable automated discovery of external parameters + for module in self.module.modules(): + module._parameters._in_forward = False + + self._stop_timers(self.engine_timers.forward_timers) + + if flops_profiler_active: + self.flops_profiler.stop_profile() + + if self.autotuning_profile_model_info(): + activation_mem = get_ma_status() - ma + self.autotuning_model_info["activation_mem_per_gpu"] = activation_mem + print_json_dist(self.autotuning_model_info, [0], path=self.autotuning_model_info_path()) + exit() + else: + see_memory_usage("Engine after forward", force=self.memory_breakdown()) + return loss + + def _cast_inputs_half(self, inputs): + if isinstance(inputs, (list, tuple)): + new_inputs = [] + for v in inputs: + new_inputs.append(self._cast_inputs_half(v)) + return inputs.__class__(new_inputs) + elif isinstance(inputs, dict): + new_inputs = {} + for k, v in inputs.items(): + new_inputs[k] = self._cast_inputs_half(v) + return new_inputs + elif hasattr(inputs, 'half'): + return inputs.half() + else: + return inputs + + def print_forward_breakdown(self, fwd_time): + gate_time = 0.0 + moe_time = 0.0 + falltoall = 0.0 + salltoall = 0.0 + + for gate in self.gate_modules: + #logger.info(f"Individual TopK gate time: {gate.gate_time:.2f} ms") + gate_time += gate.gate_time + + for l in self.moe_layers: + #logger.info(f"MoE layer; total: {l.time_moe:.2f} ms, first alltoall: {l.time_falltoall:.2f}, second alltoall: {l.time_salltoall:.2f}") + moe_time += l.time_moe + falltoall += l.time_falltoall + salltoall += l.time_salltoall + + # TODO: Allreduce/average them across ranks for more accurate timing. 
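+ # Sketch only (not implemented here): the per-rank sums could be packed into a tensor,
+ # dist.all_reduce'd across the data-parallel group and divided by dist.get_world_size(),
+ # so that rank 0 logs an average instead of its local timings.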
+ + # if deepspeed.comm.get_rank() == 0: + log_dist( + f"time (ms) | fwd: {fwd_time:.2f} (fwd_moe: {moe_time:.2f}, 1st_a2a: {falltoall:.2f}, 2nd_a2a: {salltoall:.2f}, top_k: {gate_time:.2f})", + ranks=[0]) + + @instrument_w_nvtx + def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE): + # Pass (PP) gas boundary flag to optimizer (required for zero) + self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary() + # ZeRO stage >= 2 communicates during non gradient accumulation boundaries as well + if self.zero_optimization_partition_gradients(): + self.optimizer.overlapping_partition_gradients_reduce_epilogue() + + # Communicate only at gradient accumulation boundaries + elif self.is_gradient_accumulation_boundary(): + if self.zero_optimization_stage() == ZeroStageEnum.optimizer_states and hasattr( + self.optimizer, 'reduce_gradients'): + self.optimizer.reduce_gradients(pipeline_parallel=self.pipeline_parallelism) + else: + grads = None + self.buffered_allreduce_fallback(grads=grads, elements_per_buffer=bucket_size) + + @instrument_w_nvtx + def backward(self, loss, allreduce_gradients=True, release_loss=False, retain_graph=False, scale_wrt_gas=True): + r"""Execute backward pass on the loss + Arguments: + loss: Torch tensor on which to execute backward propagation + allreduce_gradients: is deprecated, ignored, and will soon be removed' + retain_graph: bool, default: false + forward on user defined choice of retain_graph + """ + + see_memory_usage("Engine before backward", force=self.memory_breakdown()) + + if self.scale_wrt_gas is not None: + scale_wrt_gas = self.scale_wrt_gas + + if not allreduce_gradients: + logger.warning(f"Argument `allreduce_gradients` is deprecated, ignored, and will soon be removed") + + # scale loss w.r.t. 
gradient accumulation if needed + if self.gradient_accumulation_steps() > 1 and scale_wrt_gas: + loss = self._scale_loss_by_gas(loss.float()) + + # Log training loss + mean_loss = loss.mean().detach() + self.losses = mean_loss if self.losses is None else self.losses + mean_loss + if self.monitor.enabled: + if self.is_gradient_accumulation_boundary(): + if self.global_rank == 0: + self.summary_events = [( + f"Train/Samples/train_loss", + self.losses.item(), + self.global_samples, + )] + self.monitor.write_events(self.summary_events) + + self._start_timers(self.engine_timers.backward_timers) + + assert self.optimizer is not None and not isinstance(self.optimizer, DummyOptim), \ + "must provide optimizer during init in order to use backward" + + self._start_timers(self.engine_timers.backward_inner_timers) + + if self.zero_optimization(): + self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary() + self.optimizer.backward(loss, retain_graph=retain_graph) + elif self.amp_enabled(): + # AMP requires delaying unscale when inside gradient accumulation boundaries + # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations + delay_unscale = not self.is_gradient_accumulation_boundary() + with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss: + scaled_loss.backward(retain_graph=retain_graph) + elif self.fp16_enabled(): + if self.eigenvalue_enabled(): + self.optimizer.backward(loss, create_graph=True, retain_graph=True) + else: + self.optimizer.backward(loss, retain_graph=retain_graph) + elif self.bfloat16_enabled(): + self.optimizer.backward(loss) + else: + if self.eigenvalue_enabled(): + loss.backward(create_graph=True, retain_graph=True) + else: + loss.backward(retain_graph=retain_graph) + + self._stop_timers(self.engine_timers.backward_inner_timers) + + self._start_timers(self.engine_timers.backward_reduce_timers) + + if allreduce_gradients and self.enable_backward_allreduce: + # Traditional code path that allreduces the module parameter grads + self.allreduce_gradients() + + self._stop_timers(self.engine_timers.backward_reduce_timers) + + self._stop_timers(self.engine_timers.backward_timers) + + if release_loss: + # loss.data = None + pass + + see_memory_usage("Engine after backward", force=self.memory_breakdown()) + + return loss + + def is_gradient_accumulation_boundary(self): + """ + Query whether the current micro-batch is at the boundary of + gradient accumulation, and thus will trigger gradient reductions and + an optimizer step. + + Returns: + bool: if the current step is a gradient accumulation boundary. + + """ + if self._is_gradient_accumulation_boundary is None: + return (self.micro_steps + 1) % \ + self.gradient_accumulation_steps() == 0 + else: + return self._is_gradient_accumulation_boundary + + def set_gradient_accumulation_boundary(self, is_boundary): + """ + Manually overrides the DeepSpeed engine's gradient accumulation boundary state, this is an optional + feature and should be used with care. The state should be set before to the intended + value before each forward/backward. The final forward/backward should have the + boundary state set to True. This style allows client code to only call engine.step() once after all + the gradient accumulation passes are complete. See example below: + .. 
code-block:: python + engine.set_gradient_accumulation_boundary(False) + for _ in range(gradient_accumulation_steps - 1): + micro_batch = next(data_loader) + loss = engine(micro_batch) + engine.backward(loss) + engine.set_gradient_accumulation_boundary(True) + micro_batch = next(data_loader) + loss = engine(micro_batch) + engine.backward(loss) + engine.step() + Arguments: + is_boundary (bool): are we at a gradient accumulation boundary or not? + """ + self._is_gradient_accumulation_boundary = is_boundary + self.optimizer.is_gradient_accumulation_boundary = is_boundary + + def zero_grad(self): + """ + Zero parameter grads. + """ + for param_name, param in self.module.named_parameters(): + param.grad = None + + def clip_fp32_gradients(self): + clip_grad_norm_(parameters=self.module.parameters(), max_norm=self.gradient_clipping(), mpu=self.mpu) + + def _take_model_step(self, lr_kwargs, block_eigenvalue={}): + if self.gradient_clipping() > 0.0: + if not (self.fp16_enabled() or self.bfloat16_enabled() or self.amp_enabled() or self.zero_optimization()): + self.clip_fp32_gradients() + elif self.amp_enabled(): + # AMP's recommended way of doing clipping + # https://nvidia.github.io/apex/advanced.html#gradient-clipping + master_params = amp.master_params(self.optimizer) + clip_grad_norm_(parameters=master_params, max_norm=self.gradient_clipping(), mpu=self.mpu) + self.optimizer.step() + + if hasattr(self.optimizer, '_global_grad_norm'): + self._global_grad_norm = self.optimizer._global_grad_norm + + # Quantize the updated parameter if there is no overflow + if self.quantizer: + tensor_to_quantize = self.optimizer.bit16_groups if self.zero_optimization_stage( + ) == 2 else self.optimizer.fp16_groups + if self.compression_scheduler.weight_quantization_enabled: + self.quantizer.quantize( + tensor_to_quantize, + (self.optimizer.overflow if self.fp16_enabled() else False), + self.eigenvalue_enabled(), + block_eigenvalue, + ) + # zero grad in basic optimizer could be unreliable and may not exhibit + # the behavior that we want + if self.bfloat16_enabled(): + # TODO: Temporary until bf16_optimizer and zero_optimizer are integrated + if self.zero_optimization() and hasattr(self.optimizer, "zero_grad"): + self.optimizer.zero_grad() + else: + pass + elif self.zero_optimization() or self.fp16_enabled() or self.amp_enabled(): + self.optimizer.zero_grad() + else: + self.zero_grad() + + report_progress = self.global_rank == 0 if self.global_rank else True + + # Check overflow here since in DS fp16 optimizer, the overflow is updated in above step() function. + overflow = False + if hasattr(self.optimizer, "overflow"): + overflow = self.optimizer.overflow + self._step_applied = not overflow + + if overflow: + self.skipped_steps += 1 + else: + self.compression_scheduler.step() + if self.lr_scheduler is not None: + try: + self.lr_scheduler.step(**(lr_kwargs or {})) + except TypeError: + # XXX Hack to work with Megatron 2.0 and DeepSpeed pipelines. + # We don't currently have a way to specify lr_kwargs from + # pipe_engine.train_batch() + self.lr_scheduler.step(self.train_batch_size()) + + if report_progress and (self.global_steps + 1) % self.steps_per_print() == 0: + self._report_progress(self.global_steps + 1) + + self.losses = None + self.global_steps += 1 + self.global_samples += self.train_batch_size() + + def step(self, lr_kwargs=None): + r"""Execute the weight update step after forward and backward propagation + on effective_train_batch. 
+ """ + see_memory_usage("Engine before step", force=self.memory_breakdown()) + + # Check early because self.global_steps is incremented at some point here. + # TODO: Delay self.global_steps increment until very end of this function. + flops_profiler_active = self.flops_profiler_enabled( + ) and self.global_steps == self.flops_profiler_profile_step() and self.global_rank == 0 + + self._start_timers(self.engine_timers.step_timers) + + assert self.optimizer is not None and not isinstance(self.optimizer, DummyOptim), \ + "must provide optimizer during init in order to use step" + + report_progress = False + + self._step_applied = False # assume False, will flip to True + + # Update the model when we reach gradient accumulation boundaries + if self.is_gradient_accumulation_boundary(): + self.gas_boundary_ctr += 1 + + if (self.eigenvalue_enabled() and (self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution() == 0) + and self.quantizer.any_precision_switch()): + log_dist(f"computing eigenvalue...", ranks=[0]) + self.block_eigenvalue = self.eigenvalue.compute_eigenvalue(self.module, self.device, + self.optimizer.cur_scale) + + if self.progressive_layer_drop: + self.progressive_layer_drop.update_state(self.global_steps) + + if (self.eigenvalue_enabled() and not self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution() + and self.quantizer.any_precision_switch()): + self._take_model_step(lr_kwargs, self.block_eigenvalue) + else: + self._take_model_step(lr_kwargs) + + report_progress = self.global_rank == 0 if self.global_rank else True + + self.tput_timer.stop(global_step=self.is_gradient_accumulation_boundary(), report_speed=report_progress) + + self._stop_timers(self.engine_timers.step_timers) + + # Log learning rate + if self.monitor.enabled: + if self.is_gradient_accumulation_boundary(): + if self.global_rank == 0: + self.summary_events = [(f"Train/Samples/lr", self.get_lr()[0], self.global_samples)] + + if self.fp16_enabled() and hasattr(self.optimizer, "cur_scale"): + self.summary_events.append(( + f"Train/Samples/loss_scale", + self.optimizer.cur_scale, + self.global_samples, + )) + + if (self.eigenvalue_enabled() + and not self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution()): + ev_values = self.block_eigenvalue.values() + for i in range(len(ev_values)): + self.summary_events.append(( + f"Train/Eigenvalues/ModelBlockParam_{i}", + self.ev_values[i][0], + self.global_samples, + )) + self.monitor.write_events(self.summary_events) + + # Check flops profiling + if flops_profiler_active: + if self.autotuning_enabled(): + self.flops = self.flops_profiler.get_total_flops() * 3 + self.fwd_duration = self.flops_profiler.get_total_duration() + else: + self.flops_profiler.print_model_profile( + profile_step=self.global_steps, + module_depth=self.flops_profiler_module_depth(), + top_modules=self.flops_profiler_top_modules(), + detailed=self.flops_profiler_detailed(), + output_file=self.flops_profiler_output_file(), + ) + self.flops_profiler.end_profile() + + if self.autotuning_enabled() and self.global_steps == (self.autotuning_end_profile_step() + 1): + self._autotuning_exit() + + if self.wall_clock_breakdown(): + # Log micro timing and reset + self.timers.log(names=self.engine_timers.micro_timers, memory_breakdown=self.memory_breakdown()) + + if self.wall_clock_breakdown() or self.flops_profiler_enabled(): + # Log global timing and reset + if self.is_gradient_accumulation_boundary(): + if self.monitor.enabled: + self._write_monitor() + + if self.has_moe_layers: + fwd_time 
= self.timers(FORWARD_GLOBAL_TIMER).elapsed(reset=False) + self.print_forward_breakdown(fwd_time=fwd_time) + + self.timers.log(self.engine_timers.global_timers) + + self.micro_steps += 1 + see_memory_usage("Engine after step", force=self.memory_breakdown()) + + def _start_timers(self, timer_names): + for name in timer_names: + self.timers(name).start() + + def _stop_timers(self, timer_names): + record = self.is_gradient_accumulation_boundary() and \ + self.flops_profiler_enabled() and \ + (self.global_steps >= self.flops_profiler_profile_step()) + for name in timer_names: + self.timers(name).stop(record=record) + + def _autotuning_exit(self): + if self.global_rank == 0: + msg = self.timers.get_mean([ + FORWARD_GLOBAL_TIMER, + BACKWARD_GLOBAL_TIMER, + STEP_GLOBAL_TIMER, + ], reset=False) + titer = 0.0 + titer += msg[FORWARD_GLOBAL_TIMER] if FORWARD_GLOBAL_TIMER in msg else 0 + titer += msg[BACKWARD_GLOBAL_TIMER] if BACKWARD_GLOBAL_TIMER in msg else 0 + titer += msg[STEP_GLOBAL_TIMER] if STEP_GLOBAL_TIMER in msg else 0 + titer *= self.gradient_accumulation_steps() + msg["latency"] = titer + msg["FLOPS_per_gpu"] = self.flops * 1_000_000 * self.gradient_accumulation_steps() / titer + msg["throughput"] = self.train_batch_size() * 1_000_000 / \ + msg["latency"] + print_json_dist(msg, [0], path=self.autotuning_metric_path()) + log_dist( + f"Wrote metrics to {self.autotuning_metric_path()}, {os.path.abspath(self.autotuning_metric_path())}", + ranks=[0]) + import atexit + atexit.register(print, "Autotuning: done with running current ds config.") + exit() + + def _write_monitor(self): + if self.global_rank == 0: + self.summary_events = [ + ( + f"Train/Samples/elapsed_time_ms_forward", + self.timers(FORWARD_GLOBAL_TIMER).elapsed(reset=False), + self.global_samples, + ), + ( + f"Train/Samples/elapsed_time_ms_backward", + self.timers(BACKWARD_GLOBAL_TIMER).elapsed(reset=False), + self.global_samples, + ), + ( + f"Train/Samples/elapsed_time_ms_backward_inner", + self.timers(BACKWARD_INNER_GLOBAL_TIMER).elapsed(reset=False), + self.global_samples, + ), + ( + f"Train/Samples/elapsed_time_ms_backward_allreduce", + self.timers(BACKWARD_REDUCE_GLOBAL_TIMER).elapsed(reset=False), + self.global_samples, + ), + ( + f"Train/Samples/elapsed_time_ms_step", + self.timers(STEP_GLOBAL_TIMER).elapsed(reset=False), + self.global_samples, + ), + ] + self.monitor.write_events(self.summary_events) + + def _get_optimizer_param(self, param_name): + result = [] + if not self.optimizer: + return result + for group in self.optimizer.param_groups: + if param_name in group: + result.append(group[param_name]) + else: + result.append(0.0) + return result + + def get_lr(self): + return self._get_optimizer_param("lr") + + def get_type(self): + return self._get_optimizer_param("type") + + def get_mom(self): + if self.optimizer_name() in ["SGD", "RMSprop"]: + return self._get_optimizer_param("momentum") + else: + return self._get_optimizer_param("betas") + + def get_pld_theta(self): + if self.progressive_layer_drop: + return self.progressive_layer_drop.get_theta() + else: + return None + + def _report_progress(self, step): + lr = self.get_lr() + mom = self.get_mom() + log_dist(f"step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}", ranks=[0]) + + def allreduce_bucket(self, bucket, dp_group, dp_world_size=None): + tensor = self.flatten(bucket) + + tensor_to_allreduce = tensor + + if self.communication_data_type != tensor.dtype: + tensor_to_allreduce = tensor.to(self.communication_data_type) + + if dp_world_size is None: + 
dp_world_size = dist.get_world_size(group=dp_group) + if self.postscale_gradients(): + if self.gradient_predivide_factor() != 1.0: + tensor_to_allreduce.mul_(1.0 / self.gradient_predivide_factor()) + + dist.all_reduce(tensor_to_allreduce, group=dp_group) + if self.gradient_average: + if self.gradient_predivide_factor() != dp_world_size: + tensor_to_allreduce.mul_(self.gradient_predivide_factor() / dp_world_size) + else: + tensor_to_allreduce.mul_(1. / dp_world_size) + dist.all_reduce(tensor_to_allreduce, group=dp_group) + + if self.communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce: + tensor.copy_(tensor_to_allreduce) + + return tensor + + def allreduce_and_copy(self, small_bucket, dp_group, dp_world_size=None): + allreduced = self.allreduce_bucket(small_bucket, dp_group, dp_world_size) + for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): + buf.copy_(synced) + + def allreduce_no_retain(self, bucket, dp_group, numel_per_bucket=500000000, dp_world_size=None): + small_bucket = [] + numel = 0 + for tensor in bucket: + small_bucket.append(tensor) + numel = numel + tensor.numel() + if numel > numel_per_bucket: + self.allreduce_and_copy(small_bucket, dp_group, dp_world_size) + small_bucket = [] + numel = 0 + if len(small_bucket) > 0: + self.allreduce_and_copy(small_bucket, dp_group, dp_world_size) + + def _get_gradients_for_reduction(self): + non_expert_grads = [] + expert_grads = {} + if self.has_moe_layers: + for key in self.expert_data_parallel_group.keys(): + expert_grads[key] = [] + + for param_name, param in self.module.named_parameters(): + if not param.requires_grad: + continue + + if param.grad is None: + # In cases where there is an imbalance of empty grads across + # ranks we must create empty grads, this will ensure that every + # rank is reducing the same size. In some cases it may make + # sense in the future to support the ability to average not + # w.r.t. world size but with a different value. 
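+ # The zero-filled gradient below is a no-op contribution to the summed gradients; it only
+ # guarantees that every rank enters the collective with identically sized buffers.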
+ param.grad = torch.zeros(param.size(), dtype=param.dtype, device=param.device) + + grad_data = param.grad.data + if param_name in self.sparse_tensor_module_names or grad_data.is_sparse: + # Call param.grad without data to avoid problem with setting of updated grads + grad_data = SparseTensor(param.grad) + + if is_moe_param(param): + expert_grads[param.group_name].append(grad_data) + else: + non_expert_grads.append(grad_data) + + return non_expert_grads, expert_grads + + def _reduce_non_expert_gradients(self, grads, elements_per_buffer): + split_sparse_tensor_buckets, split_dense_tensor_buckets = split_half_float_double_sparse(grads) + if self.pipeline_parallelism: + dp_group = self.mpu.get_data_parallel_group() + else: + dp_group = groups._get_sequence_data_parallel_group() + + for _, sparse_bucket_tuple in enumerate(split_sparse_tensor_buckets): + if sparse_bucket_tuple: + bucket_type, sparse_bucket = sparse_bucket_tuple + self.sparse_allreduce_no_retain(sparse_bucket, dp_group=dp_group) + + for _, dense_bucket_tuple in enumerate(split_dense_tensor_buckets): + if dense_bucket_tuple: + bucket_type, dense_bucket = dense_bucket_tuple + self.allreduce_no_retain(dense_bucket, dp_group=dp_group, numel_per_bucket=elements_per_buffer) + + def _reduce_expert_gradients(self, expert_grads, elements_per_buffer): + # to maintain the gradients value unaffected by ep_size setting, + # utilize dp_world_size for allreduce average + dp_world_size = dist.get_world_size(groups._get_data_parallel_group()) + for ep_name, expert_grads_group in expert_grads.items(): + ep_dp_group = groups._get_expert_data_parallel_group(ep_name) + split_sparse_tensor_buckets, split_dense_tensor_buckets = split_half_float_double_sparse( + expert_grads_group) + + for _, sparse_bucket_tuple in enumerate(split_sparse_tensor_buckets): + if sparse_bucket_tuple: + bucket_type, sparse_bucket = sparse_bucket_tuple + self.sparse_allreduce_no_retain(sparse_bucket, dp_group=ep_dp_group, dp_world_size=dp_world_size) + + for _, dense_bucket_tuple in enumerate(split_dense_tensor_buckets): + if dense_bucket_tuple: + bucket_type, dense_bucket = dense_bucket_tuple + # Separate between diff groups + self.allreduce_no_retain(dense_bucket, + dp_group=ep_dp_group, + numel_per_bucket=elements_per_buffer, + dp_world_size=dp_world_size) + + def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000): + if grads is None: + if hasattr(self.optimizer, "get_grads_for_reduction"): + # This is currently for BF16 optimizer + non_expert_grads, expert_grads = self.optimizer.get_grads_for_reduction() + else: + non_expert_grads, expert_grads = self._get_gradients_for_reduction() + else: + assert not self.has_moe_layers, "attempting to reduce grads in unsupported way w.r.t. 
MoE" + non_expert_grads = grads + + self._reduce_non_expert_gradients(non_expert_grads, elements_per_buffer) + + if self.has_moe_layers: + self._reduce_expert_gradients(expert_grads, elements_per_buffer) + + def sparse_allreduce_no_retain(self, bucket, dp_group, dp_world_size=None): + allreduced_sparses = self.sparse_allreduce_bucket(bucket, dp_group, dp_world_size) + # Densify sparse tensor and copy back to original location + for tensor in allreduced_sparses: + if tensor.is_sparse: + tensor.orig_dense_tensor.data = tensor.to_coo_tensor() + else: + tensor.orig_dense_tensor.copy_(tensor.to_dense()) + + def sparse_allreduce_bucket(self, bucket, dp_group, dp_world_size=None): + sparse_list = [] + for sparse in bucket: + sparse_list.append(self.sparse_allreduce(sparse, dp_group, dp_world_size)) + return sparse_list + + def sparse_allreduce(self, sparse, dp_group, dp_world_size=None): + original_data_type = sparse.values.dtype + if self.communication_data_type != sparse.values.dtype: + if self.communication_data_type in (torch.float16, torch.bfloat16): + indices = sparse.indices.to(torch.int32) + else: + indices = sparse.indices + values = sparse.values.to(self.communication_data_type) + else: + indices = sparse.indices + values = sparse.values + + if dp_world_size is None: + dp_world_size = dist.get_world_size(group=dp_group) + if self.postscale_gradients(): + if self.gradient_average: + values.mul_(self.gradient_predivide_factor() / (dp_world_size / float(self.sequence_parallel_size))) + else: + values.mul_(1. / (dp_world_size / float(self.sequence_parallel_size))) + + indices_device_list = self.sparse_all_gather(indices, dp_group) + values_device_list = self.sparse_all_gather(values, dp_group) + + sparse.indices = torch.cat(indices_device_list).to(torch.long) + sparse.values = torch.cat(values_device_list).to(original_data_type) + return sparse + + def sparse_all_gather(self, value, dp_group): + my_size = torch.LongTensor([value.size()[0]]).to(self.device) + all_sizes = self.all_gather_scalar(my_size, dp_group) + max_size = torch.cat(all_sizes).max() + fill_size = max_size - my_size + + assert value.dim() in [1, 2] + if value.dim() == 1: + if fill_size > 0: + value = torch.cat([value, value.new_empty(fill_size)]) + tensor_list = [value.new_empty(max_size) for _ in range(dist.get_world_size(group=dp_group))] + else: + if fill_size > 0: + value = torch.cat([value, value.new_empty(fill_size, value.size()[1])]) + tensor_list = [ + value.new_empty(max_size, + value.size()[1]) for _ in range(dist.get_world_size(group=dp_group)) + ] + + dist.all_gather(tensor_list, value, group=dp_group) + tensors = [] + for dev_idx, t in enumerate(tensor_list): + size = all_sizes[dev_idx][0] + tensors.append(t.index_select(0, torch.arange(size, dtype=torch.long, device=self.device))) + + return tensors + + def all_gather_scalar(self, value, dp_group): + tensor_list = [value.new_zeros(value.size()) for _ in range(dist.get_world_size(group=dp_group))] + dist.all_gather(tensor_list, value, group=dp_group) + return tensor_list + + def module_state_dict(self, destination=None, prefix="", keep_vars=False, exclude_frozen_parameters=False): + sd = self.module.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars) + + # Remove frozen parameter weights from state_dict if specified + if exclude_frozen_parameters: + for n, p in self.module.named_parameters(): + if not p.requires_grad and n in sd: + del sd[n] + + if self.random_ltd_enabled(): + sd = remove_random_ltd_state_dict(sd) + return sd + + 
@staticmethod + def load_moe_state_dict(checkpoint_path, + tag, + state_dict, + old_moe_load, + model=None, + mpu=None, + num_experts=1, + checkpoint_engine=TorchCheckpointEngine()): + if old_moe_load: + expp_rank = groups._get_expert_data_parallel_rank(groups._get_max_expert_size_name()) + + num_local_experts = max(num_experts) // groups._get_expert_parallel_world_size( + groups._get_max_expert_size_name()) + for local_expert_id in range(num_local_experts): + global_expert_id = expp_rank * num_local_experts + local_expert_id + expert_state_dict = checkpoint_engine.load( + DeepSpeedEngine._get_expert_ckpt_name( + checkpoint_path, + -1, # -1 means ignore layer_id + global_expert_id, + tag, + mpu), + map_location=torch.device('cpu')) + + # Updating global -> local expert ids + moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.' + for key in list(expert_state_dict.keys()): + local_key = key.replace(f'{moe_str_prefix}{global_expert_id}', + f'{moe_str_prefix}{local_expert_id}') + expert_state_dict[local_key] = expert_state_dict.pop(key) + state_dict.update(expert_state_dict) + + else: + moe_layer_id = 0 + for n_module, module in model.named_modules(): + if isinstance(module, MoE): # and deepspeed.comm.get_rank() == 0: + group_name = module.expert_group_name + num_local_experts = module.num_local_experts + expp_rank = groups._get_expert_parallel_rank(group_name) + # loop all local_experts + for local_expert_id in range(num_local_experts): + global_expert_id = expp_rank * num_local_experts + local_expert_id + expert_state_dict = checkpoint_engine.load(DeepSpeedEngine._get_expert_ckpt_name( + checkpoint_path, moe_layer_id, global_expert_id, tag, mpu), + map_location=torch.device('cpu')) + # print(expert_state_dict.keys()) + # Updating global -> local expert ids + moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.' 
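# Worked example (illustrative, not taken from any checkpoint): with expp_rank=1 and
# num_local_experts=2 this rank owns global experts 2 and 3, so a key such as
#   '<module>.deepspeed_moe.experts.deepspeed_experts.3.<param>'
# is renamed below to
#   '<module>.deepspeed_moe.experts.deepspeed_experts.1.<param>'
# before being merged into the engine state_dict.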
+ for key in list(expert_state_dict.keys()): + local_key = key.replace(f'{moe_str_prefix}{global_expert_id}', + f'{moe_str_prefix}{local_expert_id}') + expert_state_dict[local_key] = expert_state_dict.pop(key) + state_dict.update(expert_state_dict) + moe_layer_id += 1 + + def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None, fetch_z3_params=False): + if fetch_z3_params: + params_to_fetch = [ + p for p in self.module.parameters() + if hasattr(p, 'ds_id') and p.ds_status == ZeroParamStatus.NOT_AVAILABLE + ] + else: + params_to_fetch = [] + + with deepspeed.zero.GatheredParameters(params_to_fetch, modifier_rank=0): + module_state_dict = checkpoint['module'] + if custom_load_fn: + custom_load_fn(src=module_state_dict, dst=self.module) + else: + self.module.load_state_dict( + module_state_dict, # TODO + strict=strict) + + if checkpoint.get(FROZEN_PARAM_FRAGMENTS, None) is not None: + saved_frozen_params = checkpoint[FROZEN_PARAM_FRAGMENTS] + for param in self.module.parameters(): + if param.requires_grad: + continue + if param not in self.param_names: + raise ValueError(f"failed to find frozen {param} in named params") + name = self.param_names[param] + if hasattr(param, 'ds_id'): + param.ds_tensor.data.copy_(saved_frozen_params[name].data) + else: + param.data.copy_(saved_frozen_params[name].data) + + def _get_zero_ckpt_prefix(self, dp_rank, bf16_mode): + return f'{"bf16_" if bf16_mode else ""}zero_pp_rank_{dp_rank}' + + def _get_rank_zero_ckpt_name(self, checkpoints_path, tag, mp_rank, dp_rank, bf16_mode): + file_prefix = self._get_zero_ckpt_prefix(dp_rank, bf16_mode=bf16_mode) + zero_ckpt_name = os.path.join( + checkpoints_path, + str(tag), + f"{file_prefix}_mp_rank_{mp_rank:02d}_optim_states.pt", + ) + return zero_ckpt_name + + def _get_zero_ckpt_name(self, checkpoints_path, tag): + mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() + pp_rank = dist.get_rank(group=self.optimizer.dp_process_group) + bf16_mode = self.bfloat16_enabled() + return self._get_rank_zero_ckpt_name(checkpoints_path, tag, mp_rank, pp_rank, bf16_mode) + + def _get_ckpt_name(self, checkpoints_path, tag, mp_placeholder=None): + if mp_placeholder is not None: + mp_rank_str = mp_placeholder + else: + mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() + mp_rank_str = f"{mp_rank:02d}" + + if self.zero_optimization_partition_weights(): + filename = "zero_pp_rank_{}".format(dist.get_rank(group=self.optimizer.dp_process_group)) + ckpt_name = os.path.join( + checkpoints_path, + str(tag), + f"{filename}_mp_rank_{mp_rank_str}_model_states.pt", + ) + else: + ckpt_name = os.path.join( + checkpoints_path, + str(tag), + "mp_rank_" + mp_rank_str + "_model_states.pt", + ) + return ckpt_name + + def _get_optimizer_ckpt_name(self, checkpoints_path, tag, expp_rank): + mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() + ckpt_name = os.path.join(checkpoints_path, str(tag), + f'expp_rank_{expp_rank}_mp_rank_{mp_rank:02d}_optim_states.pt') + return ckpt_name + + @staticmethod + def _get_expert_ckpt_name(checkpoints_path, layer_id, expert_id, tag, mpu=None): + mp_rank = 0 if mpu is None else mpu.get_model_parallel_rank() + if layer_id <= -1: + # Used to support old checkpoint loading + ckpt_name = os.path.join(checkpoints_path, '' if tag is None else str(tag), + f'expert_{expert_id}_mp_rank_{mp_rank:02d}_model_states.pt') + else: + # Used to support new checkpoint loading + ckpt_name = os.path.join(checkpoints_path, '' if tag is None else str(tag), + 
f'layer_{layer_id}_expert_{expert_id}_mp_rank_{mp_rank:02d}_model_states.pt') + return ckpt_name + + def _get_all_ckpt_names(self, checkpoints_path, tag): + # It is required that (checkpoints_path, tag) are consistent among all ranks. + ckpt_file_pattern = self._get_ckpt_name(checkpoints_path, tag, mp_placeholder="*") + import glob + + ckpt_files = glob.glob(ckpt_file_pattern) + ckpt_files.sort() + return ckpt_files + + def load_checkpoint(self, + load_dir, + tag=None, + load_module_strict=True, + load_optimizer_states=True, + load_lr_scheduler_states=True, + load_module_only=False, + custom_load_fn=None): + """ + Load training checkpoint + + Arguments: + load_dir: Required. Directory to load the checkpoint from + tag: Checkpoint tag used as a unique identifier for checkpoint, if not provided will attempt to load tag in 'latest' file + load_module_strict: Optional. Boolean to strictly enforce that the keys in state_dict of module and checkpoint match. + load_optimizer_states: Optional. Boolean to load the training optimizer states from Checkpoint. Ex. ADAM's momentum and variance + load_lr_scheduler_states: Optional. Boolean to add the learning rate scheduler states from Checkpoint. + load_module_only: Optional. Boolean to load only the model weights from the checkpoint. Ex. warmstarting. + custom_load_fn: Optional. Custom model load function. + + Returns: + A tuple of ``load_path`` and ``client_state``. + *``load_path``: Path of the loaded checkpoint. ``None`` if loading the checkpoint failed. + *``client_state``: State dictionary used for loading required training states in the client code. + + Important: under ZeRO3, one cannot load checkpoint with ``engine.load_checkpoint()`` right + after ``engine.save_checkpoint()``. It is because ``engine.module`` is partitioned, and + ``load_checkpoint()`` wants a pristine model. If insisting to do so, please reinitialize engine + before ``load_checkpoint()``. + + """ + + if tag is None: + latest_tag = "latest_universal" if self.load_universal_checkpoint() else "latest" + latest_path = os.path.join(load_dir, latest_tag) + if os.path.isfile(latest_path): + with open(latest_path, "r") as fd: + tag = fd.read().strip() + else: + if self.load_universal_checkpoint(): + raise ValueError(f'Invalid for universal checkpoint: {latest_path} does not exist') + else: + logger.warning( + f"Unable to find latest file at {latest_path}, if trying to load latest " + "checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint." 
+ ) + return None, None + + if self._optimizer_has_ckpt_event_prologue(): + # Prepare for checkpoint load by ensuring all parameters are partitioned + self.optimizer.checkpoint_event_prologue() + + load_path, client_states = self._load_checkpoint(load_dir, + tag, + load_module_strict=load_module_strict, + load_optimizer_states=load_optimizer_states, + load_lr_scheduler_states=load_lr_scheduler_states, + load_module_only=load_module_only, + custom_load_fn=custom_load_fn) + + load_zero_checkpoint = load_path is not None and (self.zero_optimization() or self.bfloat16_enabled()) + if load_zero_checkpoint: + if (load_optimizer_states and not load_module_only) or self.load_universal_checkpoint(): + success = self._load_zero_checkpoint(load_dir, tag, load_optimizer_states=load_optimizer_states) + else: + success = False + if not success: + self.optimizer._restore_from_bit16_weights() + + if self.zero_has_nvme_offload(): + from shutil import copytree, disk_usage + offload_dir = self.optimizer.optimizer_swapper.swap_folder + offload_ckpt_dir = os.path.join(load_dir, tag, "offloaded_tensors") + _, _, free = disk_usage(offload_dir) + logger.info( + f"Copying NVMe offload checkpoint from {offload_ckpt_dir} to {offload_dir}, {free / 1e9:,.2f} GB free on target filesystem..." + ) + copytree(offload_ckpt_dir, offload_dir, dirs_exist_ok=True) + _, _, free = disk_usage(offload_dir) + logger.info(f"Copying complete! {free / 1e9:,.2f} GB free on target filesystem") + self.optimizer.reset_swap_buffers() + + if self._optimizer_has_ckpt_event_epilogue(): + self.optimizer.checkpoint_event_epilogue() + + if self.load_universal_checkpoint(): + self.optimizer.update_lp_params() + + return load_path, client_states + + def _load_checkpoint(self, + load_dir, + tag, + load_module_strict=True, + load_optimizer_states=True, + load_lr_scheduler_states=True, + load_module_only=False, + custom_load_fn=None): + + from deepspeed.runtime.state_dict_factory import SDLoaderFactory + + ckpt_list = self._get_all_ckpt_names(load_dir, tag) + sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list, checkpoint_engine=self.checkpoint_engine) + + is_pipe_parallel = isinstance(self.module, PipelineModule) + + mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() + load_path, checkpoint, _ = sd_loader.load(self.mp_world_size, mp_rank, is_pipe_parallel=is_pipe_parallel) + + if checkpoint is None: + return None, None + + fetch_z3_params = False + if self.zero_optimization_partition_weights() and not load_optimizer_states: + checkpoint['module'] = get_fp32_state_dict_from_zero_checkpoint(load_dir) + fetch_z3_params = True + + if is_pipe_parallel: + # Pipeline parallelism uses this to load its own checkpoint files. 
+ self._curr_ckpt_path = os.path.join(load_dir, tag) + + if self.has_moe_layers: + # print(checkpoint.keys()) + old_moe_load = False + if not isinstance(checkpoint['num_experts'], list): + old_moe_load = True + DeepSpeedEngine.load_moe_state_dict(load_dir, + tag, + state_dict=checkpoint['module'], + old_moe_load=old_moe_load, + model=self.module, + mpu=self.mpu, + num_experts=self.num_experts, + checkpoint_engine=self.checkpoint_engine) + if not self.load_universal_checkpoint(): + self.load_module_state_dict(checkpoint=checkpoint, + strict=load_module_strict, + custom_load_fn=custom_load_fn, + fetch_z3_params=fetch_z3_params) + + self.loaded_checkpoint_dp_world_size = checkpoint['dp_world_size'] + + optim_checkpoint = None + if load_module_only: + deepspeed_states = ['module'] + if self.optimizer is not None: + self.optimizer.refresh_fp32_params() + else: + has_zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled() + if load_optimizer_states and self.optimizer is not None and not has_zero_optimizer_state: + if self.has_moe_layers: + largest_group_name = groups._get_max_expert_size_name() + expp_rank = groups._get_expert_parallel_rank(largest_group_name) + optim_load_path = self._get_optimizer_ckpt_name(load_dir, tag, expp_rank) + optim_checkpoint = self.checkpoint_engine.load(optim_load_path, map_location=torch.device('cpu')) + else: + optim_checkpoint = checkpoint + + if self.fp16_enabled() or self.bfloat16_enabled(): + self.optimizer.load_state_dict(optim_checkpoint['optimizer'], + load_optimizer_states=load_optimizer_states) + else: + optim_checkpoint = checkpoint + + self.optimizer.load_state_dict(optim_checkpoint['optimizer']) + + if load_lr_scheduler_states and self.lr_scheduler is not None: + self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) + + if self.random_ltd_enabled() and self.random_ltd_scheduler is not None and 'random_ltd' in checkpoint: + self.random_ltd_scheduler.load_state_dict(checkpoint['random_ltd']) + + if self.training_dataloader is not None and self.curriculum_learning_enabled( + ) and 'data_sampler' in checkpoint: + self.training_dataloader.data_sampler.load_state_dict(checkpoint['data_sampler']) + + def get_sparse_tensor_module_names(original_set, loaded_set, original_parameters, loaded_parameters): + result = set() + + for name in original_set: + if name in loaded_parameters and name not in loaded_set: + continue # parameter existed in previous model and was not sparse + result.add(name) + + for name in loaded_set: + if name in original_parameters: + result.add(name) # parameter exists in both configs and it was sparse + + return result + + if 'sparse_tensor_module_names' in checkpoint: + sparse_tensor_module_names = checkpoint['sparse_tensor_module_names'] + elif 'csr_tensor_module_names' in checkpoint: + sparse_tensor_module_names = checkpoint['csr_tensor_module_names'] + else: + sparse_tensor_module_names = None + if sparse_tensor_module_names is not None: + if load_module_strict: + self.sparse_tensor_module_names = sparse_tensor_module_names + else: + self.sparse_tensor_module_names = get_sparse_tensor_module_names( + self.sparse_tensor_module_names, sparse_tensor_module_names, + dict(self.module.named_parameters()), checkpoint["module"]) + + self.global_steps = checkpoint['global_steps'] + self.global_samples = checkpoint.get('global_samples', self.global_steps * self.train_batch_size()) + self.skipped_steps = checkpoint['skipped_steps'] + self.loaded_checkpoint_mp_world_size = checkpoint['mp_world_size'] + deepspeed_states = 
[ + 'module', 'sparse_tensor_module_names', 'skipped_steps', 'global_steps', 'dp_world_size', + 'mp_world_size', 'data_sampler', 'random_ltd' + ] + client_state = {} + + if load_lr_scheduler_states: + deepspeed_states.append('lr_scheduler') + if load_optimizer_states: + deepspeed_states.append('optimizer') + + client_state = {key: value for key, value in checkpoint.items() if not key in deepspeed_states} + + if optim_checkpoint is not None: + client_state['optimizer'] = optim_checkpoint['optimizer'] + + return load_path, client_state + + def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True): + + load_serial = None + # When use loading checkpoint serial, checkpoint loading start from local rank 0, + # all other local rank would be paused, waiting for its rank-1 peer ready and its notification. + if self._config.zero_config.pipeline_loading_checkpoint: + assert self.zero_optimization_stage( + ) == ZeroStageEnum.weights, "Only stage3 support for pipeline checkpoint loading" + load_serial = torch.zeros(1).to(self.device) + if dist.get_local_rank() != 0: + dist.recv(tensor=load_serial, src=dist.get_rank() - 1) + if self.load_universal_checkpoint(): + zero_sd_list = None + checkpoint_folder = f'{os.path.join(load_dir, tag)}' + else: + if load_optimizer_states and self.seq_dp_world_size != self.loaded_checkpoint_dp_world_size: + raise ZeRORuntimeException("The checkpoint being loaded used a DP " \ + f"world size of {self.loaded_checkpoint_dp_world_size} but the " \ + f"current world size is {self.seq_dp_world_size}. Automatic adjustment " \ + "of ZeRO's optimizer state partitioning with a new world size is not " \ + "currently supported.") + checkpoint_folder = None + zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag) + if zero_sd_list is None: + return False + + self.optimizer.load_state_dict(state_dict_list=zero_sd_list, + load_optimizer_states=load_optimizer_states, + load_from_fp32_weights=self.zero_load_from_fp32_weights(), + checkpoint_folder=checkpoint_folder, + load_serial=load_serial) + + if self.load_universal_checkpoint(): + logger.info(f'loaded universal zero checkpoints from {checkpoint_folder} for rank {self.global_rank}') + else: + logger.info(f"loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}") + return True + + def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size, bf16_mode): + zero_ckpt_names = [] + for dp_rank in range(dp_world_size): + ckpt_name = self._get_rank_zero_ckpt_name(checkpoints_path=load_dir, + tag=tag, + mp_rank=mp_rank, + dp_rank=dp_rank, + bf16_mode=bf16_mode) + zero_ckpt_names.append(ckpt_name) + + return zero_ckpt_names + + def _get_all_zero_checkpoint_names(self, load_dir, tag, bf16_mode): + mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() + zero_ckpt_names = self._get_mp_rank_zero_checkpoint_names(load_dir=load_dir, + tag=tag, + mp_rank=mp_rank, + dp_world_size=self.loaded_checkpoint_dp_world_size, + bf16_mode=bf16_mode) + for i, ckpt_name in enumerate(zero_ckpt_names): + if not os.path.exists(ckpt_name): + # transparently handle the old file pattern for optim_states + if "optim_states.pt" in ckpt_name: + ckpt_name_try = ckpt_name.replace("_optim_states.pt", "optim_states.pt") + if os.path.exists(ckpt_name_try): + zero_ckpt_names[i] = ckpt_name_try + continue + + return zero_ckpt_names + + def _get_all_zero_checkpoint_state_dicts(self, zero_ckpt_names): + zero_sd_list = [] + for i, ckpt_name in enumerate(zero_ckpt_names): + _state = None + if 
ckpt_name is None: + _state = {OPTIMIZER_STATE_DICT: None} + # Fully load state for current rank + elif self.zero_elastic_checkpoint() or dist.get_rank(group=self.optimizer.dp_process_group) == i: + _state = self.checkpoint_engine.load( + ckpt_name, + map_location='cpu', + ) + else: + _state = {OPTIMIZER_STATE_DICT: None} + zero_sd_list.append(_state) + + zero_optimizer_sd = [sd[OPTIMIZER_STATE_DICT] for sd in zero_sd_list] + logger.info(f"successfully read {len(zero_optimizer_sd)} ZeRO state_dicts for rank {self.global_rank}") + return zero_optimizer_sd + + def _get_all_zero_checkpoints(self, load_dir, tag): + for bf16_mode in [self.bfloat16_enabled(), not self.bfloat16_enabled()]: + zero_ckpt_names = self._get_all_zero_checkpoint_names(load_dir, tag, bf16_mode) + if zero_ckpt_names is not None: + # Warn if loading checkpoint of different bit16 type + if bf16_mode is not self.bfloat16_enabled(): + checkpoint_bit16 = BFLOAT16 if bf16_mode else FP16 + engine_bit16 = BFLOAT16 if self.bfloat16_enabled() else FP16 + logger.warn(f'Loading {checkpoint_bit16} zero checkpoints into {engine_bit16} training engine') + return self._get_all_zero_checkpoint_state_dicts(zero_ckpt_names) + + return None + + def _checkpoint_tag_validation(self, tag): + if self.checkpoint_tag_validation_enabled(): + s_hash = hashlib.sha1(tag.encode()) + bhash = torch.ByteTensor([s_hash.digest()]).flatten().to(self.device) + max_bhash = bhash.clone() + min_bhash = bhash.clone() + dist.all_reduce(max_bhash, op=dist.ReduceOp.MAX) + dist.all_reduce(min_bhash, op=dist.ReduceOp.MIN) + valid = all(min_bhash == bhash) and all(max_bhash == bhash) + msg = (f"[rank={dist.get_rank()}] The checkpoint tag name '{tag}' is not consistent across " + "all ranks. Including rank unique information in checkpoint tag could cause issues when " + "restoring with different world sizes.") + if self.checkpoint_tag_validation_fail(): + assert valid, msg + elif not valid: + logger.warning(msg) + + def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True, exclude_frozen_parameters=False): + """Save training checkpoint + + Arguments: + save_dir: Required. Directory for saving the checkpoint + tag: Optional. Checkpoint tag used as a unique identifier for the checkpoint, global step is + used if not provided. Tag name must be the same across all ranks. + client_state: Optional. State dictionary used for saving required training states in the client code. + save_latest: Optional. Save a file 'latest' pointing to the latest saved checkpoint. + exclude_frozen_parameters: Optional. Exclude frozen parameters from checkpointed state. + Important: all processes must call this method and not just the process with rank 0. It is + because each process needs to save its master weights and scheduler+optimizer states. This + method will hang waiting to synchronize with other processes if it's called just for the + process with rank 0. 
+ + """ + if self._optimizer_has_ckpt_event_prologue(): + # Custom preparation for checkpoint save, if applicable + self.optimizer.checkpoint_event_prologue() + + rank = self.local_rank if self.use_node_local_storage() else self.global_rank + + # This is to make sure the checkpoint names are created without collision + # There seems to be issue creating them in parallel + + # Ensure save_dir directory exists + if rank == 0: + self.checkpoint_engine.makedirs(save_dir, exist_ok=True) + dist.barrier() + + if tag is None: + tag = f"global_step{self.global_steps}" + + # Ensure tag is a string + tag = str(tag) + self.checkpoint_engine.create(tag) + + # Ensure checkpoint tag is consistent across ranks + self._checkpoint_tag_validation(tag) + + if self.has_moe_layers: + self.save_non_zero_checkpoint = False + self._create_checkpoint_file(save_dir, tag, False) + self._save_moe_checkpoint(save_dir, + tag, + client_state=client_state, + exclude_frozen_parameters=exclude_frozen_parameters) + + # We distribute the task of saving layer checkpoint files among + # data parallel instances, so all procs should call _save_checkpoint. + # All procs then call module_state_dict(), but only procs of data + # parallel rank 0 save the general model params. + if not self.has_moe_layers: + self._create_checkpoint_file(save_dir, tag, False) + self._save_checkpoint(save_dir, + tag, + client_state=client_state, + exclude_frozen_parameters=exclude_frozen_parameters) + + if self.save_zero_checkpoint: + self._create_zero_checkpoint_files(save_dir, tag) + self._save_zero_checkpoint(save_dir, tag) + + if self.zero_has_nvme_offload(): + from shutil import copytree, disk_usage + offload_dir = self.optimizer.optimizer_swapper.swap_folder + offload_ckpt_dir = os.path.join(save_dir, tag, "offloaded_tensors") + _, _, free = disk_usage(save_dir) + logger.info( + f"Copying NVMe offload files from {offload_dir} to {offload_ckpt_dir}, {free / 1e9:,.2f} GB free on target filesystem..." + ) + copytree(offload_dir, + offload_ckpt_dir, + ignore=lambda _, dir_list: list(filter(lambda x: 'gradient' in x, dir_list)), + dirs_exist_ok=False) + _, _, free = disk_usage(save_dir) + logger.info(f"Copying complete! {free / 1e9:,.2f} GB free on target filesystem") + + if self._optimizer_has_ckpt_event_epilogue(): + self.optimizer.checkpoint_event_epilogue() + + # Save latest checkpoint tag + self.checkpoint_engine.commit(tag) + if save_latest and rank == 0: + with open(os.path.join(save_dir, 'latest'), 'w') as fd: + fd.write(tag) + + dist.barrier() + + return True + + def _get_non_moe_state_dict(self, full_state_dict): + """ + Get the state dict of the non-moe layers + """ + for key in list(full_state_dict.keys()): + if 'expert' in key and 'moe.gate.wg.weight' not in key: + full_state_dict.pop(key) + + return full_state_dict + + def _save_moe_checkpoint(self, save_dir, tag, client_state={}, exclude_frozen_parameters=False): + save_path = self._get_ckpt_name(save_dir, tag) + # A hack to save the checkpointing directory. Pipeline parallelism overrides + # module_state_dict() and uses this path to save the model. module_state_dict() + # then instead just returns None. 
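# Expert parameters are written below as one file per (MoE layer, global expert id),
# named via _get_expert_ckpt_name(); only expert-data-parallel rank 0 of each expert
# group writes them, since the remaining ranks hold replicas of the same experts.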
+ + # Using layer_#_export_# to save the model's expert state_dict + moe_layer_id = 0 + for n_module, module in self.module.named_modules(): + if isinstance(module, MoE): # and deepspeed.comm.get_rank() == 0: + group_name = module.expert_group_name + num_local_experts = module.num_local_experts + expp_rank = groups._get_expert_parallel_rank(group_name) + exp_dp_rank = groups._get_expert_data_parallel_rank(group_name) + # print(expp_rank, exp_dp_rank) + if exp_dp_rank != 0: + moe_layer_id += 1 + continue + + # get all moe parameters + moe_state_dict = {} + for n, p in module.state_dict().items(): + if 'expert' in n and 'moe.gate.wg.weight' not in n: + moe_state_dict[n_module + '.' + n] = p + moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.' + # print(moe_state_dict.keys()) # until now, everything is fine. So the bug happens at next few lines + # Reorder the moe name rank, so that each checkpoint only has one expert + experts_state_dict = defaultdict(dict) + for key in list(moe_state_dict.keys()): + m = re.match(f".*{moe_str_prefix}([0-9]+).*", key) + + local_expert_id = None + if not m: + logger.warn(f'No expert found in key {key}.') + else: + local_expert_id = m.group(1) + + global_expert_id = expp_rank * \ + num_local_experts + int(local_expert_id) + expert_key = key.replace(f'{moe_str_prefix}{local_expert_id}', + f'{moe_str_prefix}{global_expert_id}') + # truncating extra tensor (shared) storage + truncated = moe_state_dict.pop(key).clone().detach() + experts_state_dict[str(global_expert_id)][expert_key] = truncated + + # let save the moe parameters + for global_expert_id, expert_state_dict in experts_state_dict.items(): + # save the moe parameters + moe_save_path = self._get_expert_ckpt_name(save_dir, moe_layer_id, global_expert_id, tag, self.mpu) + if self.random_ltd_enabled(): + expert_state_dict = remove_random_ltd_state_dict(expert_state_dict) + self.checkpoint_engine.save(expert_state_dict, moe_save_path) + moe_layer_id += 1 + + self._curr_ckpt_path = os.path.join(save_dir, tag) + + largest_group_name = groups._get_max_expert_size_name() + expp_rank = groups._get_expert_parallel_rank(largest_group_name) + exp_dp_rank = groups._get_expert_data_parallel_rank(largest_group_name) + + # In the case of E + D parallelism, only the + # first expert parallel group should save the expert weights + # since each expert parallel group is a copy of the model's experts + if exp_dp_rank == 0: + # Save optimizer states. They are different across each exp parallel rank. + optimizer_state = { + 'optimizer': self.optimizer.state_dict() if self.optimizer and not self.zero_optimization() else None + } + # TODO: why use BufferedWriter not the path + file_path = self._get_optimizer_ckpt_name(save_dir, tag, expp_rank) + self.checkpoint_engine.save(optimizer_state, file_path) + + # Load flow uses below saved file for model parameters, RNG and more + if groups._get_data_parallel_rank() == 0: + # Get non-moe parameters + # Classes DeepSpeedEngine and PipelineEngine have different behavior for method module_state_dict. + # DeepSpeedEngine returns the state dict, where PipelineEngine saves the state dict and returns None. + # We need to get the state dict, therefore, call to DeepSpeedEngine (base class for PipelineEngine) + model_state_dict = self._get_non_moe_state_dict( + DeepSpeedEngine.module_state_dict(self, exclude_frozen_parameters=exclude_frozen_parameters)) + + # TODO: update num experts info,.. 
in checkpoint + state = { + 'module': + model_state_dict, + 'lr_scheduler': + self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None, + 'data_sampler': + self.training_dataloader.data_sampler.state_dict() if + (self.training_dataloader is not None and self.curriculum_learning_enabled()) else None, + 'random_ltd': + self.random_ltd_scheduler.state_dict() if self.random_ltd_enabled() else None, + 'sparse_tensor_module_names': + self.sparse_tensor_module_names, + 'skipped_steps': + self.skipped_steps, + 'global_steps': + self.global_steps, + 'global_samples': + self.global_samples, + 'dp_world_size': + self.dp_world_size, + 'mp_world_size': + self.mp_world_size, + 'num_experts': + self.num_experts + } + state.update(client_state) + logger.info(f'Saving model checkpoint: {save_path}') + self.checkpoint_engine.save(state, save_path) + + def _create_checkpoint_file(self, save_dir, tag, zero_checkpoint): + name_function = (self._get_zero_ckpt_name if zero_checkpoint else self._get_ckpt_name) + try: + checkpoint_name = name_function(save_dir, tag) + path = os.path.dirname(checkpoint_name) + self.checkpoint_engine.makedirs(path, exist_ok=True) + except: + logger.error(f"Failed saving model checkpoint to {save_dir} with tag {tag}") + return False + + return True + + def _create_zero_checkpoint_files(self, save_dir, tag): + success = True + # zero checkpoint files are created sequentially + for rank in range(dist.get_world_size(self.optimizer.dp_process_group)): + if rank == self.global_rank: + success = self._create_checkpoint_file(save_dir, tag, True) + + dist.barrier(group=self.optimizer.dp_process_group) + + return success + + def _save_checkpoint(self, save_dir, tag, client_state={}, exclude_frozen_parameters=False): + + save_path = self._get_ckpt_name(save_dir, tag) + + zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled() + + save_frozen_param = self.zero_optimization_partition_gradients() and not exclude_frozen_parameters + + # A hack to save the checkpointing directory. Pipeline parallelism overrides + # module_state_dict() and uses this path to save the model. module_state_dict() + # then instead just returns None. The module_state_dict() implementation in + # PipelineEngine expects the save path to be set in self._curr_ckpt_path. 
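# _curr_ckpt_path is only set for the duration of the module_state_dict() call below
# and is reset to None immediately afterwards.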
+ self._curr_ckpt_path = os.path.join(save_dir, tag) + module = self.module_state_dict(exclude_frozen_parameters=exclude_frozen_parameters) + self._curr_ckpt_path = None + + state = dict(module=module, + buffer_names=self._get_buffer_names(), + optimizer=self.optimizer.state_dict() if self.optimizer and not zero_optimizer_state else None, + param_shapes=self._get_zero_param_shapes() if self.optimizer and zero_optimizer_state else None, + frozen_param_shapes=self._get_zero_frozen_param_attributes(self._get_param_shape_func) + if save_frozen_param else None, + shared_params=self._get_shared_params() if self.optimizer and zero_optimizer_state else None, + frozen_param_fragments=self._get_zero_frozen_param_attributes(self._get_param_fragment_func) + if save_frozen_param else None, + lr_scheduler=self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None, + data_sampler=self.training_dataloader.data_sampler.state_dict() if + (self.training_dataloader is not None and self.curriculum_learning_enabled()) else None, + random_ltd=self.random_ltd_scheduler.state_dict() if self.random_ltd_enabled() else None, + sparse_tensor_module_names=self.sparse_tensor_module_names, + skipped_steps=self.skipped_steps, + global_steps=self.global_steps, + global_samples=self.global_samples, + dp_world_size=self.seq_dp_world_size, + mp_world_size=self.mp_world_size, + ds_config=self.config, + ds_version=version) + state.update(client_state) + + if self.save_non_zero_checkpoint: + log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0, 1]) + self.checkpoint_engine.save(state, save_path) + + def _get_buffer_names(self): + buffer_names = [] + + # we save buffer names so that we could extract later the real buffers from the saved + # state_dict["module"] in the non-zero checkpoint - the buffers are already there but they + # are intermixed with param placeholders + + # have to traverse the tree to be able to skip non-persistent buffers + def get_layer_named_buffers(module, prefix=""): + for name, buf in module.named_buffers(recurse=False): + if buf is not None and name not in module._non_persistent_buffers_set: + buffer_names.append(prefix + name) + + for name, child in module.named_children(): + if child is not None: + get_layer_named_buffers(child, prefix + name + ".") + + get_layer_named_buffers(self.module, prefix="") + + return buffer_names + + def _get_param_shape_func(self, param): + return param.ds_shape if hasattr(param, 'ds_id') else param.shape + + def _get_param_fragment_func(self, param): + return param.ds_tensor.detach().cpu() if hasattr(param, 'ds_id') else param.detach().cpu() + + def _get_zero_frozen_param_attributes(self, attr_func): + frozen_param_fragments = OrderedDict() + + for param in self.module.parameters(): + if param.requires_grad: + continue + if param not in self.param_names: + raise ValueError(f"failed to find frozen {param} in named params") + name = self.param_names[param] + frozen_param_fragments[name] = attr_func(param) + + return frozen_param_fragments + + def _get_zero_param_shapes(self): + """Returns a dict of name to shape mapping, only for the flattened fp32 weights saved by the + optimizer. the names are exactly as in state_dict. The order is absolutely important, since + the saved data is just flattened data with no identifiers and requires reconstruction in the + same order it was saved. 
+ We can't rely on self.module.named_parameters() to get the saved tensors, as some params + will be missing and others unsaved and then it'd be impossible to reconstruct state_dict + from the flattened weights. + optimizer.bit16_groups seems to be the easiest to use as it's in all zeroX versions. + """ + param_group_shapes = [] + cnt = 0 + numel = 0 + + # zero2 started using a round_robin_bit16_groups which is a shuffled version of bit16_groups - + # if we don't use it, we get parameters ordered incorrectly + if hasattr(self.optimizer, "round_robin_bit16_groups"): + bit16_groups = self.optimizer.round_robin_bit16_groups + elif self.bfloat16_enabled() and hasattr(self.optimizer, "bf16_groups"): + bit16_groups = self.optimizer.bf16_groups + else: + bit16_groups = self.optimizer.bit16_groups if self.zero_optimization_stage( + ) == 2 else self.optimizer.fp16_groups + + for bit16_group in bit16_groups: + param_shapes = OrderedDict() + for param in bit16_group: + cnt += 1 + numel += param.ds_numel if hasattr(param, "ds_numel") else param.numel() + shape = param.ds_shape if hasattr(param, "ds_shape") else param.shape + if param not in self.param_names: + raise ValueError(f"failed to find optimizer param in named params") + name = self.param_names[param] + param_shapes[name] = shape + + # uncomment to debug zero_to_fp32.py problems + # if self.global_rank == 0: print(f"saving param {name} {shape} (numel={shape.numel()})") + param_group_shapes.append(param_shapes) + # if self.global_rank == 0: print(f"Total saved {numel} numels in {cnt} params") + + return param_group_shapes + + def _get_shared_params(self): + """ + Returns a dict of shared params, which can later be used to reconstruct the original state dict, + e.g. in `zero_to_fp32`. Each dict entry is a pair of param names, where the key is the name + of the variable that isn't stored and the value is the actual param holding data. 
+ """ + shared_index = {} + shared_params_by_full_name = {} + + is_zero3_model = (self.zero_optimization_partition_weights() + and any(hasattr(param, "ds_id") for param in self.module.parameters())) + + def get_layer_state_dict(module, prefix=""): + # handle params + for name, param in module.named_parameters(recurse=False): + if param is None or (is_zero3_model and not hasattr(param, "ds_id")): + continue + key = prefix + name + + # When weights are manged by stage 3, we can't rely on param.data_ptr() as it will be reused + # as weights get gathered and reduced, but param.ds_id is unique across all zero weights + # (and shared params will have the same param.ds_id) + param_id = param.ds_id if is_zero3_model else param.data_ptr() + + if param_id in shared_index: + # shared weights + #print(f"`{key}` is shared with `{shared_index[param_id]}`") + shared_params_by_full_name[key] = shared_index[param_id] + else: + shared_index[param_id] = key + + for name, child in module.named_children(): + if child is not None: + get_layer_state_dict(child, prefix + name + ".") + + if dist.get_rank() == 0: + get_layer_state_dict(self.module, prefix="") + + return shared_params_by_full_name + + def _copy_recovery_script(self, save_path): + base_dir = os.path.dirname(os.path.dirname(__file__)) + script = "zero_to_fp32.py" + src = os.path.join(base_dir, "utils", script) + dst = os.path.join(save_path, script) + #logger.info(f"creating recovery script {dst}") + copyfile(src, dst) + self._change_recovery_script_permissions(dst) + + def _change_recovery_script_permissions(self, dst): + # make executable (safeguard for file shares - Azure as example) + try: + os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC) + except (FileNotFoundError, PermissionError) as e: + #this message is used in unit test TestZeRONonDistributed + logger.info( + f'Warning: Could not change permissions for {dst} due to error: {e}. Continuing without changing permissions.' + ) + + def _save_zero_checkpoint(self, save_path, tag): + zero_checkpoint_name = self._get_zero_ckpt_name(save_path, tag) + zero_sd = dict(optimizer_state_dict=self.optimizer.state_dict(), ds_config=self.config, ds_version=version) + self.checkpoint_engine.save(zero_sd, zero_checkpoint_name) + + if self.global_rank == 0: + self._copy_recovery_script(save_path) + ckpt_type = 'zero' if self.zero_optimization() else 'bf16_zero' + logger.info(f'{ckpt_type} checkpoint saved {zero_checkpoint_name}') + + def _zero3_consolidated_16bit_state_dict(self, exclude_frozen_parameters=False): + """ + Get a full non-partitioned state_dict with fp16 weights on cpu. + Important: this function must be called on all ranks and not just rank 0. + This is similar to nn.Module.state_dict (modelled after _save_to_state_dict), but: + 1. consolidates the weights from different partitions on gpu0 + 2. works on one layer at a time to require as little gpu0 memory as possible, by + moving the already consolidated weights to cpu + 3. 
takes care to keep the shared params shared when gradually copying the params to cpu + Returns: + a consolidated fp16 ``state_dict`` on cpu on rank 0, ``None`` on other ranks + """ + if not self.zero_optimization_partition_weights(): + raise ValueError("this function requires ZeRO-3 mode") + + state_dict = OrderedDict() if dist.get_rank() == 0 else None + shared_params = {} + + def get_layer_state_dict(module, prefix=""): + # gather one layer at a time to be memory-efficient + # must use modifier_rank=0 to release GPU memory after each layer gathered + #see_memory_usage("before GatheredParameters", force=True) + with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0): + if dist.get_rank() == 0: + # handle params + for name, param in module.named_parameters(recurse=False): + if param is None or (exclude_frozen_parameters and not param.requires_grad): + continue + key = prefix + name + # can't rely on param.data_ptr() as it will be reused as weights gets + # gathered and reduced, but param.ds_id is unique across all zero weights + # (and shared params will have the same param.ds_id) + if param.ds_id in shared_params: + # shared weights + #print(f"`{key}` is shared with `{shared_params[param.ds_id]}`") + state_dict[key] = state_dict[shared_params[param.ds_id]] + else: + state_dict[key] = param.detach().cpu() + shared_params[param.ds_id] = key + #print(f"param {param.ds_id} {param.shape} {key} ") + + # now buffers - not sure if need to take care of potentially shared weights here + for name, buf in module.named_buffers(recurse=False): + if (buf is not None and name not in module._non_persistent_buffers_set): + state_dict[prefix + name] = buf.detach().cpu() + #see_memory_usage("after GatheredParameters", force=True) + + for name, child in module.named_children(): + if child is not None: + get_layer_state_dict(child, prefix + name + ".") + + # Prepare for checkpoint save by ensuring all parameters are partitioned + if self._optimizer_has_ckpt_event_prologue(): + self.optimizer.checkpoint_event_prologue() + + see_memory_usage("before get_layer_state_dict", force=False) + get_layer_state_dict(self.module, prefix="") + see_memory_usage("after get_layer_state_dict", force=False) + + if self._optimizer_has_ckpt_event_epilogue(): + self.optimizer.checkpoint_event_epilogue() + + return state_dict + + def save_fp16_model(self, save_dir, save_filename="pytorch_model.bin"): + """has been renamed to save_16bit_model, keeping this around for backwards + compatibility""" + return self.save_16bit_model(save_dir, save_filename) + + def save_16bit_model(self, save_dir, save_filename="pytorch_model.bin", exclude_frozen_parameters=False): + """ + Save 16bit model weights + + This method saves the 16bit model weights at the desired destination. + + Arguments: + save_dir: Required. Directory for saving the model + save_filename: Optional. Filename to save to. Defaults to ``pytorch_model.bin`` + exclude_frozen_parameters: Optional. Exclude frozen parameters from checkpointed state. + + Returns: + ``True`` when a model has been saved, ``False`` otherwise. It will not be saved if + stage3_gather_16bit_weights_on_model_save is ``False``. + + Important: all processes must call this method and not just the process with rank 0. It is + because the processes need to work in sync to gather the weights. This method will hang + waiting to synchronize with other processes if it's called just for the process with rank 0. 
+ + """ + + path = os.path.join(save_dir, save_filename) + + if self.zero_optimization_partition_weights(): + if self.zero_gather_16bit_weights_on_model_save(): + # consolidation is expensive in time and memory and therefore isn't a default + state_dict = self._zero3_consolidated_16bit_state_dict( + exclude_frozen_parameters=exclude_frozen_parameters) + else: + # the model will be bogus if not consolidated so don't confuse the user by saving it + logger.info( + f"Did not save the model {path} because `stage3_gather_16bit_weights_on_model_save` is False") + return False + else: + state_dict = self.module_state_dict(exclude_frozen_parameters=exclude_frozen_parameters) + + tag = f"global_step{self.global_steps}" + tag = str(tag) + self.checkpoint_engine.create(tag) + + if dist.get_rank() == 0: + self.checkpoint_engine.makedirs(save_dir, exist_ok=True) + logger.info(f"Saving model weights to {path}, tag: {tag}") + self.checkpoint_engine.save(state_dict, path) + + self.checkpoint_engine.commit(tag) + + return True + + def empty_partition_cache(self): + """ + Release GPU memory consumed by offloaded model parameters. + """ + if hasattr(self.optimizer, 'empty_partition_cache'): + self.optimizer.empty_partition_cache() + gc.collect() + get_accelerator().empty_cache() diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/lr_schedules.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/lr_schedules.py new file mode 100644 index 0000000000000000000000000000000000000000..d7f7e15a4dbda0309c2831326975ce79a7143f97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/lr_schedules.py @@ -0,0 +1,878 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Implementation of learning rate schedules. 
+ +Taken and modified from PyTorch v1.0.1 source +https://github.com/pytorch/pytorch/blob/v1.1.0/torch/optim/lr_scheduler.py +""" + +import argparse +from torch.optim import Optimizer +import math +from deepspeed.utils import logger + +LR_SCHEDULE = 'lr_schedule' +LR_RANGE_TEST = 'LRRangeTest' +ONE_CYCLE = 'OneCycle' +WARMUP_LR = 'WarmupLR' +WARMUP_DECAY_LR = 'WarmupDecayLR' +WARMUP_COSINE_LR = 'WarmupCosineLR' +VALID_LR_SCHEDULES = [LR_RANGE_TEST, ONE_CYCLE, WARMUP_LR, WARMUP_DECAY_LR, WARMUP_COSINE_LR] + +LR_RANGE_TEST_MIN_LR = 'lr_range_test_min_lr' +LR_RANGE_TEST_STEP_RATE = 'lr_range_test_step_rate' +LR_RANGE_TEST_STEP_SIZE = 'lr_range_test_step_size' +LR_RANGE_TEST_STAIRCASE = 'lr_range_test_staircase' + +EDGE_VALUE = 'edge_value' +MID_VALUE = 'mid_value' + +CYCLE_FIRST_STEP_SIZE = 'cycle_first_step_size' +CYCLE_FIRST_STAIR_COUNT = 'cycle_first_stair_count' +CYCLE_SECOND_STEP_SIZE = 'cycle_second_step_size' +CYCLE_SECOND_STAIR_COUNT = 'cycle_second_stair_count' +DECAY_STEP_SIZE = 'decay_step_size' + +CYCLE_MIN_LR = 'cycle_min_lr' +CYCLE_MAX_LR = 'cycle_max_lr' +DECAY_LR_RATE = 'decay_lr_rate' + +CYCLE_MIN_MOM = 'cycle_min_mom' +CYCLE_MAX_MOM = 'cycle_max_mom' +DECAY_MOM_RATE = 'decay_mom_rate' + +WARMUP_MIN_LR = 'warmup_min_lr' +WARMUP_MAX_LR = 'warmup_max_lr' +WARMUP_NUM_STEPS = 'warmup_num_steps' +WARMUP_TYPE = 'warmup_type' +WARMUP_LOG_RATE = 'log' +WARMUP_LINEAR_RATE = 'linear' + +WARMUP_MIN_RATIO = 'warmup_min_ratio' +COS_MIN_RATIO = 'cos_min_ratio' + +TOTAL_NUM_STEPS = 'total_num_steps' + + +def add_tuning_arguments(parser): + group = parser.add_argument_group('Convergence Tuning', 'Convergence tuning configurations') + + # LR scheduler + group.add_argument('--lr_schedule', type=str, default=None, help='LR schedule for training.') + + # Learning rate range test + group.add_argument("--lr_range_test_min_lr", type=float, default=0.001, help='Starting lr value.') + group.add_argument("--lr_range_test_step_rate", type=float, default=1.0, help='scaling rate for LR range test.') + group.add_argument("--lr_range_test_step_size", type=int, default=1000, help='training steps per LR change.') + group.add_argument("--lr_range_test_staircase", + type=bool, + default=False, + help='use staircase scaling for LR range test.') + + # OneCycle schedule + group.add_argument("--cycle_first_step_size", + type=int, + default=1000, + help='size of first step of 1Cycle schedule (training steps).') + group.add_argument("--cycle_first_stair_count", + type=int, + default=-1, + help='first stair count for 1Cycle schedule.') + group.add_argument("--cycle_second_step_size", + type=int, + default=-1, + help='size of second step of 1Cycle schedule (default first_step_size).') + group.add_argument("--cycle_second_stair_count", + type=int, + default=-1, + help='second stair count for 1Cycle schedule.') + group.add_argument("--decay_step_size", + type=int, + default=1000, + help='size of intervals for applying post cycle decay (training steps).') + + # 1Cycle LR + group.add_argument("--cycle_min_lr", type=float, default=0.01, help='1Cycle LR lower bound.') + group.add_argument("--cycle_max_lr", type=float, default=0.1, help='1Cycle LR upper bound.') + group.add_argument("--decay_lr_rate", type=float, default=0.0, help='post cycle LR decay rate.') + + # 1Cycle Momentum + group.add_argument('--cycle_momentum', default=False, action='store_true', help='Enable 1Cycle momentum schedule.') + group.add_argument("--cycle_min_mom", type=float, default=0.8, help='1Cycle momentum lower bound.') + 
group.add_argument("--cycle_max_mom", type=float, default=0.9, help='1Cycle momentum upper bound.') + group.add_argument("--decay_mom_rate", type=float, default=0.0, help='post cycle momentum decay rate.') + + # Warmup LR + group.add_argument('--warmup_min_lr', type=float, default=0, help='WarmupLR minimum/initial LR value') + group.add_argument('--warmup_max_lr', type=float, default=0.001, help='WarmupLR maximum LR value.') + group.add_argument('--warmup_num_steps', type=int, default=1000, help='WarmupLR step count for LR warmup.') + group.add_argument('--warmup_type', + type=str, + default=WARMUP_LOG_RATE, + help='WarmupLR increasing function during warmup') + + # WarmUP cos LR + group.add_argument("--warmup_min_ratio", type=float, default=0.01, help='Cosine LR lower bound.') + group.add_argument("--cos_min_ratio", type=float, default=0.01, help='Cosine LR lower bound.') + + return parser + + +def parse_arguments(): + parser = argparse.ArgumentParser() + parser = add_tuning_arguments(parser) + + lr_sched_args, unknown_args = parser.parse_known_args() + return lr_sched_args, unknown_args + + +def override_lr_range_test_params(args, params): + if hasattr(args, LR_RANGE_TEST_MIN_LR) and args.lr_range_test_min_lr is not None: + params[LR_RANGE_TEST_MIN_LR] = args.lr_range_test_min_lr + + if hasattr(args, LR_RANGE_TEST_STEP_RATE) and args.lr_range_test_step_rate is not None: + params[LR_RANGE_TEST_STEP_RATE] = args.lr_range_test_step_rate + + if hasattr(args, LR_RANGE_TEST_STEP_SIZE) and args.lr_range_test_step_size is not None: + params[LR_RANGE_TEST_STEP_SIZE] = args.lr_range_test_step_size + + if hasattr(args, LR_RANGE_TEST_STAIRCASE) and args.lr_range_test_staircase is not None: + params[LR_RANGE_TEST_STAIRCASE] = args.lr_range_test_staircase + + +def override_1cycle_params(args, params): + if hasattr(args, CYCLE_FIRST_STEP_SIZE) and args.cycle_first_step_size is not None: + params[CYCLE_FIRST_STEP_SIZE] = args.cycle_first_step_size + + if hasattr(args, CYCLE_FIRST_STAIR_COUNT) and args.cycle_first_stair_count is not None: + params[CYCLE_FIRST_STAIR_COUNT] = args.cycle_first_stair_count + + if hasattr(args, CYCLE_SECOND_STEP_SIZE) and args.cycle_second_step_size is not None: + params[CYCLE_SECOND_STEP_SIZE] = args.cycle_second_step_size + + if hasattr(args, CYCLE_SECOND_STAIR_COUNT) and args.cycle_second_stair_count is not None: + params[CYCLE_SECOND_STAIR_COUNT] = args.cycle_second_stair_count + + if hasattr(args, DECAY_STEP_SIZE) and args.decay_step_size is not None: + params[DECAY_STEP_SIZE] = args.decay_step_size + + # 1Cycle LR params + if hasattr(args, CYCLE_MIN_LR) and args.cycle_min_lr is not None: + params[CYCLE_MIN_LR] = args.cycle_min_lr + + if hasattr(args, CYCLE_MAX_LR) and args.cycle_max_lr is not None: + params[CYCLE_MAX_LR] = args.cycle_max_lr + + if hasattr(args, DECAY_LR_RATE) and args.decay_lr_rate is not None: + params[DECAY_LR_RATE] = args.decay_lr_rate + + # 1Cycle MOM params + if hasattr(args, CYCLE_MIN_MOM) and args.cycle_min_mom is not None: + params[CYCLE_MIN_MOM] = args.cycle_min_mom + + if hasattr(args, CYCLE_MAX_MOM) and args.cycle_max_mom is not None: + params[CYCLE_MAX_MOM] = args.cycle_max_mom + + if hasattr(args, DECAY_MOM_RATE) and args.decay_mom_rate is not None: + params[DECAY_MOM_RATE] = args.decay_mom_rate + + +def override_warmupLR_params(args, params): + if hasattr(args, WARMUP_MIN_LR) and args.warmup_min_lr is not None: + params[WARMUP_MIN_LR] = args.warmup_min_lr + + if hasattr(args, WARMUP_MAX_LR) and args.warmup_max_lr is not None: + 
params[WARMUP_MAX_LR] = args.warmup_max_lr + + if hasattr(args, WARMUP_NUM_STEPS) and args.warmup_num_steps is not None: + params[WARMUP_NUM_STEPS] = args.warmup_num_steps + + if hasattr(args, WARMUP_TYPE) and args.warmup_type is not None: + params[WARMUP_TYPE] = args.warmup_type + + +def override_params(args, params): + # LR range test params + override_lr_range_test_params(args, params) + + # 1Cycle params + override_1cycle_params(args, params) + + # WarmupLR params + override_warmupLR_params(args, params) + + +def get_config_from_args(args): + if not hasattr(args, LR_SCHEDULE) or args.lr_schedule is None: + return None, '--{} not specified on command line'.format(LR_SCHEDULE) + + if not args.lr_schedule in VALID_LR_SCHEDULES: + return None, '{} is not supported LR schedule'.format(args.lr_schedule) + + config = {} + config['type'] = args.lr_schedule + config['params'] = {} + + if args.lr_schedule == LR_RANGE_TEST: + override_lr_range_test_params(args, config['params']) + elif args.lr_schedule == ONE_CYCLE: + override_1cycle_params(args, config['params']) + else: + override_warmupLR_params(args, config['params']) + + return config, None + + +def get_lr_from_config(config): + if not 'type' in config: + return None, 'LR schedule type not defined in config' + + if not 'params' in config: + return None, 'LR schedule params not defined in config' + + lr_schedule = config['type'] + lr_params = config['params'] + + if not lr_schedule in VALID_LR_SCHEDULES: + return None, '{} is not a valid LR schedule'.format(lr_schedule) + + if lr_schedule == LR_RANGE_TEST: + return lr_params[LR_RANGE_TEST_MIN_LR], '' + if lr_schedule == ONE_CYCLE: + return lr_params[CYCLE_MAX_LR], '' + # Warmup LR + return lr_params[WARMUP_MAX_LR], '' + + +""" +Only optimizers that are subclass of torch.optim.Optimizer are supported. So check the passed optimizer and wrapped +optimizer to see if requirement is satisfied. +TODO: Looking under the hood to examine the wrapped optimizer is a hack that requires a better long-term fix. +""" + + +def get_torch_optimizer(optimizer): + if isinstance(optimizer, Optimizer): + return optimizer + + if hasattr(optimizer, 'optimizer') and isinstance(optimizer.optimizer, Optimizer): + return optimizer.optimizer + + raise TypeError('{} is not a subclass of torch.optim.Optimizer'.format(type(optimizer).__name__)) + + +class LRRangeTest(object): + """Sets the learning rate of each parameter group according to + learning rate range test (LRRT) policy. The policy increases learning + rate starting from a base value with a constant frequency, as detailed in + the paper `A disciplined approach to neural network hyper-parameters: Part1`_. + + LRRT policy is used for finding maximum LR that trains a model without divergence, and can be used to + configure the LR boundaries for Cyclic LR schedules. + + LRRT changes the learning rate after every batch. + `step` should be called after a batch has been used for training. + + Args: + optimizer (Optimizer): Wrapped optimizer. + lr_range_test_min_lr (float or list): Initial learning rate which is the + lower boundary in the range test for each parameter group. + lr_range_test_step_size (int): Interval of training steps to increase learning rate. Default: 2000 + lr_range_test_step_rate (float): Scaling rate for range test. Default: 1.0 + lr_range_test_staircase (bool): Scale in staircase fashion, rather than continuous. Default: False. + last_batch_iteration (int): The index of the last batch. This parameter is used when + resuming a training job. 
Since `step()` should be invoked after each + batch instead of after each epoch, this number represents the total + number of *batches* computed, not the total number of epochs computed. + When last_batch_iteration=-1, the schedule is started from the beginning. + Default: -1 + + Example: + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = LRRangeTest(optimizer) + >>> data_loader = torch.utils.data.DataLoader(...) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) + >>> scheduler.step() + + _A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay: + https://arxiv.org/abs/1803.09820 +""" + + def __init__(self, + optimizer: Optimizer, + lr_range_test_min_lr: float = 1e-3, + lr_range_test_step_size: int = 2000, + lr_range_test_step_rate: float = 1.0, + lr_range_test_staircase: bool = False, + last_batch_iteration: int = -1): + + self.optimizer = get_torch_optimizer(optimizer) + + if isinstance(lr_range_test_min_lr, list) or isinstance(lr_range_test_min_lr, tuple): + if len(lr_range_test_min_lr) != len(self.optimizer.param_groups): + raise ValueError("expected {} lr_range_test_min_lr, got {}".format(len(self.optimizer.param_groups), + len(lr_range_test_min_lr))) + self.min_lr = list(lr_range_test_min_lr) + else: + self.min_lr = [lr_range_test_min_lr] * len(self.optimizer.param_groups) + + self.step_size = lr_range_test_step_size + self.step_rate = lr_range_test_step_rate + self.last_batch_iteration = last_batch_iteration + self.staircase = lr_range_test_staircase + self.interval_fn = self._staircase_interval if lr_range_test_staircase else self._continuous_interval + + if last_batch_iteration == -1: + self._update_optimizer(self.min_lr) + + def _staircase_interval(self): + return math.floor(float(self.last_batch_iteration + 1) / self.step_size) + + def _continuous_interval(self): + return float(self.last_batch_iteration + 1) / self.step_size + + def _get_increase(self): + return (1 + self.step_rate * self.interval_fn()) + + def get_lr(self): + lr_increase = self._get_increase() + return [lr_range_test_min_lr * lr_increase for lr_range_test_min_lr in self.min_lr] + + def get_last_lr(self): + """ Return last computed learning rate by current scheduler. + """ + assert getattr(self, '_last_lr', None) is not None, "need to call step() first" + return self._last_lr + + def _update_optimizer(self, group_lrs): + for param_group, lr in zip(self.optimizer.param_groups, group_lrs): + param_group['lr'] = lr + + def step(self, batch_iteration=None): + if batch_iteration is None: + batch_iteration = self.last_batch_iteration + 1 + self.last_batch_iteration = batch_iteration + self._update_optimizer(self.get_lr()) + self._last_lr = [group['lr'] for group in self.optimizer.param_groups] + + def state_dict(self): + return {'last_batch_iteration': self.last_batch_iteration} + + def load_state_dict(self, sd): + self.last_batch_iteration = sd['last_batch_iteration'] + + +class OneCycle(object): + """Sets the learning rate of each parameter group according to + 1Cycle learning rate policy (1CLR). 1CLR is a variation of the + Cyclical Learning Rate (CLR) policy that involves one cycle followed by + decay. The policy simultaneously cycles the learning rate (and momentum) + between two boundaries with a constant frequency, as detailed in + the paper `A disciplined approach to neural network hyper-parameters`_. + + 1CLR policy changes the learning rate after every batch. 
+ `step` should be called after a batch has been used for training. + + This implementation was adapted from the github repo: `pytorch/pytorch`_ + + Args: + optimizer (Optimizer): Wrapped optimizer. + cycle_min_lr (float or list): Initial learning rate which is the + lower boundary in the cycle for each parameter group. + cycle_max_lr (float or list): Upper learning rate boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (cycle_max_lr - cycle_min_lr). + The lr at any cycle is the sum of cycle_min_lr + and some scaling of the amplitude; therefore + cycle_max_lr may not actually be reached depending on + scaling function. + decay_lr_rate(float): Decay rate for learning rate. Default: 0. + cycle_first_step_size (int): Number of training iterations in the + increasing half of a cycle. Default: 2000 + cycle_second_step_size (int): Number of training iterations in the + decreasing half of a cycle. If cycle_second_step_size is None, + it is set to cycle_first_step_size. Default: None + cycle_first_stair_count(int): Number of stairs in first half of cycle phase. This means + lr/mom are changed in staircase fashion. Default 0, means staircase disabled. + cycle_second_stair_count(int): Number of stairs in second half of cycle phase. This means + lr/mom are changed in staircase fashion. Default 0, means staircase disabled. + decay_step_size (int): Intervals for applying decay in decay phase. Default: 0, means no decay. + cycle_momentum (bool): If ``True``, momentum is cycled inversely + to learning rate between 'cycle_min_mom' and 'cycle_max_mom'. + Default: True + cycle_min_mom (float or list): Initial momentum which is the + lower boundary in the cycle for each parameter group. + Default: 0.8 + cycle_max_mom (float or list): Upper momentum boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (cycle_max_mom - cycle_min_mom). + The momentum at any cycle is the difference of cycle_max_mom + and some scaling of the amplitude; therefore + cycle_min_mom may not actually be reached depending on + scaling function. Default: 0.9 + decay_mom_rate (float): Decay rate for momentum. Default: 0. + last_batch_iteration (int): The index of the last batch. This parameter is used when + resuming a training job. Since `step()` should be invoked after each + batch instead of after each epoch, this number represents the total + number of *batches* computed, not the total number of epochs computed. + When last_batch_iteration=-1, the schedule is started from the beginning. + Default: -1 + + Example: + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = OneCycle(optimizer, 0.0001, 0.0010) + >>> data_loader = torch.utils.data.DataLoader(...) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) + >>> scheduler.step() + + + .. 
_A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay: https://arxiv.org/abs/1803.09820 + """ + + def __init__(self, + optimizer, + cycle_min_lr, + cycle_max_lr, + decay_lr_rate=0., + cycle_first_step_size=2000, + cycle_second_step_size=None, + cycle_first_stair_count=0, + cycle_second_stair_count=None, + decay_step_size=0, + cycle_momentum=True, + cycle_min_mom=0.8, + cycle_max_mom=0.9, + decay_mom_rate=0., + last_batch_iteration=-1): + + self.optimizer = get_torch_optimizer(optimizer) + + # Initialize cycle shape + self._initialize_cycle(cycle_first_step_size, cycle_second_step_size, cycle_first_stair_count, + cycle_second_stair_count, decay_step_size) + + # Initialize cycle lr + self._initialize_lr(self.optimizer, cycle_min_lr, cycle_max_lr, decay_lr_rate, last_batch_iteration) + + # Initialize cyclic momentum + self.cycle_momentum = cycle_momentum + if cycle_momentum: + self._initialize_momentum(self.optimizer, cycle_min_mom, cycle_max_mom, decay_mom_rate, + last_batch_iteration) + # Initialize batch iteration tracker + self.last_batch_iteration = last_batch_iteration + + # Configure cycle shape + + def _initialize_cycle(self, cycle_first_step_size, cycle_second_step_size, cycle_first_stair_count, + cycle_second_stair_count, decay_step_size): + cycle_first_step_size = float(cycle_first_step_size) + cycle_second_step_size = float( + cycle_second_step_size) if cycle_second_step_size is not None else cycle_first_step_size + + self.total_size = cycle_first_step_size + cycle_second_step_size + self.step_ratio = cycle_first_step_size / self.total_size + self.first_stair_count = cycle_first_stair_count + self.second_stair_count = cycle_first_stair_count if cycle_second_stair_count is None else cycle_second_stair_count + self.decay_step_size = decay_step_size + + if math.isclose(self.decay_step_size, 0): + self.skip_lr_decay = True + self.skip_mom_decay = True + else: + self.skip_lr_decay = False + self.skip_mom_decay = False + + # Configure lr schedule + def _initialize_lr(self, optimizer, cycle_min_lr, cycle_max_lr, decay_lr_rate, last_batch_iteration): + self.min_lrs = [cycle_min_lr] * len(optimizer.param_groups) + if last_batch_iteration == -1: + for lr, group in zip(self.min_lrs, optimizer.param_groups): + group['lr'] = lr + + self.max_lrs = [cycle_max_lr] * len(optimizer.param_groups) + self.decay_lr_rate = decay_lr_rate + + if math.isclose(self.decay_lr_rate, 0): + self.skip_lr_decay = True + + # Configure momentum schedule + def _initialize_momentum(self, optimizer, cycle_min_mom, cycle_max_mom, decay_mom_rate, last_batch_iteration): + if 'betas' not in optimizer.defaults: + optimizer_name = type(optimizer).__name__ + logger.warn( + f"cycle_momentum is disabled because optimizer {optimizer_name} does not support momentum, no betas attribute in defaults" + ) + self.cycle_momentum = False + return + + self.decay_mom_rate = decay_mom_rate + self.min_moms = [(cycle_min_mom, 0.99)] * len(optimizer.param_groups) + self.max_moms = [(cycle_max_mom, 0.99)] * len(optimizer.param_groups) + + if last_batch_iteration == -1: + for momentum, group in zip(self.min_moms, optimizer.param_groups): + group['betas'] = momentum + + if math.isclose(self.decay_mom_rate, 0): + self.skip_mom_decay = True + + def _get_scale_factor(self): + batch_iteration = (self.last_batch_iteration + 1) + cycle = math.floor(1 + batch_iteration / self.total_size) + x = 1. 
+ batch_iteration / self.total_size - cycle + if x <= self.step_ratio: + scale_factor = x / self.step_ratio + else: + scale_factor = (x - 1) / (self.step_ratio - 1) + + return scale_factor + + def _get_cycle_mom(self): + scale_factor = self._get_scale_factor() + momentums = [] + for base_betas, max_betas in zip(self.min_moms, self.max_moms): + cycle_min_mom = base_betas[0] + cycle_max_mom = max_betas[0] + base_height = (cycle_max_mom - cycle_min_mom) * scale_factor + momentum = cycle_max_mom - base_height + momentums.append((momentum, base_betas[1])) + return momentums + + def _get_cycle_lr(self): + scale_factor = self._get_scale_factor() + lrs = [] + for cycle_min_lr, cycle_max_lr in zip(self.min_lrs, self.max_lrs): + base_height = (cycle_max_lr - cycle_min_lr) * scale_factor + lr = cycle_min_lr + base_height + lrs.append(lr) + + return lrs + + def _get_decay_mom(self, decay_batch_iteration): + if self.skip_mom_decay: + return self.max_moms + + decay_interval = decay_batch_iteration / self.decay_step_size + mom_decay_factor = (1 + self.decay_mom_rate * decay_interval) + momentums = [(beta0 * mom_decay_factor, beta1) for beta0, beta1 in self.max_moms] + + return momentums + + def _get_decay_lr(self, decay_batch_iteration): + """Calculates the learning rate at batch index. This function is used + after the cycle completes and post cycle decaying of lr/mom is enabled. + This function treats `self.last_batch_iteration` as the last batch index. + """ + if self.skip_lr_decay: + return self.min_lrs + + decay_interval = decay_batch_iteration / self.decay_step_size + lr_decay_factor = (1 + self.decay_lr_rate * decay_interval) + lrs = [cycle_min_lr / lr_decay_factor for cycle_min_lr in self.min_lrs] + + return lrs + + def get_lr(self): + """Calculates the learning rate at batch index. This function treats + `self.last_batch_iteration` as the last batch index. + """ + if self.last_batch_iteration < self.total_size: + return self._get_cycle_lr() + return self._get_decay_lr(self.last_batch_iteration - self.total_size + 1) + + def get_mom(self): + """Calculates the momentum at batch index. This function treats + `self.last_batch_iteration` as the last batch index. + """ + if not self.cycle_momentum: + return None + + if self.last_batch_iteration < self.total_size: + return self._get_cycle_mom() + return self._get_decay_mom(self.last_batch_iteration - self.total_size + 1) + + def get_last_lr(self): + """ Return last computed learning rate by current scheduler. + """ + assert getattr(self, '_last_lr', None) is not None, "need to call step() first" + return self._last_lr + + def step(self, batch_iteration=None): + """ Updates the optimizer with the learning rate for the last batch index. + `self.last_batch_iteration` is treated as the last batch index. + + If self.cycle_momentum is true, also updates optimizer momentum. 
+ """ + if batch_iteration is None: + batch_iteration = self.last_batch_iteration + 1 + + self.last_batch_iteration = batch_iteration + for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): + param_group['lr'] = lr + self._last_lr = [group['lr'] for group in self.optimizer.param_groups] + + if self.cycle_momentum: + momentums = self.get_mom() + for param_group, momentum in zip(self.optimizer.param_groups, momentums): + param_group['betas'] = momentum + + def state_dict(self): + return {'last_batch_iteration': self.last_batch_iteration} + + def load_state_dict(self, sd): + self.last_batch_iteration = sd['last_batch_iteration'] + + +class WarmupLR(object): + """Increase the learning rate of each parameter group from min lr to max lr + over warmup_num_steps steps, and then fix at max lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + warmup_min_lr (float or list): minimum learning rate. Default: 0 + warmup_max_lr (float or list): maximum learning rate. Default: 0.001 + warmup_num_steps (int): number of steps to warm up from min_lr to max_lr. Default: 1000 + warmup_type {‘log’, ‘linear’}: increasing function from min_lr to max_lr during warmup. Default: log + last_batch_iteration (int): The index of the last batch. Default: -1. + Example: + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = WarmupLR(optimizer) + >>> data_loader = torch.utils.data.DataLoader(...) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) + >>> scheduler.step() + + """ + + def __init__(self, + optimizer: Optimizer, + warmup_min_lr: float = 0.0, + warmup_max_lr: float = 0.001, + warmup_num_steps: int = 1000, + warmup_type: str = WARMUP_LOG_RATE, + last_batch_iteration: int = -1): + + self.optimizer = get_torch_optimizer(optimizer) + + self.min_lrs = self._format_param(self.optimizer, warmup_min_lr, "min_lr") + self.max_lrs = self._format_param(self.optimizer, warmup_max_lr, "max_lr") + self.delta_lrs = [big - small for big, small in zip(self.max_lrs, self.min_lrs)] + self.warmup_num_steps = max(2, warmup_num_steps) + # Currently only support linear and log function + if warmup_type not in {WARMUP_LOG_RATE, WARMUP_LINEAR_RATE}: + logger.warning(f"Using unknown warmup_type: {warmup_type}. The increasing function " + f"is set to default (log)") + warmup_type = WARMUP_LOG_RATE + self.warmup_type = warmup_type + self.inverse_log_warm_up = 1.0 / math.log(self.warmup_num_steps) + self.last_batch_iteration = last_batch_iteration + + def get_lr(self): + if self.last_batch_iteration < 0: + logger.warning("Attempting to get learning rate from scheduler before it has started") + return [0.0] + gamma = self._get_gamma() + return [min_lr + (delta_lr * gamma) for min_lr, delta_lr in zip(self.min_lrs, self.delta_lrs)] + + def get_last_lr(self): + """ Return last computed learning rate by current scheduler. 
+ """ + assert getattr(self, '_last_lr', None) is not None, "need to call step() first" + return self._last_lr + + def step(self, last_batch_iteration=None): + if last_batch_iteration is None: + last_batch_iteration = self.last_batch_iteration + 1 + self.last_batch_iteration = last_batch_iteration + for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): + param_group['lr'] = lr + self._last_lr = [group['lr'] for group in self.optimizer.param_groups] + + def state_dict(self): + return {'last_batch_iteration': self.last_batch_iteration} + + def load_state_dict(self, sd): + self.last_batch_iteration = sd['last_batch_iteration'] + + def _get_gamma(self): + if self.last_batch_iteration < self.warmup_num_steps: + if self.warmup_type == WARMUP_LOG_RATE: + return self.inverse_log_warm_up * math.log(self.last_batch_iteration + 1) + elif self.warmup_type == WARMUP_LINEAR_RATE: + return self.last_batch_iteration / self.warmup_num_steps + return 1.0 + + def _format_param(self, optimizer, param_value, param_name): + if isinstance(param_value, list) or isinstance(param_value, tuple): + if len(param_value) != len(optimizer.param_groups): + raise ValueError("expected {} value for {}, got {}".format(len(optimizer.param_groups), param_name, + FileNotFoundError(param_value))) + return list(param_value) + return [param_value] * len(optimizer.param_groups) + + +class WarmupDecayLR(WarmupLR): + """Increase the learning rate of each parameter group from min lr to max lr + over warmup_num_steps steps, and then decay at linear rate over the remaining training steps. + + Args: + optimizer (Optimizer): Wrapped optimizer. + total_num_steps (int): total number of training steps + warmup_min_lr (float or list): minimum learning rate. Default: 0 + warmup_max_lr (float or list): maximum learning rate. Default: 0.001 + warmup_num_steps (int): number of steps to warm up from min_lr to max_lr. Default: 1000 + warmup_type {‘log’, ‘linear’}: increasing function from min_lr to max_lr during warmup. Default: log + last_batch_iteration (int): The index of the last batch. Default: -1. + Example: + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = WarmupDecayLR(optimizer, 1000000) + >>> data_loader = torch.utils.data.DataLoader(...) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) 
+ >>> scheduler.step() + + """ + + def __init__(self, + optimizer: Optimizer, + total_num_steps: int, + warmup_min_lr: float = 0.0, + warmup_max_lr: float = 0.001, + warmup_num_steps: int = 1000, + warmup_type: str = WARMUP_LOG_RATE, + last_batch_iteration: int = -1): + + self.total_num_steps = total_num_steps + super(WarmupDecayLR, self).__init__(optimizer, warmup_min_lr, warmup_max_lr, warmup_num_steps, warmup_type, + last_batch_iteration) + if self.total_num_steps < self.warmup_num_steps: + logger.warning('total_num_steps {} is less than warmup_num_steps {}'.format( + total_num_steps, warmup_num_steps)) + + def _get_gamma(self): + if self.last_batch_iteration < self.warmup_num_steps: + if self.warmup_type == WARMUP_LOG_RATE: + return self.inverse_log_warm_up * math.log(self.last_batch_iteration + 1) + elif self.warmup_type == WARMUP_LINEAR_RATE: + return self.last_batch_iteration / self.warmup_num_steps + return max( + 0.0, + float(self.total_num_steps - self.last_batch_iteration) / + float(max(1.0, self.total_num_steps - self.warmup_num_steps))) + + +class WarmupCosineLR(object): + """Increase the learning rate of each parameter group from min lr ratio to max lr ratio + over warmup_num_steps steps, and then decay at cosine rate over the remaining training steps to min cosine ratio. + + Args: + optimizer (Optimizer): Wrapped optimizer. + total_num_steps (int): total number of training steps + warmup_min_ratio (float or list): warmup start learning rate ratio. Default: 0 + warmup_num_steps (int): number of steps to warm up from warmup_min_ratio to 1.0. Default: 1000 + warmup_type {‘log’, ‘linear’}: increasing function from min_lr to max_lr during warmup. Default: log + cos_min_ratio (float): cosine end learning rate ratio. Default: 0.0001 + last_batch_iteration (int): The index of the last batch. Default: -1. + Example: + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = WarmupCosineLR(optimizer, 1000000) + >>> data_loader = torch.utils.data.DataLoader(...) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) + >>> scheduler.step() + + """ + + def __init__(self, + optimizer: Optimizer, + total_num_steps: int, + warmup_min_ratio: float = 0.0, + warmup_num_steps: int = 1000, + cos_min_ratio: float = 0.0001, + warmup_type: str = WARMUP_LOG_RATE, + last_batch_iteration: int = -1): + + self.optimizer = get_torch_optimizer(optimizer) + + self.total_num_steps = total_num_steps + self.last_batch_iteration = last_batch_iteration + self.cos_min_ratio = cos_min_ratio + + self.warmup_type = warmup_type + self.warmup_min_ratio = warmup_min_ratio + self.warmup_num_steps = max(2, warmup_num_steps) + self.inverse_log_warm_up = 1.0 / math.log(self.warmup_num_steps) + + if self.total_num_steps < self.warmup_num_steps: + logger.warning('total_num_steps {} is less than warmup_num_steps {}'.format( + total_num_steps, warmup_num_steps)) + self.org_lrs = [group['lr'] for group in self.optimizer.param_groups] + + def get_lr_ratio(self): + if self.last_batch_iteration < 0: + logger.warning("Attempting to get learning rate from scheduler before it has started") + return [0.0] + + if self.last_batch_iteration < self.warmup_num_steps: + if self.warmup_type == WARMUP_LOG_RATE: + ratio = self.inverse_log_warm_up * math.log(self.last_batch_iteration + 1) + elif self.warmup_type == WARMUP_LINEAR_RATE: + ratio = self.last_batch_iteration / self.warmup_num_steps + ratio_delta = 1. 
- self.warmup_min_ratio + ratio = self.warmup_min_ratio + ratio * ratio_delta + return ratio + + real_last_step = self.last_batch_iteration - self.warmup_num_steps + 1 + real_total_steps = self.total_num_steps - self.warmup_num_steps + ratio_delta = 1. - self.cos_min_ratio + ratio = (1 + math.cos(math.pi * real_last_step / real_total_steps)) / 2 + ratio = max(0.0, self.cos_min_ratio + ratio_delta * ratio) + return ratio + + def step(self, last_batch_iteration=None): + if last_batch_iteration is None: + last_batch_iteration = self.last_batch_iteration + 1 + self.last_batch_iteration = last_batch_iteration + + lrs = self.get_lr() + for param_group, lr in zip(self.optimizer.param_groups, lrs): + param_group['lr'] = lr + self._last_lr = [group['lr'] for group in self.optimizer.param_groups] + + def get_lr(self): + if self.last_batch_iteration < 0: + logger.warning("Attempting to get learning rate from scheduler before it has started") + return [0.0] + lr_ratio = self.get_lr_ratio() + return [org_lr * lr_ratio for org_lr in self.org_lrs] + + def get_last_lr(self): + """ Return last computed learning rate by current scheduler. + """ + assert getattr(self, '_last_lr', None) is not None, "need to call step() first" + return self._last_lr + + def state_dict(self): + return {'last_batch_iteration': self.last_batch_iteration} + + def load_state_dict(self, sd): + self.last_batch_iteration = sd['last_batch_iteration'] + + def _format_param(self, optimizer, param_value, param_name): + if isinstance(param_value, list) or isinstance(param_value, tuple): + if len(param_value) != len(optimizer.param_groups): + raise ValueError("expected {} value for {}, got {}".format(len(optimizer.param_groups), param_name, + FileNotFoundError(param_value))) + return list(param_value) + return [param_value] * len(optimizer.param_groups) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/quantize.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..ad553138b2d7c8095aa1568f56a1f1a0b25e16d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/quantize.py @@ -0,0 +1,180 @@ +# Copyright (c) Microsoft Corporation. 
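# Illustrative sketch (not part of the patch above; field names follow the
# scheduler constructors in lr_schedules.py, values are placeholders): these
# schedulers are normally selected through the "scheduler" block of the
# DeepSpeed config passed to deepspeed.initialize(), which then constructs
# and steps the chosen scheduler together with the engine.
ds_config = {
    "train_batch_size": 8,
    "optimizer": {"type": "Adam", "params": {"lr": 1e-3}},
    "scheduler": {
        "type": "WarmupDecayLR",
        "params": {
            "total_num_steps": 100000,
            "warmup_min_lr": 0.0,
            "warmup_max_lr": 1e-3,
            "warmup_num_steps": 1000,
            "warmup_type": "log",
        },
    },
}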
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import math +from deepspeed.utils import logger +from deepspeed.ops.quantizer import ds_quantizer + +TWO_D_PARAMS = 6 + + +class Quantizer(object): + + def __init__(self, + q_groups=1, + q_mixed_fp16=False, + q_change_ratio=0.01, + q_type=0, + q_rounding=0, + q_verbose=False, + q_eigenvalue=False, + use_quantizer_kernel=False, + layer_num=0): + + self.q_groups = q_groups + self.q_mixed_fp16 = q_mixed_fp16 + self.q_change_ratio = q_change_ratio + self.q_type = q_type + self.qsteps = 0 + self.quantize_real_ratio = 1.000 + self.q_verbose = q_verbose + self.q_eigenvalue = q_eigenvalue + self.use_quantizer_kernel = use_quantizer_kernel + self.q_rounding = q_rounding + self.layer_num = layer_num + + def any_precision_switch(self): + # Temporary disabled functionality + if self.layer_num == 0: + return True + result = False + for index in range(self.layer_num): + if self.q_start_bits[index] != self.q_target_bits: + next_step = self.qsteps + (TWO_D_PARAMS * (self.layer_num if self.layer_num != 0 else 1)) + if next_step >= self.q_period[index]: + result = True + return result + + def quantize(self, parameter_group, overflow, eigenvalue_enabled, block_eigenvalue={}): + + if overflow and not eigenvalue_enabled: + return + + self.step() + + self.update_fp16_ratio() + + for i in range(len(parameter_group)): + for p in parameter_group[i]: + if len(p.size()) > 1 and hasattr(p, "start_bits") and p.start_bits: + param_id = id(p) + if block_eigenvalue is None: + eigenvalue, layer_id = None, 0 + else: + eigenvalue, layer_id = block_eigenvalue[param_id] if param_id in block_eigenvalue else (None, + 0) + if eigenvalue is not None: + factor = 1 + math.floor(eigenvalue * 4) + p.data = self.compute_quantization(p.data, layer_id, factor) + else: + p.data = self.compute_quantization(p, layer_id) + + def step(self): + self.qsteps += 1 + + def quantize_highbit(self, inputs, num_bits): + + q_range = 2**num_bits + input_flat = inputs.reshape(self.q_groups, -1) + g_min = input_flat.amin(dim=-1, keepdim=True) + g_max = input_flat.amax(dim=-1, keepdim=True) + + # Random number generator (Uniform) + if self.q_rounding == 'nearest': + p = 0. + else: + p = input_flat.new(input_flat.shape).uniform_(-0.5, 0.5) + + if self.q_type == 'symmetric': + scale = 2 * torch.max(torch.abs(g_min), torch.abs(g_max)) / q_range + zero_point = 0. 
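# Worked illustration (values assumed, not from the source): with num_bits=8,
# q_range = 2**8 = 256. If a group has g_min=-0.5 and g_max=1.0, then
# scale = 2 * max(0.5, 1.0) / 256 = 0.0078125. Each element is divided by the
# scale, optionally dithered by p (stochastic rounding), rounded, clamped to
# the signed range [-(256 >> 1), (256 >> 1) - 1] = [-128, 127], and rescaled,
# so the tensor keeps its dtype but only takes 256 distinct values.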
+ input_flat = (input_flat / scale + p).round().clamp(-(q_range >> 1), (q_range >> 1) - 1) * scale + elif self.q_type == 'asymmetric': + scale = (g_max - g_min) / q_range + zero_point = (g_min / scale).round() * scale + input_flat = ((input_flat - zero_point) / scale + p).round().clamp(0, (q_range - 1)) * scale + zero_point + output = input_flat.reshape(inputs.shape).contiguous() + return output + + def quantize_tenary(self, inputs): + input_flat = inputs.reshape(self.q_groups, -1) + n = input_flat.shape[1] + m = input_flat.norm(p=1, dim=1).div(n) + thres = (0.7 * m).view(-1, 1) #.expand_as(input_flat) + pos = (input_flat > thres).type(inputs.type()) + neg = (input_flat < -thres).type(inputs.type()) + mask = (input_flat.abs() > thres).type(inputs.type()) + alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1) + output = alpha * pos - alpha * neg + output = output.reshape(inputs.shape).contiguous() + return output + + def quantize_binary(self, inputs): + input_flat = inputs.reshape(self.q_groups, -1) + n = input_flat.shape[1] + m = input_flat.norm(p=1, dim=1, keepdim=True).div(n) + output = input_flat.sign().mul(m) + output = output.reshape(inputs.shape).contiguous() + return output + + def mixed_fp16_quantize(self, input, input_q, index): + if self.q_mixed_fp16 and self.q_start_bits[index] >= (self.q_target_bits - 1): + input_q = input * self.quantize_real_ratio + (1 - self.quantize_real_ratio) * input_q + return input_q + return input_q + + def compute_quantization(self, input, index=0, factor=1): + # fixing the quantization bits based on the training steps + # when reducing 1 bit at each period, we increase the period + # to go slowly toward the target quantization bits + # the period and starting bit can be configured + + if input.start_bits != input.target_bits: + if self.qsteps >= input.q_period: + self.quantize_real_ratio = 1.0 + input.q_period <<= 1 + input.q_period *= factor + input.start_bits -= 1 + if self.q_verbose: + logger.info( + f'Quantization settings: current bit-precision = {input.start_bits}, step = {self.qsteps}, quantization period = {input.q_period}, index = {index}' + ) + assert (input.start_bits >= input.target_bits), \ + 'Quantization bit is lower than target precision bits!' + + if self.use_quantizer_kernel: + if input.start_bits <= 2: + raise ValueError('Quantization bit is too low, please do it without quantization kernel!') + input_q = ds_quantizer(input.data.clone(), + self.q_groups, + input.start_bits, + asym=False if self.q_type == 'symmetric' else True, + sr=False if self.q_rounding == 'nearest_neighbor' else True) + else: + if input.start_bits >= 3: + input_flat = self.quantize_highbit(input.data, input.start_bits) + elif input.start_bits == 2: + assert self.q_type == 'symmetric', 'Quantization type is not symmetric!' + assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest_neighbor!' + input_flat = self.quantize_tenary(input.data) + elif input.start_bits == 1: + assert self.q_type == 'symmetric', 'Quantization type is not symmetric!' + assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest_neighbor!' 
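# Descriptive aside (interpretation, hedged): for the 1-bit case below,
# quantize_binary() keeps only the sign of each element and rescales by the
# per-group mean absolute value (a binary-weight-network style scheme), while
# the 2-bit path above uses quantize_tenary(), which zeroes elements below a
# 0.7 * mean|w| threshold and scales the survivors by their mean magnitude
# (a ternary-weight-network style scheme). Both preserve tensor shape and
# dtype while restricting each group to 2 or 3 effective values.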
+ input_flat = self.quantize_binary(input.data) + if self.use_quantizer_kernel: + return self.mixed_fp16_quantize(input.data, input_q, index) + else: + if self.q_mixed_fp16 and input.start_bits >= input.target_bits - 1: + input_flat = self.quantize_real_ratio * input.data + \ + (1 - self.quantize_real_ratio) * input_flat + return input_flat + + def update_fp16_ratio(self): + if self.q_mixed_fp16: + if self.quantize_real_ratio > 0: + self.quantize_real_ratio -= self.q_change_ratio + else: + self.quantize_real_ratio = 0.000 diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..75fb6aa9d30ad62ad4b3ce2b24a38e42b8a56068 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .logging import logger, log_dist +from .comms_logging import get_caller_func +#from .distributed import init_distributed +from .init_on_device import OnDevice +from .groups import * +from .nvtx import instrument_w_nvtx +# TODO: Move tensor fragment and mixed precision to zero utils +from .tensor_fragment import tensor_fragment, get_full_hp_param, get_hp_fragment_mapping, fragment_address, get_full_hp_grad, map_to_flat_opt_states +from .tensor_fragment import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state +from .tensor_fragment import set_full_hp_param +from .tensor_fragment import safe_set_full_fp32_param, safe_set_full_optimizer_state +from .tensor_fragment import safe_get_local_fp32_param, safe_get_local_grad, safe_get_local_optimizer_state +from .tensor_fragment import safe_set_local_fp32_param, safe_set_local_optimizer_state +from .z3_leaf_module import set_z3_leaf_modules, unset_z3_leaf_modules, get_z3_leaf_modules, z3_leaf_module, z3_leaf_parameter +from .mixed_precision_linkage import link_hp_params, lazy_init_hp_params_optimizer_state +from deepspeed.runtime.dataloader import RepeatingLoader +from .numa import get_numactl_cmd diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..882b18edb36ffad21b6d3cae8dbe7e9abbeab7f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/bwc.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/bwc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac8ea7b6fa8d9ac85be7ea1f709c38b45dc55073 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/bwc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/comms_logging.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/comms_logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c7c2c95db9170ab4f177c21368899f8d5c4f5c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/comms_logging.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/debug.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/debug.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9adf92440e6178e979f02e74beaed32dea8f0c41 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/debug.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/exceptions.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52413dad0ab5a21c0770781819fc9d0c3802f646 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/exceptions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/groups.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/groups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bca687eeb4eb6c6ebb5dc0d2784eaf90dc251fd8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/groups.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/init_on_device.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/init_on_device.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53248a01e9a2ec8336c6c2961003bfd91b2894bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/init_on_device.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/logging.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c7342bc61b537c0073a442859ae1cedcb3b95d9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/logging.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/mixed_precision_linkage.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/mixed_precision_linkage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1afa9ca1fe125fc8ba1a0c2d7e2948c79c827eb7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/mixed_precision_linkage.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/numa.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/numa.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..426b0c01c579c73a723039d3d119ee3f4b24ac7f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/numa.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/nvtx.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/nvtx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..595f3087f22625c1fcd442c51ec9e77907dd105a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/nvtx.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/tensor_fragment.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/tensor_fragment.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8b0c2f67ac31175e318674e7d0520ae90be29b69 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/tensor_fragment.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/timer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/timer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c79ee035b3600007608716d76c1206f0437bffb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/timer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/torch.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/torch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cb33724f6be33cfaa26c963cfe3aaa90407938b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/torch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/types.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b03bf534e6daec0197e6e290e53598532fb26f03 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/z3_leaf_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/z3_leaf_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22662b5c188304f70d44882bbc53621b09c7eeb2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/z3_leaf_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/zero_to_fp32.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/zero_to_fp32.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..144952956efea9ca6253db91806549adee8bed93 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/utils/__pycache__/zero_to_fp32.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/bwc.py b/venv/lib/python3.10/site-packages/deepspeed/utils/bwc.py new file mode 100644 index 0000000000000000000000000000000000000000..69fcc251a68429a768892b7cf7c6824de7cb5ba2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/bwc.py @@ -0,0 +1,104 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + + +def bwc_tensor_model_parallel_rank(mpu=None): + """Backwards-compatible way of querying the tensor model parallel rank from + an ``mpu`` object. + + *Tensor* model parallelism means that tensors are physically split across + processes. This contrasts with *pipeline* model parallelism, in which the + layers are partitioned but tensors left intact. + + The API for tensor model parallelism has changed across versions and this + helper provides a best-effort implementation across versions of ``mpu`` + objects. The preferred mechanism is + ``mpu.get_tensor_model_parallel_rank()``. + + This should "just work" with both Megatron-LM and DeepSpeed's pipeline + parallelism. + + Args: + mpu (model parallel unit, optional): The tensor model parallel rank. + If ``mpu=None``, returns 0. 
Defaults to ``None``. + + Returns: + int: the rank + """ + if mpu is None: + # No model parallelism in easy :) + return 0 + + if hasattr(mpu, 'get_tensor_model_parallel_rank'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + return mpu.get_tensor_model_parallel_rank() + elif hasattr(mpu, 'get_slice_parallel_rank'): + # Some DeepSpeed + pipeline parallelism versions + return mpu.get_slice_parallel_rank() + else: + # Deprecated Megatron and DeepSpeed convention + return mpu.get_model_parallel_rank() + + +def bwc_tensor_model_parallel_world_size(mpu=None): + """Backwards-compatible way of querying the tensor model parallel world size. + Similar to bwc_tensor_model_parallel_rank. + """ + if mpu is None: + return 1 + + if hasattr(mpu, 'get_tensor_model_parallel_world_size'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + return mpu.get_tensor_model_parallel_world_size() + elif hasattr(mpu, 'get_slice_parallel_world_size'): + # Some DeepSpeed + pipeline parallelism versions + return mpu.get_slice_parallel_world_size() + else: + # Deprecated Megatron and DeepSpeed convention + return mpu.get_model_parallel_world_size() + + +def bwc_tensor_model_parallel_group(mpu=None): + """Backwards-compatible way of querying the tensor model parallel group. + Similar to bwc_tensor_model_parallel_rank. + """ + if mpu is None: + return None + + if hasattr(mpu, 'get_tensor_model_parallel_group'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + return mpu.get_tensor_model_parallel_group() + elif hasattr(mpu, 'get_slice_parallel_group'): + # Some DeepSpeed + pipeline parallelism versions + return mpu.get_slice_parallel_group() + else: + # Deprecated Megatron and DeepSpeed convention + return mpu.get_model_parallel_group() + + +def bwc_pipeline_parallel_world_size(mpu=None): + """Backwards-compatible way of querying the pipeline parallel world size.""" + world_size = 1 + if mpu is not None: + if hasattr(mpu, 'get_pipeline_model_parallel_world_size'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + world_size = mpu.get_pipeline_model_parallel_world_size() + elif hasattr(mpu, 'get_pipe_parallel_world_size'): + # DeepSpeed Topology + world_size = mpu.get_pipe_parallel_world_size() + return world_size + + +def bwc_pipeline_parallel_group(mpu=None): + """Backwards-compatible way of querying the pipeline parallel group.""" + if mpu is None: + return None + if hasattr(mpu, 'get_pipeline_model_parallel_group'): + # Megatron + return mpu.get_pipeline_model_parallel_group() + elif hasattr(mpu, 'get_pipe_parallel_group'): + # DeepSpeed Topology + return mpu.get_pipe_parallel_group() + assert False, 'mpu does not support pipeline parallel group' diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/comms_logging.py b/venv/lib/python3.10/site-packages/deepspeed/utils/comms_logging.py new file mode 100644 index 0000000000000000000000000000000000000000..0e405a06d23e8925b942e26273a62fc1a6b69743 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/comms_logging.py @@ -0,0 +1,178 @@ +# Copyright (c) Microsoft Corporation. 
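# Worked example (assumed numbers, not from the source): calc_bw_log() below
# reports algorithmic and bus bandwidth in Gbps, with `size` in bytes and
# `duration` in milliseconds. For an all_reduce of a 1 GB tensor across
# n = 8 ranks that takes 250 ms:
#   algbw = 2 * size / duration     -> 2 * 1e9 B / 250 ms  ~ 64 Gbps
#   busbw = algbw * (n - 1) / n     -> 64 * 7 / 8          = 56 Gbps
# busbw normalizes for the 2*(n-1)/n bytes each rank actually moves in a ring
# all_reduce, so it is comparable across collective types and world sizes.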
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import math +from deepspeed.utils import log_dist + + +def get_caller_func(frame=3): + import sys + return sys._getframe(frame).f_code.co_name + + +def print_rank_0(message): + import deepspeed.comm as dist + if dist.get_rank() == 0: + print(message) + + +# Helper function to pretty-print message sizes +def convert_size(size_bytes): + if size_bytes == 0: + return "0B" + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size_bytes, 1024))) + p = math.pow(1024, i) + s = round(size_bytes / p, 2) + return "%s %s" % (s, size_name[i]) + + +# Helper function to calculate algbw and busbw. +# See https://gist.github.com/jeffra/b5e80466b4c86be00ea3b6f130fb7a36 and https://github.com/NVIDIA/nccl-tests/blob/master/doc/PERFORMANCE.md +def calc_bw_log(comm_op, size, duration): + import deepspeed.comm as dist + + n = dist.get_world_size() + tput = 0 + busbw = 0 + if comm_op == "all_to_all_single": + tput = (size / duration) + busbw = (size / duration) * ((n - 1) / n) + elif comm_op == "all_gather" or comm_op == "all_gather_into_tensor" or comm_op == "reduce_scatter" or comm_op == "reduce_scatter_tensor": + size *= n + tput = (size / duration) + busbw = (size / duration) * ((n - 1) / n) + elif comm_op == "all_reduce" or comm_op == "all_reduce_coalesced" or comm_op == "inference_all_reduce": + tput = (size * 2 / duration) + busbw = (size / duration) * (2 * (n - 1) / n) + elif comm_op == "send" or comm_op == "recv" or comm_op == "isend" or comm_op == "irecv" or comm_op == "broadcast" or comm_op == "reduce" or comm_op == "gather" or comm_op == "scatter" or comm_op == "barrier": + tput = (size / duration) + busbw = tput + else: + print_rank_0("wrong comm_op specified") # noqa: F821 + exit(0) + + # convert to Gbps + tput *= 8 + busbw *= 8 + + tput /= 1e6 + busbw /= 1e6 + + return tput, busbw + + +class CommsLogger: + + def __init__(self): + from deepspeed.comm.constants import COMMS_LOGGER_VERBOSE_DEFAULT, COMMS_LOGGER_DEBUG_DEFAULT, COMMS_LOGGER_PROF_OPS_DEFAULT, COMMS_LOGGER_PROF_ALL_DEFAULT, COMMS_LOGGER_ENABLED_DEFAULT + self.comms_dict = {} + self.verbose = COMMS_LOGGER_VERBOSE_DEFAULT + self.debug = COMMS_LOGGER_DEBUG_DEFAULT + self.prof_ops = COMMS_LOGGER_PROF_OPS_DEFAULT + self.prof_all = COMMS_LOGGER_PROF_ALL_DEFAULT + self.enabled = COMMS_LOGGER_ENABLED_DEFAULT + + def configure(self, comms_config): + self.enabled = comms_config.comms_logger_enabled + if self.enabled: + self.verbose = comms_config.comms_logger.verbose + self.debug = comms_config.comms_logger.debug + self.prof_ops = comms_config.comms_logger.prof_ops + self.prof_all = comms_config.comms_logger.prof_all + + # There are three settings for the op profiler: + # - Global profiling (profile all comms) + # - Op-type profiling (e.g. profile all all_reduce comms) + # - Op profiling (e.g. profile a specific all_reduce op) + def start_profiling_comms(self): + self.prof_all = True + + def stop_profiling_comms(self): + self.prof_all = True + + # E.g. 
start_profiling_op('all_reduce') + def start_profiling_op(self, op_name_list): + self.prof_ops = list(set(self.prof_ops) | set(op_name_list)) + + def stop_profiling_op(self, op_name_list): + self.prof_ops = [op for op in self.prof_ops if op not in op_name_list] + + # Add log entry + def append(self, raw_name, record_name, latency, msg_size): + algbw, busbw = calc_bw_log(raw_name, msg_size, latency) + if record_name in self.comms_dict.keys(): + # If this comm_op has already been logged with this message size, just add to existing record + if msg_size in self.comms_dict[record_name].keys(): + self.comms_dict[record_name][msg_size][0] += 1 + self.comms_dict[record_name][msg_size][1].append(latency) + self.comms_dict[record_name][msg_size][2].append(algbw) + self.comms_dict[record_name][msg_size][3].append(busbw) + # If this is a new message size for this comm_op, add new record under existing comm_op + else: + self.comms_dict[record_name][msg_size] = [1, [latency], [algbw], [busbw]] + else: + # Create entirely new record + self.comms_dict[record_name] = {msg_size: [1, [latency], [algbw], [busbw]]} + # If verbose, print every comm op + # TODO: Add to tensorboard + if self.verbose: + log_str = f"comm op: {record_name} | time (ms): {latency:.2f} | msg size: {convert_size(msg_size)} | algbw (Gbps): {algbw:.2f} | busbw (Gbps): {busbw:.2f}" + log_dist(log_str, [0]) + + # Print summary at end of iteration, epoch, or training + def log_all(self, print_log=True, show_straggler=False): + import torch + from deepspeed.utils.timer import trim_mean + import deepspeed.comm as dist + from deepspeed.comm.reduce_op import ReduceOp + if print_log: + print( + f"{'Comm. Op': <20}{'Message Size': <20}{'Count': <20}{'Total Latency(ms)': <20}{'Avg Latency(ms)': <20}{'tput_avg (Gbps)': <20}{'busbw_avg (Gbps)': <20}" + ) + for record_name in self.comms_dict.keys(): + if print_log: + print(record_name) + for msg_size, vals in sorted(self.comms_dict[record_name].items()): + # vals[0] is the count for each msg size + count = vals[0] + # vals[1] is a list of latency records for each msg size + total_lat = sum(vals[1]) + # vals[2] and vals[3] are the lists of algbw and busbw, respectively + # Get rid of outliers when we print + avg_lat = trim_mean(vals[1], 0.1) + avg_algbw = trim_mean(vals[2], 0.1) + avg_busbw = trim_mean(vals[3], 0.1) + if print_log: + print( + f"{' ': <20}{convert_size(msg_size): <20}{count: <20}{total_lat: <20.2f}{avg_lat: <20.2f}{avg_algbw: <20.2f}{avg_busbw: <20.2f}" + ) + + if show_straggler: + if print_log: + print("_______________________________") + print("Breakdown with straggler effect") + print("-------------------------------") + print( + f"{'Comm. 
Op': <20}{'Message Size': <20}{'Count': <20}{'Total comm lat(ms)': <20}{'Total straggler(ms)': <20}{'Avg comm lat(ms)': <20}{'Avg straggler(ms)': <20}" + ) + for record_name in self.comms_dict.keys(): + if print_log: + print(record_name) + for msg_size, vals in sorted(self.comms_dict[record_name].items()): + # vals[0] is the count for each msg size + count = vals[0] + # vals[1] is a list of latency records for each msg size + lats = torch.tensor(vals[1]) + min_lats = torch.tensor(vals[1]) + dist.all_reduce(min_lats, op=ReduceOp.MIN) + total_lat = min_lats.sum().item() + total_straggler = (lats - min_lats).sum().item() + avg_lat = trim_mean(min_lats.tolist(), 0.1) + avg_straggler = trim_mean((lats - min_lats).tolist(), 0.1) + if print_log: + print( + f"{' ': <20}{convert_size(msg_size): <20}{count: <20}{total_lat: <20.2f}{total_straggler: <20.2f}{avg_lat: <20.2f}{avg_straggler: <20.2f}" + ) diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/debug.py b/venv/lib/python3.10/site-packages/deepspeed/utils/debug.py new file mode 100644 index 0000000000000000000000000000000000000000..cebea56255d9b5c8a8602c1afc1c856d9e154209 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/debug.py @@ -0,0 +1,153 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# For lazy import with printflock() +fcntl = None + +# for debug purposes map module and param objects to their fully qualified names +module_names = {} +param_names = {} + + +def debug_clear_module_and_param_names(): + global module_names + global param_names + module_names = {} + param_names = {} + + +def debug_extract_module_and_param_names(model): + # extract the fully qualified names as soon as the model is acquired + global module_names + global param_names + # XXX: can probably make a map of param2module and vice-versa + module_names = {module: name for name, module in model.named_modules()} + param_names = {param: name for name, param in model.named_parameters()} + + +def debug_module2name(module): + if module in module_names: + return module_names[module] + else: + return "unknown" + + +def debug_module2name_id(module): + return f"name={debug_module2name(module)} id={module.id}" + + +def debug_module2name_class(module): + return f"name={debug_module2name(module)} {module.__class__.__name__}" + + +def debug_param2name(param): + if param in param_names: + return param_names[param] + else: + return "unknown" + + +def debug_param2name_id(param): + return f"name={debug_param2name(param)} id={param.ds_id}" + + +def debug_param2name_id_shape(param): + return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}" + + +def debug_param2name_id_shape_device(param): + return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}" + + +def debug_param2name_id_numel(param): + return f"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}" + + +def debug_param2name_id_shape_status(param): + return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}" + + +def printflock(*msgs): + """ + + For printing messages for all concurrent gpus w/o getting interleaved text. + + This is useful when debugging issues where multi-gpus don't sync. + + 1. Enable the force debug in say partitioning and zero3 files + 2. 
Override the usual versions with :: + + def print_rank_0(message, debug=False, force=False): + rank = deepspeed.comm.get_rank() + printflock(f"[{rank}] {message}") + 3. run the program and you get both logs non-interleaved + + But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper + function might be more useful, as it's easier to send each log stream into a separate file and + then compare those. + + """ + global fcntl + if fcntl is None: + import fcntl + + with open(__file__, "r") as fh: + fcntl.flock(fh, fcntl.LOCK_EX) + try: + print(*msgs) + finally: + fcntl.flock(fh, fcntl.LOCK_UN) + + +fh = None + + +def log_rank_file(rank, *msgs): + """ + Print to a log file of the given rank + + This is useful for debugging hanging in sync processes. Here is a possible workflow: + + 1. Enable the force debug in say partitioning and zero3 files + 2. Override the usual versions of print_rank_0 in those files with :: + + def print_rank_0(message, debug=False, force=False): + rank = deepspeed.comm.get_rank() + log_rank_file(rank, message) + + 3. run the program + 4. fix up the expected differences, e.g. different cuda numbers :: + + perl -pi -e 's|cuda:1|cuda:0|' log_rank_* + + 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same + work (e.g. when some layers get conditionally skipped on one gpu but not all) + + diff -u log_rank_0.txt log_rank_1.txt | less + + """ + global fh + if fh is None: + fh = open(f"log_rank_{rank}.txt", "w") + for m in msgs: + fh.write(f"{m}\n") + fh.flush() + + +def print_backward_tensors(tensor): + + def _print_bwd_tensors(grad_fn): + print(f"Backward tensors in {grad_fn}") + for funcs in grad_fn.next_functions: + if funcs[0]: + try: + tensor = getattr(funcs[0], 'variable') + print(funcs[0]) + print(f"Tensor - id: {id(tensor)}, shape: {tensor.shape}, data: {tensor}, grad: {tensor.grad}") + except AttributeError as e: + _print_bwd_tensors(funcs[0]) + + if hasattr(tensor, 'grad_fn'): + _print_bwd_tensors(tensor.grad_fn) diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/exceptions.py b/venv/lib/python3.10/site-packages/deepspeed/utils/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..9dba0933cc9e236ec150bfd5439f7ce8e157394a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/exceptions.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + + +class DeprecatedException(Exception): + pass diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/groups.py b/venv/lib/python3.10/site-packages/deepspeed/utils/groups.py new file mode 100644 index 0000000000000000000000000000000000000000..c49f4520e16e17bd727d3825b3a3e64640ef1421 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/groups.py @@ -0,0 +1,574 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# The file has been adapted from https://github.com/NVIDIA/Megatron-LM and retains the following license from the original file + +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + Support different forms of parallelism in DeepSpeed using multiple process groups. + Given that there are multiple scenarios and use-cases, this file is going to be updated + frequently. For now, the group creation needed for the training scenario is being implemented. + For inference and other new scenarios, the code will be either reused or added to this file. +""" + +from deepspeed import comm as dist +from deepspeed.utils import log_dist +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size, bwc_pipeline_parallel_world_size +from deepspeed.utils.exceptions import DeprecatedException +from deepspeed.accelerator import get_accelerator +# Expert parallel group that the current rank belongs to. +_EXPERT_PARALLEL_GROUP = {} +# Expert data parallel group that the current rank belongs to. +_EXPERT_DATA_PARALLEL_GROUP = {} +# dist world group needs to be cloned for some cases +_WORLD_GROUP = None +# ZeRO parameter partitioning group that the current rank belongs to. +_ZERO_PARAM_INTRA_PARALLEL_GROUP = None +# global object to maintain mpu object if passed by a Megatron client +mpu = None +# global object that stores tensor parallel world size for experts +expert_tensor_parallel_world_size = 1 +# All to All quantized graident communication groups +_ALL_TO_ALL_GROUP = {} + +_DATA_PARALLEL_GROUP = None + + +# Deprecated groups initialize function. +def initialize(ep_size=1, mpu=None): + """ Deprecated function. Retained to inform the users.""" + raise DeprecatedException( + "Please do not use the groups.initialize() API as it is deprecated. Instead, pass the desired ep_size to deepspeed.moe.layer.MoE(..,ep_size,..)" + ) + + +def _ensure_divisibility(numerator, denominator): + """Ensure that numerator is divisible by the denominator.""" + assert numerator % denominator == 0, '{} is not divisible by {}'.format(numerator, denominator) + + +# Not currently used. Helper function to create a model (tensor) parallel group. +def _create_model_parallel(model_parallel_size_): + """ + Initialize model data parallel groups. + + Arguments: + model_parallel_size: number of GPUs used to parallelize model. + + Returns: + Tuple of data parallel group and model parallel group + + Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we + use 2 GPUs to parallelize the model. The present function will + create 4 model parallel groups and 2 data parallel groups as: + 4 model parallel groups: + [g0, g1], [g2, g3], [g4, g5], [g6, g7] + 2 data parallel groups: + [g0, g2, g4, g6], [g1, g3, g5, g7] + Note that for efficiency, the caller should make sure adjacent ranks + are on the same DGX box. For example if we are using 2 DGX-1 boxes + with a total of 16 GPUs, rank 0 to 7 belong to the first box and + ranks 8 to 15 belong to the second box. + """ + log_dist(f'Creating model parallel group with size {model_parallel_size_}', ranks=[0]) + # Get world size and rank. Ensure some consistencies. 
+ assert dist.is_initialized() + world_size = dist.get_world_size() + model_parallel_size = min(model_parallel_size_, world_size) + _ensure_divisibility(world_size, model_parallel_size) + rank = dist.get_rank() + + _DATA_PARALLEL_GROUP = None + _MODEL_PARALLEL_GROUP = None + # Build the data parallel groups. + for i in range(model_parallel_size): + ranks = range(i, world_size, model_parallel_size) + group = dist.new_group(ranks) + if i == (rank % model_parallel_size): + _DATA_PARALLEL_GROUP = group + + # Build the model parallel groups. + for i in range(world_size // model_parallel_size): + ranks = range(i * model_parallel_size, (i + 1) * model_parallel_size) + group = dist.new_group(ranks) + if i == (rank // model_parallel_size): + _MODEL_PARALLEL_GROUP = group + + return _DATA_PARALLEL_GROUP, _MODEL_PARALLEL_GROUP + + +def _create_expert_and_data_parallel(expert_parallel_size_, use_data_before_expert_parallel_=False): + """ + Create expert and data parallel groups. + + Note: Caller of this function is responsible to check if the groups already exist. + + Example - E + D parallel + world_size = 16 + expert_parallel_size = 2 # number of experts in same group + expert_data_parallel_group = [0,2,4,6,8,10,12,14], [1,3,5,7,9,11,13,15] - all reduce is only on MoE params + expert_parallel_group = [0, 1], [2,3], [4,5], [6,7], [8,9] - no all reduce, but all to all + data_parallel_group = [0,1,...,15] - all reduce is only on non-MoE + use_data_before_expert_parallel_ (bool): Use the D + E instead of E + D topology + """ + assert dist.is_initialized() + + log_dist(f'Creating expert and data parallel groups with size {expert_parallel_size_}', ranks=[0]) + world_size = dist.get_world_size() + pp_world_size = 1 if mpu is None else bwc_pipeline_parallel_world_size(mpu) + rank = dist.get_rank() + + pp_stride = world_size // pp_world_size + _ensure_divisibility(pp_stride, expert_parallel_size_) + + group_name = f"ep_size_{expert_parallel_size_}" + + # Build the expert data parallel groups. + global _EXPERT_DATA_PARALLEL_GROUP + + ep_stride = pp_stride // expert_parallel_size_ + + # Only create group if it does not already exist + if group_name not in _EXPERT_DATA_PARALLEL_GROUP: + for pp_stage_start in range(0, world_size, pp_stride): + for i in range(expert_parallel_size_): + if use_data_before_expert_parallel_: + ranks = range(pp_stage_start + i * ep_stride, pp_stage_start + (i + 1) * ep_stride) + else: + ranks = range(pp_stage_start + i, pp_stage_start + pp_stride, expert_parallel_size_) + group = dist.new_group(ranks) + log_dist( + f'Creating expert data parallel process group named {group_name} ' + f'with ranks: {list(ranks)}', [0]) + if rank in ranks: + _EXPERT_DATA_PARALLEL_GROUP[group_name] = group + + # Build the expert parallel groups. 
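# Concrete illustration (assumed: world_size=8, expert_parallel_size_=2, no
# pipeline parallelism): the loop above produces expert *data* parallel groups
# [0,2,4,6], [1,3,5,7] (or [0,1,2,3], [4,5,6,7] when
# use_data_before_expert_parallel_ is True), while the loop below produces
# expert parallel groups [0,1], [2,3], [4,5], [6,7] (or [0,4], [1,5], [2,6],
# [3,7] in the D + E layout).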
+ global _EXPERT_PARALLEL_GROUP + + # Only create group if it does not already exist + if group_name not in _EXPERT_PARALLEL_GROUP: + if use_data_before_expert_parallel_: + for pp_stage_start in range(0, world_size, pp_stride): + for i in range(ep_stride): + ranks = range(pp_stage_start + i, pp_stage_start + pp_stride, ep_stride) + group = dist.new_group(ranks) + log_dist( + f'creating expert parallel process group named {group_name} ' + f'with ranks: {list(ranks)}', [0]) + if rank in ranks: + _EXPERT_PARALLEL_GROUP[group_name] = group + else: + for i in range(world_size // expert_parallel_size_): + ranks = range(i * expert_parallel_size_, (i + 1) * expert_parallel_size_) + group = dist.new_group(ranks) + log_dist(f'creating expert parallel process group named {group_name} ' + f'with ranks: {list(ranks)}', [0]) + if rank in ranks: + _EXPERT_PARALLEL_GROUP[group_name] = group + + +def _get_expert_parallel_ranks(world_size, + tensor_parallel_size_, + expert_parallel_size_, + pipeline_parallel_size_=1, + use_data_before_expert_parallel_=False): + """Generate expert parallel and expert data parallel group ranks list. + + Example - E + M + D parallel + world_size = 16 + model_degree = 2 + expert_degree = 4 # number of experts in same group + mp_group = [0, 1], [2,3], [4,5] ... + data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15] + expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15] + expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] + + Args: + world_size (int): Distributed world size. + tensor_parallel_size_ (int): Tensor parallel group size. + expert_parallel_size_ (int): Expert parallel group size. + pipeline_parallel_size_ (int): Pipeline parallel group size + use_data_before_expert_parallel_ (bool): Use the D + E instead of E + D topology + Returns: + Expert parallel group ranks and Expert data parallel group ranks list. + """ + _ensure_divisibility(world_size, tensor_parallel_size_ * pipeline_parallel_size_) + dp_world_size = world_size // (tensor_parallel_size_ * pipeline_parallel_size_) + _ensure_divisibility(dp_world_size, expert_parallel_size_) + + # Generate data parallel groups + data_parallel_groups = [] + dp_group_size = tensor_parallel_size_ + pp_stride = world_size // pipeline_parallel_size_ + + if use_data_before_expert_parallel_: + dp_stride = world_size // expert_parallel_size_ // tensor_parallel_size_ // pipeline_parallel_size_ + for pp_stage_start in range(0, world_size, pp_stride): + pp_stage_next = pp_stage_start + pp_stride + for i in range(dp_group_size): + data_parallel_groups.append(list()) + for ds in range(dp_stride): + # [0, 4, 8, 12, 16, 20, 24, 28, 2, 6, 10, 14, 18, 22, 26, 30] + # [1, 5, 9, 13, 17, 21, 25, 29, 3, 7, 11, 15, 19, 23, 27, 31] + data_parallel_groups[-1].extend( + list( + range(pp_stage_start + i + ds * tensor_parallel_size_, pp_stage_next, + dp_stride * tensor_parallel_size_))) + else: + for pp_stage_start in range(0, world_size, pp_stride): + pp_stage_next = pp_stage_start + pp_stride + for i in range(dp_group_size): + data_parallel_groups.append(list(range(pp_stage_start + i, pp_stage_next, dp_group_size))) + + expert_parallel_groups = [] + expert_data_parallel_groups = [] + for dp_ranks in data_parallel_groups: + # partition of expert parallel groups, e.g. 
[0,2,4,6], [8,10,12,14] + part_ep_groups = [] + for i in range(0, dp_world_size, expert_parallel_size_): + part_ep_groups.append(dp_ranks[i:i + expert_parallel_size_]) + expert_parallel_groups.extend(part_ep_groups) + + # zip part_ep_groups get expert data parallel ranks, e.g [0,8],[2,10],[4,12],[6,14] + for expert_dp_ranks in zip(*part_ep_groups): + expert_data_parallel_groups.append(list(expert_dp_ranks)) + + return expert_parallel_groups, expert_data_parallel_groups + + +def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu, use_data_before_expert_parallel_=False): + """ + Create expert and data parallel groups based on MPU (model parallel) group. + + Note: Caller of this function is responsible to check if the groups already exist. + + Example - E + M + D parallel + world_size = 16 + model_degree = 2 + expert_degree = 4 # number of experts in same group + mp_group = [0, 1], [2,3], [4,5] ... + data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15] + expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15] + expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] + """ + assert dist.is_initialized(), "dist is not initialized" + tensor_parallel_size_ = bwc_tensor_model_parallel_world_size(mpu) + + global expert_tensor_parallel_world_size + expert_tensor_parallel_world_size = tensor_parallel_size_ + + world_size = dist.get_world_size() + rank = dist.get_rank() + dp_world_size = mpu.get_data_parallel_world_size() + pp_world_size = 1 if mpu is None else bwc_pipeline_parallel_world_size(mpu) + + _ensure_divisibility(world_size, tensor_parallel_size_) + _ensure_divisibility(dp_world_size, expert_parallel_size_) + + log_dist( + f"Creating deepspeed groups with model parallel size {tensor_parallel_size_}, " + f"pipeline parallel size {pp_world_size}, expert parallel size {expert_parallel_size_}, " + f"world size {world_size}, dp world size {dp_world_size}", [0]) + + global _EXPERT_PARALLEL_GROUP, _EXPERT_DATA_PARALLEL_GROUP + + group_name = f"ep_size_{expert_parallel_size_}" + + # Only create groups if they don't already exist + # Need to check conditions outside the group creation loop because of the way torch.dist group creation works + if group_name not in _EXPERT_DATA_PARALLEL_GROUP and group_name not in _EXPERT_PARALLEL_GROUP: + expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks( + world_size, tensor_parallel_size_, expert_parallel_size_, pp_world_size, use_data_before_expert_parallel_) + for ranks in expert_parallel_groups: + group = dist.new_group(ranks) + if rank in list(ranks): + _EXPERT_PARALLEL_GROUP[group_name] = group + + for ranks in expert_data_parallel_groups: + group = dist.new_group(ranks) + if rank in list(ranks): + _EXPERT_DATA_PARALLEL_GROUP[group_name] = group + + +def _get_max_expert_size(): + """Get the maximum ep_size from all the created groups.""" + assert _EXPERT_PARALLEL_GROUP is not None, "Warning! Process group not initialized" + keylist = [] + for key in _EXPERT_PARALLEL_GROUP.keys(): + # index 2 is ep_size in the group name: ep_size_ + index = 2 + keylist.append(int(key.split('_')[index])) + return max(keylist) if len(keylist) > 0 else None + + +def _get_max_expert_size_name(): + """Get the name of the group with max. 
ep_size""" + return f'ep_size_{_get_max_expert_size()}' + + +def _get_max_expert_parallel_group(): + """Get the max expert parallel size.""" + return _get_expert_parallel_group(_get_max_expert_size_name()) + + +def _get_expert_parallel_group(group_name): + """Get the expert parallel group the caller rank belongs to.""" + assert group_name in _EXPERT_PARALLEL_GROUP, \ + 'expert parallel group is not initialized' + return _EXPERT_PARALLEL_GROUP[group_name] + + +def _get_expert_parallel_group_dict(): + """Get the expert parallel group dict.""" + return _EXPERT_PARALLEL_GROUP + + +def _get_expert_data_parallel_group(group_name): + """Get the expert data parallel group the caller rank belongs to.""" + assert group_name in _EXPERT_DATA_PARALLEL_GROUP, \ + 'expert data parallel group is not initialized' + return _EXPERT_DATA_PARALLEL_GROUP[group_name] + + +def _get_expert_data_parallel_group_dict(): + """Get the expert data parallel group dict.""" + return _EXPERT_DATA_PARALLEL_GROUP + + +def _clone_world_group(): + """Create a clone of the world group + Note: We need to clone the dist world group because we + use dist.get_global_rank() utility function in DeepSpeed at many places. + As that function does not work on dist.group.WORLD, we + need to keep a clone of it. + """ + assert dist.is_initialized(), "dist is not initialized" + global _WORLD_GROUP + if _WORLD_GROUP is None: + # If not cloned already, clone the world group + _WORLD_GROUP = dist.new_group(ranks=range(dist.get_world_size())) + return _WORLD_GROUP + + +def _get_local_all_to_all_group(): + assert dist.is_initialized(), 'dist is not initialized' + global _ALL_TO_ALL_GROUP + device_per_node = get_accelerator().device_count() + num_local = dist.get_world_size() // device_per_node + if num_local == 0 and dist.get_world_size() > 0: + assert dist.get_world_size() >= 1, 'num_gpus must >=1, cannot initialize All-To-All' + cur_rank = [] + for i in range(dist.get_world_size()): + cur_rank.append(i) + _ALL_TO_ALL_GROUP['local_0'] = dist.new_group(ranks=cur_rank) + elif num_local == 1: + assert dist.get_world_size( + ) == device_per_node, 'num_gpus not equal to device per node, cannot initialize All-To-All' + _ALL_TO_ALL_GROUP['local_0'] = dist.new_group(ranks=[i for i in range(device_per_node)]) + else: + assert dist.get_world_size() > device_per_node, 'num_nodes<2 cannot initialize All-To-All' + for i in range(num_local): + local_rank = [j + device_per_node * i for j in range(device_per_node)] + _ALL_TO_ALL_GROUP[f"local_{i}"] = dist.new_group(ranks=local_rank) + + for i in range(device_per_node): + cur_rank = [] + for j in range(num_local): + cur_rank.append(i + j * device_per_node) + _ALL_TO_ALL_GROUP[f"global_{i}"] = dist.new_group(ranks=cur_rank) + return _ALL_TO_ALL_GROUP + + +def _get_data_parallel_group(): + """Get the data parallel group the caller rank belongs to.""" + assert dist.is_initialized(), 'dist is not initialized' + global mpu + if mpu is not None: + return mpu.get_data_parallel_group() + # Return the clone of dist world group + return _clone_world_group() + + +def _get_broadcast_src_rank(): + return dist.get_global_rank(_get_sequence_data_parallel_group(), 0) + + +def _get_expert_broadcast_src_rank(group_name): + return dist.get_global_rank(_get_expert_data_parallel_group(group_name), 0) + + +def _get_expert_parallel_world_size(group_name): + """Return world size for the expert parallel group.""" + return dist.get_world_size(group=_get_expert_parallel_group(group_name)) + + +def 
_get_expert_data_parallel_world_size(group_name): + """Return world size for the expert data parallel group.""" + return dist.get_world_size(group=_get_expert_data_parallel_group(group_name)) + + +def _get_expert_parallel_rank(group_name): + """Return my rank for the expert parallel group.""" + return dist.get_rank(group=_get_expert_parallel_group(group_name)) + + +def _get_expert_parallel_src_rank(group_name): + """Calculate the global rank corresponding to a local rank zero + in the expert parallel group.""" + global_rank = dist.get_rank() + local_world_size = _get_expert_parallel_world_size(group_name) + return (global_rank // local_world_size) * local_world_size + + +def _get_expert_data_parallel_rank(group_name): + """Return my rank for the expert data parallel group.""" + return dist.get_rank(group=_get_expert_data_parallel_group(group_name)) + + +def _get_data_parallel_world_size(): + """Return world size for the data parallel group.""" + global mpu + if mpu is not None: + return mpu.get_data_parallel_world_size() + return dist.get_world_size(group=_get_data_parallel_group()) + + +def _get_model_parallel_world_size(): + """Return world size for the model parallel group.""" + global mpu + if mpu is not None: + return mpu.get_model_parallel_world_size() + return 1 + + +def _get_data_parallel_rank(): + """Return my rank for the data parallel group.""" + return dist.get_rank(group=_get_data_parallel_group()) + + +def _get_sequence_parallel_world_size(): + """Return world size for the model parallel group.""" + global mpu + if mpu is not None and hasattr(mpu, 'get_sequence_parallel_world_size'): + return mpu.get_sequence_parallel_world_size() + return 1 + + +def _get_sequence_parallel_rank(): + """Return my rank for the data parallel group.""" + global mpu + if mpu is not None and hasattr(mpu, 'get_sequence_parallel_rank'): + return mpu.get_sequence_parallel_rank() + return 0 + + +def _get_sequence_parallel_group(): + global mpu + if mpu is not None and hasattr(mpu, 'get_sequence_parallel_group'): + return mpu.get_sequence_parallel_group() + return None + + +def _get_sequence_data_parallel_world_size(): + """Return world size for the model parallel group.""" + global mpu + if mpu is not None and hasattr(mpu, 'get_sequence_data_parallel_world_size'): + return mpu.get_sequence_data_parallel_world_size() + return _get_data_parallel_world_size() + + +def _get_sequence_data_parallel_rank(): + """Return my rank for the data parallel group.""" + global mpu + if mpu is not None and hasattr(mpu, 'get_sequence_data_parallel_rank'): + return mpu.get_sequence_data_parallel_rank() + return _get_data_parallel_rank() + + +def _get_sequence_data_parallel_group(): + global mpu + # When sequence parallelism is enabled, the process group for zero sharding and + # gradient allreduce must be across both dimensions of data and sequence parallelism. + if mpu is not None and hasattr(mpu, 'get_sequence_data_parallel_group'): + return mpu.get_sequence_data_parallel_group() + return _get_data_parallel_group() + + +def _get_expert_model_parallel_world_size(): + global expert_tensor_parallel_world_size + return expert_tensor_parallel_world_size + + +def _create_zero_param_parallel_group(group_size): + """ + Create parameter partitioning group within ZeRO data parallel groups. 
+ + Example - ZP + D parallel + world_size = 16 + zero_hpz_partition_size = 2 # number of ranks with replicated params (dual partitioning) + zero_param_intra_parallel_group = [0, 1], [2,3], [4,5], [6,7], [8,9] - segmented (subgroup) with rep partition + data_parallel_group = [0,1,...,15] - all reduce is on ZeRO model + """ + assert dist.is_initialized() + global _ZERO_PARAM_INTRA_PARALLEL_GROUP + # Only create group if it does not already exist + assert _ZERO_PARAM_INTRA_PARALLEL_GROUP is None, \ + 'ZeRO parameter intra parallel group is already initialized' + + world_size = dist.get_world_size() + rank = dist.get_rank() + + zero_param_parallel_size_ = min(group_size, world_size) + _ensure_divisibility(world_size, zero_param_parallel_size_) + + # Build the ZeRO param intra parallel groups. + for i in range(world_size // zero_param_parallel_size_): + ranks = range(i * zero_param_parallel_size_, (i + 1) * zero_param_parallel_size_) + group = dist.new_group(ranks) + if i == (rank // zero_param_parallel_size_): + _ZERO_PARAM_INTRA_PARALLEL_GROUP = group + + +def _get_zero_param_intra_parallel_group(): + """Get the ZeRO parameter partitioning intra parallel group the caller rank belongs to.""" + #assert _ZERO_PARAM_INTRA_PARALLEL_GROUP is not None, \ + # 'ZeRO parameter partitioning group is not initialized' + #TODO: Add warning + return _ZERO_PARAM_INTRA_PARALLEL_GROUP + + +def _zero_param_parallel_is_initialized(): + """Check if ZeRO data parallel with parameter partititioning groups are initialized.""" + ###TODO: assert that MPU is not set + if _ZERO_PARAM_INTRA_PARALLEL_GROUP is None and _DATA_PARALLEL_GROUP is None: + return False + + +def _get_zero_param_intra_parallel_rank_in_mygroup(): + """Return my rank for the ZeRO parameter inter parallel group.""" + return dist.get_rank(group=_get_zero_param_intra_parallel_group()) + + +def _get_zero_param_intra_parallel_group_world_size(): + """Return world size for the ZeRO parameter parallel group.""" + return dist.get_world_size(group=_get_zero_param_intra_parallel_group()) + + +def _get_zero_param_intra_parallel_group_ranks(): + """Return all ranks for the ZeRO parameter intra parallel group.""" + return dist.get_all_ranks_from_group(group=_get_zero_param_intra_parallel_group()) diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/init_on_device.py b/venv/lib/python3.10/site-packages/deepspeed/utils/init_on_device.py new file mode 100644 index 0000000000000000000000000000000000000000..52dbf71d9562b73d868f71ee7ec7894a187e00f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/init_on_device.py @@ -0,0 +1,81 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from typing import Callable +from torch import Tensor +from packaging import version as pkg_version + + +class OnDevice(object): + """ + Create modules/tensors w. specific devices and dtypes. Examples: + + Create MyModule which consists of many different sub-modules and parameters. In this case we can create + MyModule as a collection of 'meta' tensors by passing `device='meta'` or we can create the module _directly_ + on a CUDA device by passing `device=f'cuda:{local_rank}'` (where `local_rank` is the local GPU id. 
+ + with OnDevice(dtype=torch.float16, device='meta'): + model = MyModel() + + with OnDevice(dtype=torch.float16, device=f'cuda:{local_rank}'): + model = MyModel() + + """ + + _orig_torch_empty = torch.empty + _orig_torch_zeros = torch.zeros + _orig_torch_ones = torch.ones + _orig_torch_full = torch.full + + def __init__(self, dtype, device="meta", enabled=True): + self.dtype = dtype + self.enabled = enabled + self.device = device + + if device == "meta": + if pkg_version.parse('1.10') > pkg_version.parse(torch.__version__): + raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+") + + def fp_tensor_constructor(self, fn: Callable, target_fp_dtype: torch.dtype) -> Callable: + + def wrapped_fn(*args, **kwargs) -> Tensor: + if kwargs.get("device", None) is None: + kwargs['device'] = self.device + tensor: Tensor = fn(*args, **kwargs) + if tensor.is_floating_point(): + tensor = tensor.to(target_fp_dtype) + return tensor + + return wrapped_fn + + def get_new_tensor_fn_for_dtype(self, dtype: torch.dtype) -> Callable: + + def new_tensor(cls, *args) -> Tensor: + tensor = OnDevice._orig_torch_empty(0, device=self.device).new_empty(*args) + if tensor.is_floating_point(): + tensor = tensor.to(dtype) + return tensor + + return new_tensor + + def __enter__(self): + if not self.enabled: + return + torch.Tensor.__old_new__ = torch.Tensor.__new__ + torch.Tensor.__new__ = self.get_new_tensor_fn_for_dtype(self.dtype) + torch.empty = self.fp_tensor_constructor(self._orig_torch_empty, self.dtype) + torch.zeros = self.fp_tensor_constructor(self._orig_torch_zeros, self.dtype) + torch.ones = self.fp_tensor_constructor(self._orig_torch_ones, self.dtype) + torch.full = self.fp_tensor_constructor(self._orig_torch_full, self.dtype) + + def __exit__(self, exc_type, exc_value, traceback): + if not self.enabled: + return + torch.Tensor.__new__ = torch.Tensor.__old_new__ + torch.empty = self._orig_torch_empty + torch.zeros = self._orig_torch_zeros + torch.ones = self._orig_torch_ones + torch.full = self._orig_torch_full diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/logging.py b/venv/lib/python3.10/site-packages/deepspeed/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..1e62d96e10325516b6e0d14ad85b9461a314ead1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/logging.py @@ -0,0 +1,152 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import functools +import logging +import sys +import os + +log_levels = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + + +class LoggerFactory: + + @staticmethod + def create_logger(name=None, level=logging.INFO): + """create a logger + + Args: + name (str): name of the logger + level: level of logger + + Raises: + ValueError is name is None + """ + + if name is None: + raise ValueError("name for logger cannot be None") + + formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] " + "[%(filename)s:%(lineno)d:%(funcName)s] %(message)s") + + logger_ = logging.getLogger(name) + logger_.setLevel(level) + logger_.propagate = False + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(level) + ch.setFormatter(formatter) + logger_.addHandler(ch) + return logger_ + + +logger = LoggerFactory.create_logger(name="DeepSpeed", level=logging.INFO) + + +@functools.lru_cache(None) +def warning_once(*args, **kwargs): + """ + This method is identical to `logger.warning()`, but will emit the warning with the same message only once + + Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. + The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to + another type of cache that includes the caller frame information in the hashing function. + """ + logger.warning(*args, **kwargs) + + +logger.warning_once = warning_once + + +def print_configuration(args, name): + logger.info("{}:".format(name)) + for arg in sorted(vars(args)): + dots = "." * (29 - len(arg)) + logger.info(" {} {} {}".format(arg, dots, getattr(args, arg))) + + +def log_dist(message, ranks=None, level=logging.INFO): + from deepspeed import comm as dist + """Log message when one of following condition meets + + + not dist.is_initialized() + + dist.get_rank() in ranks if ranks is not None or ranks = [-1] + + Args: + message (str) + ranks (list) + level (int) + + """ + should_log = not dist.is_initialized() + ranks = ranks or [] + my_rank = dist.get_rank() if dist.is_initialized() else -1 + if ranks and not should_log: + should_log = ranks[0] == -1 + should_log = should_log or (my_rank in set(ranks)) + if should_log: + final_message = "[Rank {}] {}".format(my_rank, message) + logger.log(level, final_message) + + +def print_json_dist(message, ranks=None, path=None): + from deepspeed import comm as dist + """Print message when one of following condition meets + + + not dist.is_initialized() + + dist.get_rank() in ranks if ranks is not None or ranks = [-1] + + Args: + message (str) + ranks (list) + path (str) + + """ + should_log = not dist.is_initialized() + ranks = ranks or [] + my_rank = dist.get_rank() if dist.is_initialized() else -1 + if ranks and not should_log: + should_log = ranks[0] == -1 + should_log = should_log or (my_rank in set(ranks)) + if should_log: + message['rank'] = my_rank + import json + with open(path, 'w') as outfile: + json.dump(message, outfile) + os.fsync(outfile) + + +def get_current_level(): + """ + Return logger's current log level + """ + return logger.getEffectiveLevel() + + +def should_log_le(max_log_level_str): + """ + Args: + max_log_level_str: maximum log level as a string + + Returns ``True`` if the current log_level is less or equal to the specified log level. Otherwise ``False``. 
+ + Example: + + ``should_log_le("info")`` will return ``True`` if the current log level is either ``logging.INFO`` or ``logging.DEBUG`` + """ + + if not isinstance(max_log_level_str, str): + raise ValueError(f"{max_log_level_str} is not a string") + + max_log_level_str = max_log_level_str.lower() + if max_log_level_str not in log_levels: + raise ValueError(f"{max_log_level_str} is not one of the `logging` levels") + + return get_current_level() <= log_levels[max_log_level_str] diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/mixed_precision_linkage.py b/venv/lib/python3.10/site-packages/deepspeed/utils/mixed_precision_linkage.py new file mode 100644 index 0000000000000000000000000000000000000000..7dea6ba322dbe900927c31356a25dbaea1a3c31b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/mixed_precision_linkage.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import types +from deepspeed.utils import get_full_hp_param, get_full_hp_grad, get_hp_fragment_mapping +from deepspeed.utils import set_full_hp_param + + +def link_hp_params(lp_param_list, flat_hp_partition, gradient_dict, offload_gradient_dict, use_offload, + param_group_index, partition_start, partition_size, dp_group): + local_lp_param_and_offset = _init_lp_to_hp_mapping(lp_param_list, partition_start, partition_size, dp_group) + + for lp_param, lp_start in local_lp_param_and_offset: + lp_param._hp_mapping = get_hp_fragment_mapping(lp_param, lp_start, flat_hp_partition, gradient_dict, + offload_gradient_dict, use_offload, param_group_index, + partition_start, partition_size) + + +def lazy_init_hp_params_optimizer_state(lp_param_list, flat_hp_partition, optimizer_state): + for lp in lp_param_list: + if lp._hp_mapping is not None: + lp._hp_mapping.set_optim_state_fragment(flat_hp_partition, optimizer_state[flat_hp_partition]) + + +def _init_lp_to_hp_mapping(lp_param_list, partition_start, partition_size, dp_group): + current_offset = 0 + param_and_offset_list = [] + partition_end = partition_start + partition_size + index_in_param_group = 0 + for i, lp_param in enumerate(lp_param_list): + lp_param._hp_mapping = None + lp_param._dp_group = dp_group + lp_param.get_full_hp_param = types.MethodType(get_full_hp_param, lp_param) + lp_param.get_full_hp_grad = types.MethodType(get_full_hp_grad, lp_param) + lp_param.set_full_hp_param = types.MethodType(set_full_hp_param, lp_param) + + # lp_param overlaps with partition if both are true + # 1) current_offset < partition_end, + # 2) current_offset + lp_param.numel() >= partition_start + lp_param_end = current_offset + lp_param.numel() + if current_offset < partition_end and lp_param_end > partition_start: + param_and_offset_list.append((lp_param, current_offset)) + lp_param._index_in_param_group = index_in_param_group + # Indices for params in this partition/GPU + index_in_param_group += 1 + current_offset += lp_param.numel() + + return param_and_offset_list diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/numa.py b/venv/lib/python3.10/site-packages/deepspeed/utils/numa.py new file mode 100644 index 0000000000000000000000000000000000000000..13617826b1cea0bd6ca8abafcabe979426016695 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/numa.py @@ -0,0 +1,202 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +# return a list of list for cores to numa mapping +# [ +# [ cores for numa 0 ] +# [ cores belong to numa 1 ] +# ... +# ] + +import distutils +import os +import psutil +import subprocess + + +# return a list of list for cores to numa mapping +# [ +# [ cores for numa 0 ] +# [ cores belong to numa 1 ] +# ... +# ] +def get_numa_cores(): + ret = [] + output = subprocess.check_output(['numactl', '--hardware']).decode("utf-8") + lines = output.split('\n') + for line in lines: + if line.startswith('available:'): + num_numas = int(line.split(' ')[1]) + break + for numa in range(num_numas): + for line in lines: + if line.startswith(f'node {numa} cpus:'): + cores = line.split(' ')[3:] + ret.append([int(core) for core in cores]) + return ret + + +def check_for_numactl_pkg(): + libs = dict( + dpkg=["-l", "numactl", "apt"], + pacman=["-Q", "numactl", "pacman"], + rpm=["-q", "numactl", "yum"], + ) + + found = False + for pkgmgr, data in libs.items(): + flag, lib, tool = data + path = distutils.spawn.find_executable(pkgmgr) + if path is not None: + cmd = f"{pkgmgr} {flag} {lib}" + result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + if result.wait() == 0: + found = True + else: + print(f"please install the {lib} package with {tool}") + break + return found + + +def parse_range(rng): + try: + value = int(rng) + return range(value, value + 1) + except ValueError: + # value is not a single number + parts = rng.split('-') + if len(parts) != 2: + raise ValueError("Bad range: '%s', range must be either a number or two number separated by dash" % + (rng, )) + start = int(parts[0]) + end = int(parts[1]) + if start > end: + raise ValueError("Bad range: '%s', range end must larger than or equal to start" % (rng, )) + return range(start, end + 1) + + +# parse comma and dash separated range list into list +# i.e. "0,2-4,6" --> [0, 2, 3, 4, 6] +# rules: +# 1. Range list number be comma separated, each item are either a single number, +# or a range marked by two numbers (both number are included in the range) +# 2. Sub ranges must be in ascend order and not overlap with each other +# 3. No space in the range expression +def parse_range_list(range_str): + number_list = [] + last = -1 + range_list = range_str.split(',') + for sub_range in range_list: + sub_number_list = parse_range(sub_range) + if sub_number_list[0] <= last: + raise ValueError( + "Bad range: '%s', sub ranges must not overlap with each other and should be in ascend order" % + (range_str, )) + last = sub_number_list[-1] + number_list.extend(sub_number_list) + return number_list + + +def get_numactl_cmd(bind_core_list, num_local_procs, local_rank): + numactl_cmd = [] + check_for_numactl_pkg() + if 'KMP_AFFINITY' in os.environ.keys(): + raise ValueError("Environment variable KMP_AFFINITY conflicts with numactl " + "because it interfere with how many CPU cores numactl can set. 
" + "Unset KMP_AFFINITY before launching deepspeed.\n\n" + "\t$ unset KMP_AFFINITY\n" + "\t$ deepspeed ") + if bind_core_list is not None: + core_list = parse_range_list(bind_core_list) + total_cores = len(core_list) + else: + total_cores = psutil.cpu_count(logical=False) + core_list = range(total_cores) + cores_per_rank = total_cores // num_local_procs + assert cores_per_rank >= 1, "At least one core needs to be assigned to each rank" + core_list_for_rank = core_list[cores_per_rank * local_rank:cores_per_rank * (local_rank + 1)] + numactl_cmd.append("numactl") + + # check if all cores belong to same numa, if true, bind process to that numa domain with -m parameter + numa_cores = get_numa_cores() + num_numas = len(numa_cores) + + numa_mode = "normal" + + non_empty_numa_list = [] + empty_numa_list = [] + previous_numa_cores = [] + numa_node_list = [] + numa_node_list_list = [] + for i in range(num_numas): + # look for empty numa which is HBM numa + if numa_cores[i] == []: + empty_numa_list.append(i) + else: + non_empty_numa_list.append(i) + + # check for fakenuma + if numa_cores[i] == previous_numa_cores: + if numa_node_list == []: + #first duplication, add previous node into list + numa_node_list.append(i - 1) + numa_node_list.append(i) + else: + if numa_node_list != []: + numa_node_list_list.append(numa_node_list) + numa_node_list = [] + previous_numa_cores = numa_cores[i] + if numa_node_list != []: + numa_node_list_list.append(numa_node_list) + + if empty_numa_list != [] and len(empty_numa_list) == len(non_empty_numa_list): + numa_mode = "flat_hbm" + numa_dict = dict(zip(non_empty_numa_list, empty_numa_list)) + elif numa_node_list_list != []: + numa_mode = "fake" + + if numa_mode == "normal": + for i in range(num_numas): + if set(core_list_for_rank) <= set(numa_cores[i]): + numactl_cmd.append("-m") + numactl_cmd.append(f"{i}") + break + elif numa_mode == "flat_hbm": + for i in range(num_numas): + if set(core_list_for_rank) <= set(numa_cores[i]): + numactl_cmd.append("-p") + numactl_cmd.append(f"{numa_dict[i]}") + break + elif numa_mode == "fake": + for i in range(num_numas): + if set(core_list_for_rank) <= set(numa_cores[i]): + for nodes in numa_node_list_list: + if i in nodes: + numactl_cmd.append("-m") + numactl_cmd.append(f"{','.join(map(str, nodes))}") + break + # the following construct break the outer loop if inner loop breaks + else: + continue + break + + numactl_cmd.append("-C") + last_core = core_list_for_rank[0] + first_core = last_core + core_list_str = f"{last_core}" + for core_id in core_list_for_rank[1:]: + if core_id == last_core + 1: + last_core = core_id + continue + else: + if first_core == last_core: + core_list_str = f"{core_list_str},{core_id}" + else: + core_list_str = f"{core_list_str}-{last_core},{core_id}" + first_core = core_id + last_core = core_id + if first_core != last_core: + core_list_str = f"{core_list_str}-{last_core}" + numactl_cmd.append(f"{core_list_str}") + return cores_per_rank, numactl_cmd diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/nvtx.py b/venv/lib/python3.10/site-packages/deepspeed/utils/nvtx.py new file mode 100644 index 0000000000000000000000000000000000000000..3823599e7bf20b7c48045390a50cee1c31f42e7d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/nvtx.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.accelerator import get_accelerator + + +def instrument_w_nvtx(func): + """decorator that causes an NVTX range to be recorded for the duration of the + function call.""" + + def wrapped_fn(*args, **kwargs): + get_accelerator().range_push(func.__qualname__) + ret_val = func(*args, **kwargs) + get_accelerator().range_pop() + return ret_val + + return wrapped_fn diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/tensor_fragment.py b/venv/lib/python3.10/site-packages/deepspeed/utils/tensor_fragment.py new file mode 100644 index 0000000000000000000000000000000000000000..b34722580ddd145f466e728afff7d406ae260e9e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/tensor_fragment.py @@ -0,0 +1,387 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from dataclasses import dataclass +from deepspeed import comm as dist +from typing import Dict + + +@dataclass +class fragment_address: + numel: int + start: int + + +@dataclass +class tensor_fragment: + lp_fragment: torch.Tensor + lp_fragment_address: fragment_address + hp_fragment: torch.Tensor + hp_fragment_address: fragment_address + gradient_dict: Dict + offload_gradient_dict: Dict + use_offload: bool + param_group_index: int + optim_fragment: Dict = None + + def update_hp(self): + self.hp_fragment.data.copy_(self.lp_fragment.data) + + def update_lp(self): + self.lp_fragment.data.copy_(self.hp_fragment.data) + + def get_optim_state_fragment(self, key): + if key in self.optim_fragment: + return self.optim_fragment[key] + else: + raise ValueError(f'{key} not found in optimizer state fragment') + + def set_optim_state_fragment(self, flat_hp_partition, optim_fragment): + self.optim_fragment = { + key: value.narrow(0, self.hp_fragment_address.start, self.hp_fragment_address.numel) + for key, value in optim_fragment.items() + if torch.is_tensor(value) and value.shape == flat_hp_partition.shape + } + + def get_hp_fragment_address(self): + return self.hp_fragment_address + + def get_optim_state_keys(self): + return list(self.optim_fragment.keys()) + + def get_hp_fragment(self, optim_state_key=None): + if optim_state_key is None: + return self.hp_fragment + return self.get_optim_state_fragment(optim_state_key) + + +def map_to_flat_opt_states(flat_hp_tensor, lp_tensors, optim_state, opt_keys): + for key in opt_keys: + hp_param = flat_hp_tensor + buffer = torch.zeros_like(hp_param) + + for lp in lp_tensors: + if lp._hp_mapping is not None: + hp_fragment_address = lp._hp_mapping.get_hp_fragment_address() + hp_fragment = buffer.narrow(0, hp_fragment_address.start, hp_fragment_address.numel) + hp_fragment.data.copy_(lp._hp_mapping.get_hp_fragment(optim_state_key=key).data) + lp._hp_mapping.hp_fragment = hp_fragment + + optim_state[hp_param][key] = buffer + + +def get_full_hp_param(self, optim_state_key=None): + reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten() + if self._hp_mapping is not None: + lp_frag_address = self._hp_mapping.lp_fragment_address + reduce_fragment = torch.narrow(reduce_buffer, 0, lp_frag_address.start, lp_frag_address.numel) + hp_fragment = self._hp_mapping.get_hp_fragment(optim_state_key) + reduce_fragment.data.copy_(hp_fragment.data) + dist.all_reduce(reduce_buffer, group=self._dp_group) + return reduce_buffer.reshape_as(self) + + +def set_full_hp_param(self, value, optim_state_key=None): + if self._hp_mapping is not None: + lp_frag_address = 
self._hp_mapping.lp_fragment_address + value_fragment = torch.narrow(value.flatten(), 0, lp_frag_address.start, lp_frag_address.numel) + hp_fragment = self._hp_mapping.get_hp_fragment(optim_state_key) + hp_fragment.data.copy_(value_fragment.data) + + +def get_full_hp_grad(self): + reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten() + if self._hp_mapping is not None: + hp_mapping = self._hp_mapping + + if hp_mapping.use_offload: + gradient_dict = hp_mapping.offload_gradient_dict + else: + gradient_dict = hp_mapping.gradient_dict + + if hp_mapping.param_group_index not in gradient_dict or gradient_dict[hp_mapping.param_group_index] is None: + raise ValueError("Gradients are only available immediately after backward and before engine step") + + lp_grad_fragment = gradient_dict[hp_mapping.param_group_index][self._index_in_param_group] + hp_grad_fragment = lp_grad_fragment.to(torch.float32).flatten() + + lp_frag_address = self._hp_mapping.lp_fragment_address + reduce_fragment = torch.narrow(reduce_buffer, 0, lp_frag_address.start, lp_frag_address.numel) + + if self.view(-1).shape == hp_grad_fragment.shape: + reduce_buffer.data.copy_(hp_grad_fragment.data) + else: + reduce_fragment.data.copy_(hp_grad_fragment.data) + + dist.all_reduce(reduce_buffer, group=self._dp_group) + return reduce_buffer.reshape_as(self) + + +def safe_get_full_fp32_param(param): + """Assemble and return the fp32 parameter of a low-precision (e.g., fp16) parameter. + + Args: + param (``torch.nn.Parameter``): A model parameter + """ + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + return param._z3_optimizer.get_full_hp_param(param) + + # ZeRO stage 1, 2, and bf16_optimizer params + if hasattr(param, '_hp_mapping'): + return param.get_full_hp_param() + return None + + +def safe_set_full_fp32_param(param, value): + """Update the partitioned fp32 parameter of a low-precision (e.g., fp16) parameter. + + Args: + param (``torch.nn.Parameter``): A model parameter + value (``torch.Tensor``): New value + """ + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + param._z3_optimizer.set_full_hp_param(value, param) + + # ZeRO stage 1, 2, and bf16_optimizer params + if hasattr(param, '_hp_mapping'): + param.set_full_hp_param(value) + + +def safe_get_full_optimizer_state(param, optim_state_key): + """Assemble and return the fp32 optimizer state of a low-precision (e.g., fp16) parameter. + + Args: + param (``torch.nn.Parameter``): A model parameter + optim_state_key (``string``): Key value of optimizer state (e.g., `exp_avg` in Adam optimizer) + """ + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + return param._z3_optimizer.get_full_hp_param(param, optim_state_key) + + # ZeRO stage 1, 2, and bf16_optimizer params + if hasattr(param, '_hp_mapping'): + return param.get_full_hp_param(optim_state_key) + return None + + +def safe_set_full_optimizer_state(param, value, optim_state_key): + """Update the partitioned fp32 optimizer state of a low-precision (e.g., fp16) parameter. 
+ + Args: + param (``torch.nn.Parameter``): A model parameter + value (``torch.Tensor``): New value + optim_state_key (``string``): Key value of optimizer state (e.g., `exp_avg` in Adam optimizer) + """ + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + param._z3_optimizer.set_full_hp_param(value, param, optim_state_key) + + # ZeRO stage 1, 2, and bf16_optimizer params + if hasattr(param, '_hp_mapping'): + param.set_full_hp_param(value, optim_state_key) + + +# TODO: Figure out the correct return dtype +def safe_get_full_grad(param): + """Assemble and return the fp32 gradient of a low-precision (e.g., fp16) parameter. + + Args: + param (``torch.nn.Parameter``): A model parameter + """ + if param.grad is not None: + return param.grad + + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + return param._z3_optimizer.get_fp32_grad_for_param(param) + + # ZeRO stage 1, 2, and bf16_optimizer params + if hasattr(param, '_hp_mapping'): + return param.get_full_hp_grad() + + return None + + +### Local API START ### +def safe_get_local_grad(param): + """Get the fp32 gradient of a partitioned parameter. + Args: + param (``torch.nn.Parameter``): A model parameter + """ + if param.grad is not None: + return param.grad + + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + return param._z3_optimizer.get_local_fp32_grad_for_param(param) + + return None + + +def safe_get_local_fp32_param(param): + """Get the fp32 partitioned parameter. + Args: + param (``torch.nn.Parameter``): A model parameter + """ + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + return param._z3_optimizer.get_local_fp32_param(param) + + return None + + +def safe_get_local_optimizer_state(param, optim_state_key): + """Get the fp32 optimizer state of a partitioned parameter. + Args: + param (``torch.nn.Parameter``): A model parameter + optim_state_key (``string``): Key value of optimizer state (e.g., `exp_avg` in Adam optimizer) + """ + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + return param._z3_optimizer.get_local_fp32_param(param, optim_state_key) + + return None + + +def safe_set_local_optimizer_state(param, value, optim_state_key): + """Update the fp32 optimizer state of a partitioned parameter. + Args: + param (``torch.nn.Parameter``): A model parameter + value (``torch.Tensor``): New value + optim_state_key (``string``): Key value of optimizer state (e.g., `exp_avg` in Adam optimizer) + """ + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + param._z3_optimizer.set_local_hp_param(value, param, optim_state_key) + + +def safe_set_local_fp32_param(param, value): + """Update the partitioned fp32 parameter. 
+ Args: + param (``torch.nn.Parameter``): A model parameter + value (``torch.Tensor``): New value + """ + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + param._z3_optimizer.set_local_hp_param(value, param) + + +### Local API END ### + +# TODO: Implement API for setting ZeRO partitioned gradients + + +def get_hp_fragment_mapping(lp_param, lp_start, flat_hp_partition, gradient_dict, offload_gradient_dict, use_offload, + param_group_index, partition_start, partition_size): + lp_end = lp_param.numel() + lp_start + hp_start = partition_start + hp_end = partition_start + partition_size + + fragment_start = max(lp_start, hp_start) + fragment_end = min(lp_end, hp_end) + assert fragment_start < fragment_end, \ + f'fragment start {fragment_start} should be < fragment_end {fragment_end}' + + fragment_numel = fragment_end - fragment_start + hp_frag_address = fragment_address(start=fragment_start - hp_start, numel=fragment_numel) + hp_fragment_tensor = flat_hp_partition.narrow(0, hp_frag_address.start, hp_frag_address.numel) + + lp_frag_address = fragment_address(start=fragment_start - lp_start, numel=fragment_numel) + lp_fragment_tensor = lp_param.flatten().narrow(0, lp_frag_address.start, lp_frag_address.numel) + + return tensor_fragment(lp_fragment=lp_fragment_tensor, + lp_fragment_address=lp_frag_address, + hp_fragment=hp_fragment_tensor, + hp_fragment_address=hp_frag_address, + gradient_dict=gradient_dict, + offload_gradient_dict=offload_gradient_dict, + use_offload=use_offload, + param_group_index=param_group_index) + + +''' +Logic for lp_param to hp_param mapping + +lp lp0 lp1 lp2 lp3 lp4 <------- indices/names +lp [ ][ ][ ][ ][ ] <-------- tensors +flat_lp [ ] <-------- flat lp params +flat_hp [ ] <------------------ flat hp partition on current rank +full_hp [ ] <------- full flat hp params + + +lp2 + full numel = 16 + lp_frag + numel = 12 + frag_start = 3 + frag_end = 15 + hp_frag + numel = 12 + frag_start = 0 + frag_end = 11 + + hp_frag.copy_(lp_frag) + + +lp3: + full numel = 4 + lp_frag + numel = 4 + start = 0 + end = 3 + hp_frag + numel = 4 + start = 12 + end = 15 + + +lp4: + full numel = 12 + lp_frag + numel = 4 + start = 0 + end = 3 + hp_frag + numel = 4 + start = 16 + end = 19 + + + +Visual depiction of above +lp { } +flat_lp [ ] +flat_hp ( ) + + +flat_lp [ { ( } ) ] + lx hx ly hy + ly-hx + + +lp { } +flat_lp [ ] +flat_hp ( ) + + +flat_lp [ ( { ) } ] + hx lx hy ly + hy-lx + +lp { } +flat_lp [ ] +flat_hp ( ) + + +flat_lp [ ( { } ) ] + hx lx ly hy + ly-lx + +lp -> (lx, hy) +flat_hp -> (hx, hy) +''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/timer.py b/venv/lib/python3.10/site-packages/deepspeed/utils/timer.py new file mode 100644 index 0000000000000000000000000000000000000000..11ef54fe466587f811a6e8826d18199aa79b3821 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/timer.py @@ -0,0 +1,309 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import time +from numpy import mean +from deepspeed.utils.logging import log_dist +from deepspeed.accelerator import get_accelerator + +FORWARD_MICRO_TIMER = 'fwd_microstep' +FORWARD_GLOBAL_TIMER = 'fwd' +BACKWARD_MICRO_TIMER = 'bwd_microstep' +BACKWARD_GLOBAL_TIMER = 'bwd' +BACKWARD_INNER_MICRO_TIMER = 'bwd_inner_microstep' +BACKWARD_INNER_GLOBAL_TIMER = 'bwd_inner' +BACKWARD_REDUCE_MICRO_TIMER = 'bwd_allreduce_microstep' +BACKWARD_REDUCE_GLOBAL_TIMER = 'bwd_allreduce' +STEP_MICRO_TIMER = 'step_microstep' +STEP_GLOBAL_TIMER = 'step' + +try: + import psutil + + PSUTILS_INSTALLED = True +except ImportError: + PSUTILS_INSTALLED = False + pass + + +class CudaEventTimer(object): + + def __init__(self, start_event: get_accelerator().Event, end_event: get_accelerator().Event): + self.start_event = start_event + self.end_event = end_event + + def get_elapsed_msec(self): + get_accelerator().current_stream().wait_event(self.end_event) + self.end_event.synchronize() + return self.start_event.elapsed_time(self.end_event) + + +class SynchronizedWallClockTimer: + """Group of timers. Borrowed from Nvidia Megatron code""" + + class Timer: + """Timer.""" + + def __init__(self, name): + self.name_ = name + self.started_ = False + self.event_timers = [] + self.use_host_timer = get_accelerator().use_host_timers() + self.start_event = None + self.elapsed_records = None + self.start_time = 0.0 + self.end_time = 0.0 + + def start(self): + """Start the timer.""" + assert not self.started_, f"{self.name_} timer has already been started" + if self.use_host_timer: + self.start_time = time.time() + else: + event_class = get_accelerator().Event + self.start_event = event_class(enable_timing=True) + self.start_event.record() + self.started_ = True + + def stop(self, reset=False, record=False): + """Stop the timer.""" + assert self.started_, "timer is not started" + event_class = get_accelerator().Event + if self.use_host_timer: + self.end_time = time.time() + self.event_timers.append(self.end_time - self.start_time) + else: + event_class = get_accelerator().Event + end_event = event_class(enable_timing=True) + end_event.record() + self.event_timers.append(CudaEventTimer(self.start_event, end_event)) + self.start_event = None + self.started_ = False + + def _get_elapsed_msec(self): + if self.use_host_timer: + self.elapsed_records = [et * 1000.0 for et in self.event_timers] + else: + self.elapsed_records = [et.get_elapsed_msec() for et in self.event_timers] + self.event_timers.clear() + return sum(self.elapsed_records) + + def reset(self): + """Reset timer.""" + self.started_ = False + self.start_event = None + self.elapsed_records = None + self.event_timers.clear() + + def elapsed(self, reset=True): + """Calculate the elapsed time.""" + started_ = self.started_ + # If the timing in progress, end it first. + if self.started_: + self.stop() + # Get the elapsed time. + elapsed_ = self._get_elapsed_msec() + # Reset the elapsed time + if reset: + self.reset() + # If timing was in progress, set it back. 
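+            # (restarting here lets callers sample elapsed time while an interval
+            # is still being measured, without permanently stopping the timer)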
+ if started_: + self.start() + return elapsed_ + + def mean(self): + self.elapsed(reset=False) + return trim_mean(self.elapsed_records, 0.1) + + def __init__(self): + self.timers = {} + + def get_timers(self): + return self.timers + + def __call__(self, name): + if name not in self.timers: + self.timers[name] = self.Timer(name) + return self.timers[name] + + @staticmethod + def memory_usage(): + alloc = "mem_allocated: {:.4f} GB".format(get_accelerator().memory_allocated() / (1024 * 1024 * 1024)) + max_alloc = "max_mem_allocated: {:.4f} GB".format(get_accelerator().max_memory_allocated() / + (1024 * 1024 * 1024)) + cache = "cache_allocated: {:.4f} GB".format(get_accelerator().memory_cached() / (1024 * 1024 * 1024)) + max_cache = "max_cache_allocated: {:.4f} GB".format(get_accelerator().max_memory_cached() / + (1024 * 1024 * 1024)) + return " | {} | {} | {} | {}".format(alloc, max_alloc, cache, max_cache) + + def log(self, names, normalizer=1.0, reset=True, memory_breakdown=False, ranks=None): + """Log a group of timers.""" + assert normalizer > 0.0 + string = f"time (ms)" + for name in names: + if name in self.timers: + elapsed_time = (self.timers[name].elapsed(reset=reset) / normalizer) + string += " | {}: {:.2f}".format(name, elapsed_time) + + log_dist(string, ranks=ranks or [0]) + + def get_mean(self, names, normalizer=1.0, reset=True): + """Get the mean of a group of timers.""" + assert normalizer > 0.0 + means = {} + for name in names: + if name in self.timers: + elapsed_time = (self.timers[name].mean() * 1000.0 / normalizer) + means[name] = elapsed_time + return means + + +class NoopTimer: + + class Timer: + + def start(self): + ... + + def reset(self): + ... + + def stop(self, **kwargs): + ... + + def elapsed(self, **kwargs): + return 0 + + def mean(self): + return 0 + + def __init__(self): + self.timer = self.Timer() + + def __call__(self, name): + return self.timer + + def get_timers(self): + return {} + + def log(self, names, normalizer=1.0, reset=True, memory_breakdown=False, ranks=None): + ... + + def get_mean(self, names, normalizer=1.0, reset=True): + ... 
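+
+
+# Illustrative usage sketch (not part of the DeepSpeed sources): the named wall
+# clock timers defined above are typically driven along these lines, for example:
+#
+#     timers = SynchronizedWallClockTimer()
+#     timers(FORWARD_GLOBAL_TIMER).start()
+#     run_forward_pass()                     # hypothetical workload
+#     timers(FORWARD_GLOBAL_TIMER).stop()
+#     timers.log([FORWARD_GLOBAL_TIMER])     # logs a line like "time (ms) | fwd: 12.34" on rank 0
+#
+# NoopTimer implements the same interface, so a caller can swap it in to disable
+# timing without changing any call sites.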
+ + +class ThroughputTimer: + + def __init__( + self, + batch_size, + start_step=2, + steps_per_output=50, + monitor_memory=False, + logging_fn=None, + ): + from deepspeed.utils import logger + self.start_time = 0 + self.end_time = 0 + self.started = False + self.batch_size = 1 if batch_size is None else batch_size + self.start_step = start_step + self.epoch_count = 0 + self.micro_step_count = 0 + self.global_step_count = 0 + self.total_elapsed_time = 0 + self.step_elapsed_time = 0 + self.steps_per_output = steps_per_output + self.monitor_memory = monitor_memory + self.logging = logging_fn + if self.logging is None: + self.logging = logger.info + self.initialized = False + + if self.monitor_memory and not PSUTILS_INSTALLED: + raise ImportError("Unable to import 'psutils', please install package") + + def update_epoch_count(self): + self.epoch_count += 1 + self.micro_step_count = 0 + + def _init_timer(self): + self.initialized = True + + def start(self): + self._init_timer() + self.started = True + if self.global_step_count >= self.start_step: + get_accelerator().synchronize() + self.start_time = time.time() + + def stop(self, global_step=False, report_speed=True): + if not self.started: + return + self.started = False + self.micro_step_count += 1 + if global_step: + self.global_step_count += 1 + + if self.start_time > 0: + get_accelerator().synchronize() + self.end_time = time.time() + duration = self.end_time - self.start_time + self.total_elapsed_time += duration + self.step_elapsed_time += duration + + if global_step: + if report_speed and self.global_step_count % self.steps_per_output == 0: + self.logging( + "epoch={}/micro_step={}/global_step={}, RunningAvgSamplesPerSec={}, CurrSamplesPerSec={}, " + "MemAllocated={}GB, MaxMemAllocated={}GB".format( + self.epoch_count, + self.micro_step_count, + self.global_step_count, + self.avg_samples_per_sec(), + self.batch_size / self.step_elapsed_time, + round(get_accelerator().memory_allocated() / 1024**3, 2), + round(get_accelerator().max_memory_allocated() / 1024**3, 2), + )) + if self.monitor_memory: + virt_mem = psutil.virtual_memory() + swap = psutil.swap_memory() + self.logging("epoch={}/micro_step={}/global_step={}, vm %: {}, swap %: {}".format( + self.epoch_count, + self.micro_step_count, + self.global_step_count, + virt_mem.percent, + swap.percent, + )) + self.step_elapsed_time = 0 + + def avg_samples_per_sec(self): + if self.global_step_count > 0: + total_step_offset = self.global_step_count - self.start_step + avg_time_per_step = self.total_elapsed_time / total_step_offset + # training samples per second + return self.batch_size / avg_time_per_step + return float("-inf") + + +def trim_mean(data, trim_percent): + """Compute the trimmed mean of a list of numbers. + + Args: + data (list): List of numbers. + trim_percent (float): Percentage of data to trim. + + Returns: + float: Trimmed mean. + """ + assert 0.0 <= trim_percent <= 1.0 + n = len(data) + # Account for edge case of empty list + if len(data) == 0: + return 0 + data.sort() + k = int(round(n * (trim_percent))) + return mean(data[k:n - k]) diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/torch.py b/venv/lib/python3.10/site-packages/deepspeed/utils/torch.py new file mode 100644 index 0000000000000000000000000000000000000000..eb22d3561035d05ec27b65adc0b311c488eb43a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/torch.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from packaging import version as pkg_version + +import torch + + +def required_torch_version(min_version=None, max_version=None): + assert min_version or max_version, "Must provide a min_version or max_version argument" + + torch_version = pkg_version.parse(torch.__version__) + + if min_version and pkg_version.parse(str(min_version)) > torch_version: + return False + + if max_version and pkg_version.parse(str(max_version)) < torch_version: + return False + + return True diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/types.py b/venv/lib/python3.10/site-packages/deepspeed/utils/types.py new file mode 100644 index 0000000000000000000000000000000000000000..96b5df625965401f52f6fd9b60396821943fe177 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/types.py @@ -0,0 +1,27 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from enum import IntEnum + + +class ActivationFuncType(IntEnum): + UNKNOWN = 0 + GELU = 1 + ReLU = 2 + GATED_GELU = 3 + GATED_SILU = 4 + + +GATED_ACTIVATION_TYPES = [ + ActivationFuncType.GATED_GELU, + ActivationFuncType.GATED_SILU, +] + + +class NormType(IntEnum): + UNKNOWN = 0 + LayerNorm = 1 + GroupNorm = 2 + RMSNorm = 3 diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/z3_leaf_module.py b/venv/lib/python3.10/site-packages/deepspeed/utils/z3_leaf_module.py new file mode 100644 index 0000000000000000000000000000000000000000..47d9ff698f1f0644a0a6d077db00dc363cd1cd32 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/z3_leaf_module.py @@ -0,0 +1,86 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from typing import List, Type + + +def z3_leaf_module(model: torch.nn.Module) -> bool: + """Returns whether a module in `model` has been flagged as a 'leaf' module. + See `set_z3_leaf_modules` for more details. + Args: + model (torch.nn.Module): The model to which the leaf module flag will be applied. + Returns: + bool: Whether the module has been flagged as a 'leaf' module. + """ + return hasattr(model, '_z3_leaf') and model._z3_leaf + + +def z3_leaf_parameter(model: torch.nn.Parameter) -> bool: + """Returns whether a parameter belongs to a leaf module. + See `set_z3_leaf_modules` for more details. + Args: + model (torch.nn.Parameter): The parameter to which the leaf module flag will be applied. + Returns: + bool: Whether the parameter belongs to a leaf module. + """ + return hasattr(model, 'ds_z3_leaf_module') + + +def get_z3_leaf_modules(model: torch.nn.Module) -> List[torch.nn.Module]: + """Returns a list of modules in `model` that have been flagged as 'leaf' modules. + See `set_z3_leaf_modules` for more details. + Args: + model (torch.nn.Module): The model to which the leaf module flag will be applied. + Returns: + List[torch.nn.Module]: A list of modules that have been flagged as 'leaf' modules. 
+ """ + return [module for module in model.modules() if z3_leaf_module(module)] + + +def _do_set_z3_leaf_modules(model: torch.nn.Module, leaf_module_classes: List[Type], + flag: bool) -> List[torch.nn.Module]: + assert all(isinstance(module_class, type) for module_class in leaf_module_classes), \ + f'leaf_module_classes must be a list of types, got {leaf_module_classes}' + + leaf_modules = [] + + def _set_z3_leaf_flag(model: torch.nn.Module): + nonlocal leaf_modules + if model.__class__ in leaf_module_classes: + model._z3_leaf = flag + leaf_modules.append(model) + + model.apply(_set_z3_leaf_flag) + + if len(leaf_modules) == 0: + raise ValueError(f'No modules of type {leaf_module_classes} found in model {model}') + + return leaf_modules + + +def set_z3_leaf_modules(model: torch.nn.Module, leaf_module_classes: List[Type]) -> List[torch.nn.Module]: + """Sets a flag within a module in `model` to instruct ZeRO3 to stop setting hooks recursively when it encounters a module class listed in `leaf_module_classes`. + This is particularly useful in the context of Mixture of Experts (MoE) models. In MoE models, the computation order of experts varies across forward passes. This variability can disrupt ZeRO3's functionality, as ZeRO3 relies on tracking the computation order of modules to prefetch parameters efficiently. By designating a module as a 'leaf' node, ZeRO3 will prefetch parameters for all child modules upon entering the module. + Another scenario where this functionality is beneficial is in models with excessively fine-grained nested modules, where it helps to avoid the overhead associated with hooks. + Args: + model (torch.nn.Module): The model to which the leaf module flag will be applied. + leaf_module_classes (List[Type]): A list of module classes that should be flagged as 'leaf' modules. + Returns: + List[torch.nn.Module]: A list of modules that match the module classes in `leaf_module_classes`. + """ + return _do_set_z3_leaf_modules(model, leaf_module_classes, True) + + +def unset_z3_leaf_modules(model: torch.nn.Module, leaf_module_classes: List[Type]) -> List[torch.nn.Module]: + """Unsets a flag within a module in `model` to instruct ZeRO3 to resume setting hooks recursively when it encounters a module class listed in `leaf_module_classes`. + See `set_z3_leaf_modules` for more details. + Args: + model (torch.nn.Module): The model to which the leaf module flag will be applied. + leaf_module_classes (List[Type]): A list of module classes that should be flagged as 'leaf' modules. + Returns: + List[torch.nn.Module]: A list of modules that match the module classes in `leaf_module_classes`. + """ + return _do_set_z3_leaf_modules(model, leaf_module_classes, False) diff --git a/venv/lib/python3.10/site-packages/deepspeed/utils/zero_to_fp32.py b/venv/lib/python3.10/site-packages/deepspeed/utils/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..24cc342e78d1a006c782b3a4cd68d9ce786d8fd8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/utils/zero_to_fp32.py @@ -0,0 +1,604 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: python zero_to_fp32.py . 
pytorch_model.bin + +import argparse +import torch +import glob +import math +import os +import re +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. +from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = 
zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + + total_files = len(files) + state_dicts = [] + for f in files: + state_dict = torch.load(f, map_location=device) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." + ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + if zero_stage <= 2: + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + elif zero_stage == 3: + # if there is more than one param group, there will be multiple flattened tensors - one + # flattened tensor per group - for simplicity merge them into a single tensor + # + # XXX: could make the script more memory efficient for when there are multiple groups - it + # will require matching the sub-lists of param_shapes for each param group flattened tensor + + fp32_flat_groups = [ + torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) + ] + + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + elif zero_stage == 3: + return 
_get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. 
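+    # (For example, with world_size=4 the alignment below is align_to=8, so a raw offset of 13
+    # would be rounded up by zero2_align() to 16.)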
Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes 
= zero_model_states[0].param_shapes + avail_numel = fp32_flat_groups[0].numel() * world_size + # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + for name, shape in param_shapes.items(): + + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # XXX: memory usage doubles here + state_dict[name] = torch.cat( + tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), + 0).narrow(0, 0, unpartitioned_numel).view(shape) + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. 
e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + + Returns: + - pytorch ``state_dict`` + + Note: this approach may not work if your application doesn't have sufficient free CPU memory and + you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None, exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + """ + + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters) + print(f"Saving fp32 state dict to {output_file}") + torch.save(state_dict, output_file) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model to cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model`: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. 
+ + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument( + "output_file", + type=str, + help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, + args.output_file, + tag=args.tag, + exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/venv/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a9a9c3869ac2902f8244a8cd3c8a4b21cba17e89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2c96bec20e3c7a59f8f78b30e7fd5142d015e42f2cbd27223c3e862c53e4113 +size 3527040 diff --git a/venv/lib/python3.10/site-packages/numpy/core/tests/data/astype_copy.pkl b/venv/lib/python3.10/site-packages/numpy/core/tests/data/astype_copy.pkl new file mode 100644 index 0000000000000000000000000000000000000000..45694ae001c4a103365ff9fd5ae2da0dba3c11f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numpy/core/tests/data/astype_copy.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9564b309cbf3441ff0a6e4468fddaca46230fab34f15c77d87025a455bdf59d9 +size 716 diff --git a/venv/lib/python3.10/site-packages/numpy/core/tests/data/numpy_2_0_array.pkl b/venv/lib/python3.10/site-packages/numpy/core/tests/data/numpy_2_0_array.pkl new file mode 100644 index 0000000000000000000000000000000000000000..661ff83ca3d83da9c3812f0048f0b7d1ba62aa25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numpy/core/tests/data/numpy_2_0_array.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:561d36b5dc82ca96bc35be10cdd5619c30225c43b6590adcc1cbce7430c5179c +size 718 diff --git a/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/LICENSE.txt b/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..8e7b65eaf628360e6f32f4140fcdd7ec7c2b7077 --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2008-present The pip developers (see AUTHORS.txt file) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/RECORD b/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..b71284e1aa6d31a63ce7656d9846382620dbcf36 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/RECORD @@ -0,0 +1,1037 @@ +../../../bin/pip,sha256=4SNuhsEtadxeLR3iWMynEv5hPgS_NBSTCnqM8WXrp9A,250 +../../../bin/pip3,sha256=4SNuhsEtadxeLR3iWMynEv5hPgS_NBSTCnqM8WXrp9A,250 +../../../bin/pip3.10,sha256=4SNuhsEtadxeLR3iWMynEv5hPgS_NBSTCnqM8WXrp9A,250 +../../../bin/pip3.10,sha256=4SNuhsEtadxeLR3iWMynEv5hPgS_NBSTCnqM8WXrp9A,250 +pip-22.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip-22.0.2.dist-info/LICENSE.txt,sha256=Y0MApmnUmurmWxLGxIySTFGkzfPR_whtw0VtyLyqIQQ,1093 +pip-22.0.2.dist-info/METADATA,sha256=Yixa0LKkyzjT2N5JQO5qYDgZcmTs6Z6dg4UbwBNyT2A,4166 +pip-22.0.2.dist-info/RECORD,, +pip-22.0.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip-22.0.2.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +pip-22.0.2.dist-info/entry_points.txt,sha256=vUvIlB_ga0fFQuWvFEq6uJKftMG_HNuoe4kgXkb5rNY,126 +pip-22.0.2.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip/__init__.py,sha256=PZBF-ESk5Q0DZxQd4HHmTU_wX8y1ynzxBCRdu_fxHSI,357 +pip/__main__.py,sha256=mXwWDftNLMKfwVqKFWGE_uuBZvGSIiUELhLkeysIuZc,1198 +pip/__pycache__/__init__.cpython-310.pyc,, +pip/__pycache__/__main__.cpython-310.pyc,, +pip/_internal/__init__.py,sha256=nnFCuxrPMgALrIDxSoy-H6Zj4W4UY60D-uL1aJyq0pc,573 +pip/_internal/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/__pycache__/build_env.cpython-310.pyc,, +pip/_internal/__pycache__/cache.cpython-310.pyc,, +pip/_internal/__pycache__/configuration.cpython-310.pyc,, +pip/_internal/__pycache__/exceptions.cpython-310.pyc,, +pip/_internal/__pycache__/main.cpython-310.pyc,, +pip/_internal/__pycache__/pyproject.cpython-310.pyc,, +pip/_internal/__pycache__/self_outdated_check.cpython-310.pyc,, +pip/_internal/__pycache__/wheel_builder.cpython-310.pyc,, +pip/_internal/build_env.py,sha256=QAsnxJFvj74jS2cZUcxk7zXLvrtAYiRL0EkSPkpSJTo,9739 +pip/_internal/cache.py,sha256=71eaYwrls34HJ6gzbmmYiotiKhPNFTM_tqYJXD5nf3s,9441 
+pip/_internal/cli/__init__.py,sha256=FkHBgpxxb-_gd6r1FjnNhfMOzAUYyXoXKJ6abijfcFU,132 +pip/_internal/cli/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/cli/__pycache__/autocompletion.cpython-310.pyc,, +pip/_internal/cli/__pycache__/base_command.cpython-310.pyc,, +pip/_internal/cli/__pycache__/cmdoptions.cpython-310.pyc,, +pip/_internal/cli/__pycache__/command_context.cpython-310.pyc,, +pip/_internal/cli/__pycache__/main.cpython-310.pyc,, +pip/_internal/cli/__pycache__/main_parser.cpython-310.pyc,, +pip/_internal/cli/__pycache__/parser.cpython-310.pyc,, +pip/_internal/cli/__pycache__/progress_bars.cpython-310.pyc,, +pip/_internal/cli/__pycache__/req_command.cpython-310.pyc,, +pip/_internal/cli/__pycache__/spinners.cpython-310.pyc,, +pip/_internal/cli/__pycache__/status_codes.cpython-310.pyc,, +pip/_internal/cli/autocompletion.py,sha256=wY2JPZY2Eji1vhR7bVo-yCBPJ9LCy6P80iOAhZD1Vi8,6676 +pip/_internal/cli/base_command.py,sha256=6IVFmOjObv0ILip28QcgP8glhXHiGRvU_9kO35Hr7Z0,8037 +pip/_internal/cli/cmdoptions.py,sha256=GT2G2YKBj-851qGseugn2Veq7fJe3FA30gWdcziPQvo,28525 +pip/_internal/cli/command_context.py,sha256=a1pBBvvGLDiZ1Kw64_4tT6HmRTwYDoYy8JFgG5Czn7s,760 +pip/_internal/cli/main.py,sha256=ioJ8IVlb2K1qLOxR-tXkee9lURhYV89CDM71MKag7YY,2472 +pip/_internal/cli/main_parser.py,sha256=Q9TnytfuC5Z2JSjBFWVGtEdYLFy7rukNIb04movHdAo,2614 +pip/_internal/cli/parser.py,sha256=CDXTuFr2UD8ozOlZYf1KDziQdo9-X_IaYOiUcyJQwrA,10788 +pip/_internal/cli/progress_bars.py,sha256=_52w11WoZrvDSR3oItLWvLrEZFUKAfLf4Y6I6WtOnIU,10339 +pip/_internal/cli/req_command.py,sha256=VwqonOy18QwZsRsVjHhp-6w15fG9x3Ltwoa8yJqQno8,18669 +pip/_internal/cli/spinners.py,sha256=TFhjxtOnLeNJ5YmRvQm4eKPgPbJNkZiqO8jOXuxRaYU,5076 +pip/_internal/cli/status_codes.py,sha256=sEFHUaUJbqv8iArL3HAtcztWZmGOFX01hTesSytDEh0,116 +pip/_internal/commands/__init__.py,sha256=Vc1HjsLEtyCh7506OozPHPKXe2Hk-z9cFkFF3BMj1lM,3736 +pip/_internal/commands/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/commands/__pycache__/cache.cpython-310.pyc,, +pip/_internal/commands/__pycache__/check.cpython-310.pyc,, +pip/_internal/commands/__pycache__/completion.cpython-310.pyc,, +pip/_internal/commands/__pycache__/configuration.cpython-310.pyc,, +pip/_internal/commands/__pycache__/debug.cpython-310.pyc,, +pip/_internal/commands/__pycache__/download.cpython-310.pyc,, +pip/_internal/commands/__pycache__/freeze.cpython-310.pyc,, +pip/_internal/commands/__pycache__/hash.cpython-310.pyc,, +pip/_internal/commands/__pycache__/help.cpython-310.pyc,, +pip/_internal/commands/__pycache__/index.cpython-310.pyc,, +pip/_internal/commands/__pycache__/install.cpython-310.pyc,, +pip/_internal/commands/__pycache__/list.cpython-310.pyc,, +pip/_internal/commands/__pycache__/search.cpython-310.pyc,, +pip/_internal/commands/__pycache__/show.cpython-310.pyc,, +pip/_internal/commands/__pycache__/uninstall.cpython-310.pyc,, +pip/_internal/commands/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/commands/cache.py,sha256=p9gvc6W_xgxE2zO0o8NXqO1gGJEinEK42qEC-a7Cnuk,7524 +pip/_internal/commands/check.py,sha256=0gjXR7j36xJT5cs2heYU_dfOfpnFfzX8OoPNNoKhqdM,1685 +pip/_internal/commands/completion.py,sha256=kTG_I1VR3N5kGC4Ma9pQTSoY9Q1URCrNyseHSQ-rCL4,2958 +pip/_internal/commands/configuration.py,sha256=arE8vLstjBg-Ar1krXF-bBmT1qBtnL7Fpk-NVh38a0U,8944 +pip/_internal/commands/debug.py,sha256=krET-y45CnQzXwKR1qA3M_tJE4LE2vnQtm3yfGyDSnE,6629 +pip/_internal/commands/download.py,sha256=gVIAEOcpWolhRj9hl89Qzn52G2b_pcZ8naXhxaXobdo,4942 
+pip/_internal/commands/freeze.py,sha256=PaJJB9mT_3vHeZ3mbFL_m1fzTYL-_Or3kDtXwTdZZ-A,2968 +pip/_internal/commands/hash.py,sha256=EVVOuvGtoPEdFi8SNnmdqlCQrhCxV-kJsdwtdcCnXGQ,1703 +pip/_internal/commands/help.py,sha256=gcc6QDkcgHMOuAn5UxaZwAStsRBrnGSn_yxjS57JIoM,1132 +pip/_internal/commands/index.py,sha256=8pYkICUJlccjm3E83b7UuZ5DtOfLh1N7ZHXAgkajjHo,4849 +pip/_internal/commands/install.py,sha256=YVygBF6vfrNi0jmdNBCM6bcoWb7vaALEGG1--8Mmf88,27893 +pip/_internal/commands/list.py,sha256=aKt1PP7enTiNLD_1qDXXaIKQ2QvLmUDfoQU6SYxJ8Ek,12318 +pip/_internal/commands/search.py,sha256=sbBZiARRc050QquOKcCvOr2K3XLsoYebLKZGRi__iUI,5697 +pip/_internal/commands/show.py,sha256=2VicM3jF0YWgn4O1jG_QF5oxOT0ln57VDu1NE6hqWcM,5859 +pip/_internal/commands/uninstall.py,sha256=DNTYAGJNljMO_YYBxrpcwj0FEl7lo_P55_98O6g2TNk,3526 +pip/_internal/commands/wheel.py,sha256=7HAjLclZxIzBrX6JmhmGBVxH5xrjaBYCtSdpQi1pWCE,6206 +pip/_internal/configuration.py,sha256=qmCX3uuVM73PQeAuWQHic22bhops8s31B8k02nFAoiQ,13171 +pip/_internal/distributions/__init__.py,sha256=Hq6kt6gXBgjNit5hTTWLAzeCNOKoB-N0pGYSqehrli8,858 +pip/_internal/distributions/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/distributions/__pycache__/base.cpython-310.pyc,, +pip/_internal/distributions/__pycache__/installed.cpython-310.pyc,, +pip/_internal/distributions/__pycache__/sdist.cpython-310.pyc,, +pip/_internal/distributions/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/distributions/base.py,sha256=3FUYD8Gb4YuSu3pggC_FRctZBDbpm5ZK89tPksIUjoE,1172 +pip/_internal/distributions/installed.py,sha256=HzfNRu3smoOm54m8H2iK6LHzBx6_DEnka4OPEsizbXg,680 +pip/_internal/distributions/sdist.py,sha256=0nJvU1RhZtbwaeYtLbzSwYrbGRcY6IgNsWdEhAHROK8,5499 +pip/_internal/distributions/wheel.py,sha256=-NgzdIs-w_hcer_U81yzgpVTljJRg5m79xufqvbjv0s,1115 +pip/_internal/exceptions.py,sha256=U-dV1ixkSz6NAU6Aw9dosKi2EzZ5D3BA7ilYZuTLKeU,20912 +pip/_internal/index/__init__.py,sha256=vpt-JeTZefh8a-FC22ZeBSXFVbuBcXSGiILhQZJaNpQ,30 +pip/_internal/index/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/index/__pycache__/collector.cpython-310.pyc,, +pip/_internal/index/__pycache__/package_finder.cpython-310.pyc,, +pip/_internal/index/__pycache__/sources.cpython-310.pyc,, +pip/_internal/index/collector.py,sha256=8kXlmlnZ-qAknyxd0duCn5mxFHX-zr468ykutk8WOwo,21392 +pip/_internal/index/package_finder.py,sha256=9UVg-7582nYNEWa0cIIl8otzPm4mlfyrQVuozAcssLo,36783 +pip/_internal/index/sources.py,sha256=SVyPitv08-Qalh2_Bk5diAJ9GAA_d-a93koouQodAG0,6557 +pip/_internal/locations/__init__.py,sha256=ergvPwlfNTmQYFmaRYbj--ZwTN5izgTL9KE5d0FB7-8,17362 +pip/_internal/locations/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/locations/__pycache__/_distutils.cpython-310.pyc,, +pip/_internal/locations/__pycache__/_sysconfig.cpython-310.pyc,, +pip/_internal/locations/__pycache__/base.cpython-310.pyc,, +pip/_internal/locations/_distutils.py,sha256=Sk7tw8ZP1DWMYJ8MibABsa8IME2Ejv1PKeGlYQCBTZc,5871 +pip/_internal/locations/_sysconfig.py,sha256=LQNKTJKyjVqxXaPntlBwdUqTG1xwYf6GVCKMbyRJx5M,7918 +pip/_internal/locations/base.py,sha256=x5D1ONktmPJd8nnUTh-ELsAJ7fiXA-k-0a_vhfi2_Us,1579 +pip/_internal/main.py,sha256=r-UnUe8HLo5XFJz8inTcOOTiu_sxNhgHb6VwlGUllOI,340 +pip/_internal/metadata/__init__.py,sha256=iGoDbe_iTXQTIAEVy9f7dm-VQfZANO8kkwFr1CpqxqI,2036 +pip/_internal/metadata/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/metadata/__pycache__/base.cpython-310.pyc,, +pip/_internal/metadata/__pycache__/pkg_resources.cpython-310.pyc,, 
+pip/_internal/metadata/base.py,sha256=SCRPtShrtPy0lfFxuaFTgJJHsRXToGFToQUAZoBBbeA,19429 +pip/_internal/metadata/pkg_resources.py,sha256=wAnEtrcgH9YtV996MfoBjR2hGLHvi3uxk0vUOHbqBak,9456 +pip/_internal/models/__init__.py,sha256=3DHUd_qxpPozfzouoqa9g9ts1Czr5qaHfFxbnxriepM,63 +pip/_internal/models/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/models/__pycache__/candidate.cpython-310.pyc,, +pip/_internal/models/__pycache__/direct_url.cpython-310.pyc,, +pip/_internal/models/__pycache__/format_control.cpython-310.pyc,, +pip/_internal/models/__pycache__/index.cpython-310.pyc,, +pip/_internal/models/__pycache__/link.cpython-310.pyc,, +pip/_internal/models/__pycache__/scheme.cpython-310.pyc,, +pip/_internal/models/__pycache__/search_scope.cpython-310.pyc,, +pip/_internal/models/__pycache__/selection_prefs.cpython-310.pyc,, +pip/_internal/models/__pycache__/target_python.cpython-310.pyc,, +pip/_internal/models/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/models/candidate.py,sha256=6pcABsaR7CfIHlbJbr2_kMkVJFL_yrYjTx6SVWUnCPQ,990 +pip/_internal/models/direct_url.py,sha256=7XtGQSLLDQb5ZywI2EMnnLcddtf5CJLx44lMtTHPxFw,6350 +pip/_internal/models/format_control.py,sha256=DJpMYjxeYKKQdwNcML2_F0vtAh-qnKTYe-CpTxQe-4g,2520 +pip/_internal/models/index.py,sha256=tYnL8oxGi4aSNWur0mG8DAP7rC6yuha_MwJO8xw0crI,1030 +pip/_internal/models/link.py,sha256=hoT_qsOBAgLBm9GKqpBrNF_mrEXeGXQE-aH_RX2cGgg,9817 +pip/_internal/models/scheme.py,sha256=3EFQp_ICu_shH1-TBqhl0QAusKCPDFOlgHFeN4XowWs,738 +pip/_internal/models/search_scope.py,sha256=LwloG0PJAmtI1hFXIypsD95kWE9xfR5hf_a2v1Vw7sk,4520 +pip/_internal/models/selection_prefs.py,sha256=KZdi66gsR-_RUXUr9uejssk3rmTHrQVJWeNA2sV-VSY,1907 +pip/_internal/models/target_python.py,sha256=qKpZox7J8NAaPmDs5C_aniwfPDxzvpkrCKqfwndG87k,3858 +pip/_internal/models/wheel.py,sha256=wlyz23BcZ40nBLX3rXKtrV6tmc8-8RxHyV-hq5zJ74Q,3525 +pip/_internal/network/__init__.py,sha256=jf6Tt5nV_7zkARBrKojIXItgejvoegVJVKUbhAa5Ioc,50 +pip/_internal/network/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/network/__pycache__/auth.cpython-310.pyc,, +pip/_internal/network/__pycache__/cache.cpython-310.pyc,, +pip/_internal/network/__pycache__/download.cpython-310.pyc,, +pip/_internal/network/__pycache__/lazy_wheel.cpython-310.pyc,, +pip/_internal/network/__pycache__/session.cpython-310.pyc,, +pip/_internal/network/__pycache__/utils.cpython-310.pyc,, +pip/_internal/network/__pycache__/xmlrpc.cpython-310.pyc,, +pip/_internal/network/auth.py,sha256=a3C7Xaa8kTJjXkdi_wrUjqaySc8Z9Yz7U6QIbXfzMyc,12190 +pip/_internal/network/cache.py,sha256=FJ3uTUo3wgf2KHmeZ3ltN9x3tQoy_0X6qNsRtNXsuL0,2131 +pip/_internal/network/download.py,sha256=12Ef_L7MlhNUN_0-n_3DggozWJER8c9J0us16cbvkKA,6062 +pip/_internal/network/lazy_wheel.py,sha256=1b8ZJ1w4bSBzpGzGwJR_CL2yQ6AFIwWQkS1vbPPw2XU,7627 +pip/_internal/network/session.py,sha256=38IKGKC64MTVUIH5XOR1hr2pOCzp39RccykdmGAvqRU,16729 +pip/_internal/network/utils.py,sha256=igLlTu_-q0LmL8FdJKq-Uj7AT_owrQ-T9FfyarkhK5U,4059 +pip/_internal/network/xmlrpc.py,sha256=AzQgG4GgS152_cqmGr_Oz2MIXsCal-xfsis7fA7nmU0,1791 +pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/operations/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/operations/__pycache__/check.cpython-310.pyc,, +pip/_internal/operations/__pycache__/freeze.cpython-310.pyc,, +pip/_internal/operations/__pycache__/prepare.cpython-310.pyc,, +pip/_internal/operations/build/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pip/_internal/operations/build/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/metadata.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/metadata_editable.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/wheel_editable.cpython-310.pyc,, +pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-310.pyc,, +pip/_internal/operations/build/metadata.py,sha256=ES_uRmAvhrNm_nDTpZxshBfUsvnXtkj-g_4rZrH9Rww,1404 +pip/_internal/operations/build/metadata_editable.py,sha256=_Rai0VZjxoeJUkjkuICrq45LtjwFoDOveosMYH43rKc,1456 +pip/_internal/operations/build/metadata_legacy.py,sha256=o-eU21As175hDC7dluM1fJJ_FqokTIShyWpjKaIpHZw,2198 +pip/_internal/operations/build/wheel.py,sha256=AO9XnTGhTgHtZmU8Dkbfo1OGr41rBuSDjIgAa4zUKgE,1063 +pip/_internal/operations/build/wheel_editable.py,sha256=TVETY-L_M_dSEKBhTIcQOP75zKVXw8tuq1U354Mm30A,1405 +pip/_internal/operations/build/wheel_legacy.py,sha256=C9j6rukgQI1n_JeQLoZGuDdfUwzCXShyIdPTp6edbMQ,3064 +pip/_internal/operations/check.py,sha256=ca4O9CkPt9Em9sLCf3H0iVt1GIcW7M8C0U5XooaBuT4,5109 +pip/_internal/operations/freeze.py,sha256=ZiYw5GlUpLVx4VJHz4S1AP2JFNyvH0iq5kpcYj2ovyw,9770 +pip/_internal/operations/install/__init__.py,sha256=mX7hyD2GNBO2mFGokDQ30r_GXv7Y_PLdtxcUv144e-s,51 +pip/_internal/operations/install/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/operations/install/__pycache__/editable_legacy.cpython-310.pyc,, +pip/_internal/operations/install/__pycache__/legacy.cpython-310.pyc,, +pip/_internal/operations/install/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/operations/install/editable_legacy.py,sha256=ee4kfJHNuzTdKItbfAsNOSEwq_vD7DRPGkBdK48yBhU,1354 +pip/_internal/operations/install/legacy.py,sha256=x7BG8kBm0K3JO6AR4sBl0zh2LOrfUaz7EdNt-keHBv4,4091 +pip/_internal/operations/install/wheel.py,sha256=QuQyCZE-XjuJjDYRixo40oUt2ucFhNmSrCbcXY7A9aE,27412 +pip/_internal/operations/prepare.py,sha256=LJP97jsuiCAaTGVIRrcINvxc1ntVsB45MoRbyMIukg4,24145 +pip/_internal/pyproject.py,sha256=Wm2ljdT6spC-tSdf1LBRaMYSJaXr1xUxV3OwdHCW9jc,6722 +pip/_internal/req/__init__.py,sha256=A7mUvT1KAcCYP3H7gUOTx2GRMlgoDur3H68Q0OJqM5A,2793 +pip/_internal/req/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/req/__pycache__/constructors.cpython-310.pyc,, +pip/_internal/req/__pycache__/req_file.cpython-310.pyc,, +pip/_internal/req/__pycache__/req_install.cpython-310.pyc,, +pip/_internal/req/__pycache__/req_set.cpython-310.pyc,, +pip/_internal/req/__pycache__/req_tracker.cpython-310.pyc,, +pip/_internal/req/__pycache__/req_uninstall.cpython-310.pyc,, +pip/_internal/req/constructors.py,sha256=fXmtNI_J77JFP_HRvYcQW-1nKw3AiUu6Q3b1Nm8aMm0,16094 +pip/_internal/req/req_file.py,sha256=5N8OTouPCof-305StC2YK9HBxQMw-xO46skRoBPbkZo,17421 +pip/_internal/req/req_install.py,sha256=jU1HQBT_DnXZean7jY8wPNMhb6_CzdKHcilHFY_o-Fc,32524 +pip/_internal/req/req_set.py,sha256=kHYiLvkKRx21WaLTwOI-54Ng0SSzZZ9SE7FD0PsfvYA,7584 +pip/_internal/req/req_tracker.py,sha256=jK7JDu-Wt73X-gqozrFtgJVlUlnQo0P4IQ4x4_gPlfM,4117 +pip/_internal/req/req_uninstall.py,sha256=K2BHYRRJAfkSpFqcPzc9XfX2EvbhaRtQIPRFmMtUdfo,23814 +pip/_internal/resolution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/resolution/__pycache__/base.cpython-310.pyc,, 
+pip/_internal/resolution/base.py,sha256=qlmh325SBVfvG6Me9gc5Nsh5sdwHBwzHBq6aEXtKsLA,583 +pip/_internal/resolution/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/legacy/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/resolution/legacy/__pycache__/resolver.cpython-310.pyc,, +pip/_internal/resolution/legacy/resolver.py,sha256=b7bf5qL1ROg73sl8dhTvLdD1w5XF8xybBAF6eF_kz7c,18288 +pip/_internal/resolution/resolvelib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/resolution/resolvelib/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/base.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/candidates.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/factory.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/found_candidates.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/provider.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/reporter.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/requirements.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/__pycache__/resolver.cpython-310.pyc,, +pip/_internal/resolution/resolvelib/base.py,sha256=u1O4fkvCO4mhmu5i32xrDv9AX5NgUci_eYVyBDQhTIM,5220 +pip/_internal/resolution/resolvelib/candidates.py,sha256=KR5jxZRSahByOABXbwrX-zNoawa7Gm9Iss-HrvrcvNw,18357 +pip/_internal/resolution/resolvelib/factory.py,sha256=0bbxnUSSjaeTmtIEgeeKtEqhEFfNhv3xpq7j9IaMq2c,28298 +pip/_internal/resolution/resolvelib/found_candidates.py,sha256=hvL3Hoa9VaYo-qEOZkBi2Iqw251UDxPz-uMHVaWmLpE,5705 +pip/_internal/resolution/resolvelib/provider.py,sha256=LzQQyzMVaZYAwLgKInbq-it6mbQL1gX0hGohz5Cr5wg,9915 +pip/_internal/resolution/resolvelib/reporter.py,sha256=3ZVVYrs5PqvLFJkGLcuXoMK5mTInFzl31xjUpDBpZZk,2526 +pip/_internal/resolution/resolvelib/requirements.py,sha256=B1ndvKPSuyyyTEXt9sKhbwminViSWnBrJa7qO2ln4Z0,5455 +pip/_internal/resolution/resolvelib/resolver.py,sha256=ucoVKHtwH6gkZjcfIVJbUiOIHLqJxeYlrKTMIJciYwM,11335 +pip/_internal/self_outdated_check.py,sha256=GKSatNlt2cz_CMGxu72FbUzuPaXpWOnIVKOOYIk0gvY,6849 +pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/utils/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/utils/__pycache__/_log.cpython-310.pyc,, +pip/_internal/utils/__pycache__/appdirs.cpython-310.pyc,, +pip/_internal/utils/__pycache__/compat.cpython-310.pyc,, +pip/_internal/utils/__pycache__/compatibility_tags.cpython-310.pyc,, +pip/_internal/utils/__pycache__/datetime.cpython-310.pyc,, +pip/_internal/utils/__pycache__/deprecation.cpython-310.pyc,, +pip/_internal/utils/__pycache__/direct_url_helpers.cpython-310.pyc,, +pip/_internal/utils/__pycache__/distutils_args.cpython-310.pyc,, +pip/_internal/utils/__pycache__/egg_link.cpython-310.pyc,, +pip/_internal/utils/__pycache__/encoding.cpython-310.pyc,, +pip/_internal/utils/__pycache__/entrypoints.cpython-310.pyc,, +pip/_internal/utils/__pycache__/filesystem.cpython-310.pyc,, +pip/_internal/utils/__pycache__/filetypes.cpython-310.pyc,, +pip/_internal/utils/__pycache__/glibc.cpython-310.pyc,, +pip/_internal/utils/__pycache__/hashes.cpython-310.pyc,, +pip/_internal/utils/__pycache__/inject_securetransport.cpython-310.pyc,, +pip/_internal/utils/__pycache__/logging.cpython-310.pyc,, +pip/_internal/utils/__pycache__/misc.cpython-310.pyc,, +pip/_internal/utils/__pycache__/models.cpython-310.pyc,, +pip/_internal/utils/__pycache__/packaging.cpython-310.pyc,, 
+pip/_internal/utils/__pycache__/setuptools_build.cpython-310.pyc,, +pip/_internal/utils/__pycache__/subprocess.cpython-310.pyc,, +pip/_internal/utils/__pycache__/temp_dir.cpython-310.pyc,, +pip/_internal/utils/__pycache__/unpacking.cpython-310.pyc,, +pip/_internal/utils/__pycache__/urls.cpython-310.pyc,, +pip/_internal/utils/__pycache__/virtualenv.cpython-310.pyc,, +pip/_internal/utils/__pycache__/wheel.cpython-310.pyc,, +pip/_internal/utils/_log.py,sha256=-jHLOE_THaZz5BFcCnoSL9EYAtJ0nXem49s9of4jvKw,1015 +pip/_internal/utils/appdirs.py,sha256=swgcTKOm3daLeXTW6v5BUS2Ti2RvEnGRQYH_yDXklAo,1665 +pip/_internal/utils/compat.py,sha256=ACyBfLgj3_XG-iA5omEDrXqDM0cQKzi8h8HRBInzG6Q,1884 +pip/_internal/utils/compatibility_tags.py,sha256=ydin8QG8BHqYRsPY4OL6cmb44CbqXl1T0xxS97VhHkk,5377 +pip/_internal/utils/datetime.py,sha256=m21Y3wAtQc-ji6Veb6k_M5g6A0ZyFI4egchTdnwh-pQ,242 +pip/_internal/utils/deprecation.py,sha256=NKo8VqLioJ4nnXXGmW4KdasxF90EFHkZaHeX1fT08C8,3627 +pip/_internal/utils/direct_url_helpers.py,sha256=6F1tc2rcKaCZmgfVwsE6ObIe_Pux23mUVYA-2D9wCFc,3206 +pip/_internal/utils/distutils_args.py,sha256=mcAscyp80vTt3xAGTipnpgc83V-_wCvydNELVXLq7JI,1249 +pip/_internal/utils/egg_link.py,sha256=5MVlpz5LirT4iLQq86OYzjXaYF0D4Qk1dprEI7ThST4,2203 +pip/_internal/utils/encoding.py,sha256=bdZ3YgUpaOEBI5MP4-DEXiQarCW3V0rxw1kRz-TaU1Q,1169 +pip/_internal/utils/entrypoints.py,sha256=aPvCnQVi9Hdk35Kloww_D5ibjUpqxgqcJP8O9VuMZek,1055 +pip/_internal/utils/filesystem.py,sha256=rrl-rY1w8TYyKYndUyZlE9ffkQyA4-jI9x_59zXkn5s,5893 +pip/_internal/utils/filetypes.py,sha256=i8XAQ0eFCog26Fw9yV0Yb1ygAqKYB1w9Cz9n0fj8gZU,716 +pip/_internal/utils/glibc.py,sha256=tDfwVYnJCOC0BNVpItpy8CGLP9BjkxFHdl0mTS0J7fc,3110 +pip/_internal/utils/hashes.py,sha256=anpZfFGIT6HcIj2td9NHtE8AWg6GeAIhwpP8GPvZE0E,4811 +pip/_internal/utils/inject_securetransport.py,sha256=o-QRVMGiENrTJxw3fAhA7uxpdEdw6M41TjHYtSVRrcg,795 +pip/_internal/utils/logging.py,sha256=Rvght-fDXL70VWib1cpgZ3iU-kXODV98bNeLUlbqVto,11522 +pip/_internal/utils/misc.py,sha256=MdUB12BMhj73sEmskEutmPyWFaJB7asoPCfLzs_YeT0,19359 +pip/_internal/utils/models.py,sha256=5GoYU586SrxURMvDn_jBMJInitviJg4O5-iOU-6I0WY,1193 +pip/_internal/utils/packaging.py,sha256=5Wm6_x7lKrlqVjPI5MBN_RurcRHwVYoQ7Ksrs84de7s,2108 +pip/_internal/utils/setuptools_build.py,sha256=vNH9hQB9wT6d-h1hVQhBKw91jNeT42meHpVeii-urOI,5652 +pip/_internal/utils/subprocess.py,sha256=vIWGpet5ARBmZ2Qn4NEHNgzCOduqbPIuByZmhhmr6mM,9182 +pip/_internal/utils/temp_dir.py,sha256=zob3PYMVevONkheOMUp_4jDofrEY3HIu5DHK78cSspI,7662 +pip/_internal/utils/unpacking.py,sha256=HUFlMEyCa9dPwdLh6sWeh95DeKytV8rsOyKShEw9y6g,8906 +pip/_internal/utils/urls.py,sha256=AhaesUGl-9it6uvG6fsFPOr9ynFpGaTMk4t5XTX7Z_Q,1759 +pip/_internal/utils/virtualenv.py,sha256=4_48qMzCwB_F5jIK5BC_ua7uiAMVifmQWU9NdaGUoVA,3459 +pip/_internal/utils/wheel.py,sha256=lXOgZyTlOm5HmK8tw5iw0A3_5A6wRzsXHOaQkIvvloU,4549 +pip/_internal/vcs/__init__.py,sha256=UAqvzpbi0VbZo3Ub6skEeZAw-ooIZR-zX_WpCbxyCoU,596 +pip/_internal/vcs/__pycache__/__init__.cpython-310.pyc,, +pip/_internal/vcs/__pycache__/bazaar.cpython-310.pyc,, +pip/_internal/vcs/__pycache__/git.cpython-310.pyc,, +pip/_internal/vcs/__pycache__/mercurial.cpython-310.pyc,, +pip/_internal/vcs/__pycache__/subversion.cpython-310.pyc,, +pip/_internal/vcs/__pycache__/versioncontrol.cpython-310.pyc,, +pip/_internal/vcs/bazaar.py,sha256=IGb5ca1xSZfgegRD2_JeyoZPrQQHs7lEYEIgpVsKpoU,3047 +pip/_internal/vcs/git.py,sha256=mjhwudCx9WlLNkxZ6_kOKmueF0rLoU2i1xeASKF6yiQ,18116 
+pip/_internal/vcs/mercurial.py,sha256=Bzbd518Jsx-EJI0IhIobiQqiRsUv5TWYnrmRIFWE0Gw,5238 +pip/_internal/vcs/subversion.py,sha256=TEMRdwECvMcXakZX0pTNUep79kmBYkWDkWFkrYmcmac,11718 +pip/_internal/vcs/versioncontrol.py,sha256=KUOc-hN51em9jrqxKwUR3JnkgSE-xSOqMiiJcSaL6B8,22811 +pip/_internal/wheel_builder.py,sha256=65rOA8FSYt3c3HyqEw17uujjlCgqmoKEIv6rv9xN2NM,12307 +pip/_vendor/__init__.py,sha256=xjcBX0EP50pkaMdCssrsBXoZgo2hTtYxlcH1CIyA3T4,4708 +pip/_vendor/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/__pycache__/distro.cpython-310.pyc,, +pip/_vendor/__pycache__/six.cpython-310.pyc,, +pip/_vendor/__pycache__/typing_extensions.cpython-310.pyc,, +pip/_vendor/cachecontrol/__init__.py,sha256=1j_YQfjmiix6YyouLrftC6NzksAm8e8xGSjMKMRPIkM,465 +pip/_vendor/cachecontrol/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/adapter.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/cache.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/compat.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/controller.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/serialize.cpython-310.pyc,, +pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-310.pyc,, +pip/_vendor/cachecontrol/_cmd.py,sha256=lxUXqfNTVx84zf6tcWbkLZHA6WVBRtJRpfeA9ZqhaAY,1379 +pip/_vendor/cachecontrol/adapter.py,sha256=ew9OYEQHEOjvGl06ZsuX8W3DAvHWsQKHwWAxISyGug8,5033 +pip/_vendor/cachecontrol/cache.py,sha256=eMS9Bn9JWQkHiIYA5GPRBqKVU95uS-yXkxrzpoafRig,917 +pip/_vendor/cachecontrol/caches/__init__.py,sha256=gGFOtIH8QDRvkP4YAfGIh-u9YYcGZVxwLM1-6e1mPNI,170 +pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-310.pyc,, +pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-310.pyc,, +pip/_vendor/cachecontrol/caches/file_cache.py,sha256=P2KHcNXiqxEW7fCq5KC-NYHGSk0nNR9NIKuN-vBTn-E,4251 +pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=tu_YBV7EV8vdBRGazUErkoRqYYjSBmNcB8dZ7BNomqk,940 +pip/_vendor/cachecontrol/compat.py,sha256=LNx7vqBndYdHU8YuJt53ab_8rzMGTXVrvMb7CZJkxG0,778 +pip/_vendor/cachecontrol/controller.py,sha256=9DSEiV58Gx7Ce69fLCrRcpN-_sHzXTY4ol9bEviatR0,15625 +pip/_vendor/cachecontrol/filewrapper.py,sha256=X4BAQOO26GNOR7nH_fhTzAfeuct2rBQcx_15MyFBpcs,3946 +pip/_vendor/cachecontrol/heuristics.py,sha256=8kAyuZLSCyEIgQr6vbUwfhpqg9ows4mM0IV6DWazevI,4154 +pip/_vendor/cachecontrol/serialize.py,sha256=dlySaeA5U7Q5eHvjiObgo1M8j8_huVjfWjid7Aq-r8c,6783 +pip/_vendor/cachecontrol/wrapper.py,sha256=X3-KMZ20Ho3VtqyVaXclpeQpFzokR5NE8tZSfvKVaB8,774 +pip/_vendor/certifi/__init__.py,sha256=xWdRgntT3j1V95zkRipGOg_A1UfEju2FcpujhysZLRI,62 +pip/_vendor/certifi/__main__.py,sha256=1k3Cr95vCxxGRGDljrW3wMdpZdL3Nhf0u1n-k2qdsCY,255 +pip/_vendor/certifi/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/certifi/__pycache__/__main__.cpython-310.pyc,, +pip/_vendor/certifi/__pycache__/core.cpython-310.pyc,, +pip/_vendor/certifi/cacert.pem,sha256=-og4Keu4zSpgL5shwfhd4kz0eUnVILzrGCi0zRy2kGw,265969 +pip/_vendor/certifi/core.py,sha256=CcwptmiI-3M50jIdO0HT6Fh6W_wqGsf8QcX9yfzvyuc,2791 +pip/_vendor/chardet/__init__.py,sha256=mWZaWmvZkhwfBEAT9O1Y6nRTfKzhT7FHhQTTAujbqUA,3271 +pip/_vendor/chardet/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/big5freq.cpython-310.pyc,, 
+pip/_vendor/chardet/__pycache__/big5prober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/chardistribution.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/charsetgroupprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/charsetprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/codingstatemachine.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/compat.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/cp949prober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/enums.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/escprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/escsm.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/eucjpprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/euckrfreq.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/euckrprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/euctwfreq.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/euctwprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/gb2312freq.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/gb2312prober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/hebrewprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/jisfreq.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/jpcntx.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/langbulgarianmodel.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/langgreekmodel.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/langhebrewmodel.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/langhungarianmodel.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/langrussianmodel.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/langthaimodel.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/langturkishmodel.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/latin1prober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/mbcharsetprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/mbcsgroupprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/mbcssm.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/sbcharsetprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/sbcsgroupprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/sjisprober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/universaldetector.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/utf8prober.cpython-310.pyc,, +pip/_vendor/chardet/__pycache__/version.cpython-310.pyc,, +pip/_vendor/chardet/big5freq.py,sha256=D_zK5GyzoVsRes0HkLJziltFQX0bKCLOrFe9_xDvO_8,31254 +pip/_vendor/chardet/big5prober.py,sha256=kBxHbdetBpPe7xrlb-e990iot64g_eGSLd32lB7_h3M,1757 +pip/_vendor/chardet/chardistribution.py,sha256=3woWS62KrGooKyqz4zQSnjFbJpa6V7g02daAibTwcl8,9411 +pip/_vendor/chardet/charsetgroupprober.py,sha256=GZLReHP6FRRn43hvSOoGCxYamErKzyp6RgOQxVeC3kg,3839 +pip/_vendor/chardet/charsetprober.py,sha256=KSmwJErjypyj0bRZmC5F5eM7c8YQgLYIjZXintZNstg,5110 +pip/_vendor/chardet/cli/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +pip/_vendor/chardet/cli/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/chardet/cli/__pycache__/chardetect.cpython-310.pyc,, +pip/_vendor/chardet/cli/chardetect.py,sha256=XK5zqjUG2a4-y6eLHZ8ThYcp6WWUrdlmELxNypcc2SE,2747 +pip/_vendor/chardet/codingstatemachine.py,sha256=VYp_6cyyki5sHgXDSZnXW4q1oelHc3cu9AyQTX7uug8,3590 +pip/_vendor/chardet/compat.py,sha256=40zr6wICZwknxyuLGGcIOPyve8DTebBCbbvttvnmp5Q,1200 +pip/_vendor/chardet/cp949prober.py,sha256=TZ434QX8zzBsnUvL_8wm4AQVTZ2ZkqEEQL_lNw9f9ow,1855 +pip/_vendor/chardet/enums.py,sha256=Aimwdb9as1dJKZaFNUH2OhWIVBVd6ZkJJ_WK5sNY8cU,1661 
+pip/_vendor/chardet/escprober.py,sha256=kkyqVg1Yw3DIOAMJ2bdlyQgUFQhuHAW8dUGskToNWSc,3950 +pip/_vendor/chardet/escsm.py,sha256=RuXlgNvTIDarndvllNCk5WZBIpdCxQ0kcd9EAuxUh84,10510 +pip/_vendor/chardet/eucjpprober.py,sha256=iD8Jdp0ISRjgjiVN7f0e8xGeQJ5GM2oeZ1dA8nbSeUw,3749 +pip/_vendor/chardet/euckrfreq.py,sha256=-7GdmvgWez4-eO4SuXpa7tBiDi5vRXQ8WvdFAzVaSfo,13546 +pip/_vendor/chardet/euckrprober.py,sha256=MqFMTQXxW4HbzIpZ9lKDHB3GN8SP4yiHenTmf8g_PxY,1748 +pip/_vendor/chardet/euctwfreq.py,sha256=No1WyduFOgB5VITUA7PLyC5oJRNzRyMbBxaKI1l16MA,31621 +pip/_vendor/chardet/euctwprober.py,sha256=13p6EP4yRaxqnP4iHtxHOJ6R2zxHq1_m8hTRjzVZ95c,1747 +pip/_vendor/chardet/gb2312freq.py,sha256=JX8lsweKLmnCwmk8UHEQsLgkr_rP_kEbvivC4qPOrlc,20715 +pip/_vendor/chardet/gb2312prober.py,sha256=gGvIWi9WhDjE-xQXHvNIyrnLvEbMAYgyUSZ65HUfylw,1754 +pip/_vendor/chardet/hebrewprober.py,sha256=c3SZ-K7hvyzGY6JRAZxJgwJ_sUS9k0WYkvMY00YBYFo,13838 +pip/_vendor/chardet/jisfreq.py,sha256=vpmJv2Bu0J8gnMVRPHMFefTRvo_ha1mryLig8CBwgOg,25777 +pip/_vendor/chardet/jpcntx.py,sha256=PYlNqRUQT8LM3cT5FmHGP0iiscFlTWED92MALvBungo,19643 +pip/_vendor/chardet/langbulgarianmodel.py,sha256=rk9CJpuxO0bObboJcv6gNgWuosYZmd8qEEds5y7DS_Y,105697 +pip/_vendor/chardet/langgreekmodel.py,sha256=S-uNQ1ihC75yhBvSux24gLFZv3QyctMwC6OxLJdX-bw,99571 +pip/_vendor/chardet/langhebrewmodel.py,sha256=DzPP6TPGG_-PV7tqspu_d8duueqm7uN-5eQ0aHUw1Gg,98776 +pip/_vendor/chardet/langhungarianmodel.py,sha256=RtJH7DZdsmaHqyK46Kkmnk5wQHiJwJPPJSqqIlpeZRc,102498 +pip/_vendor/chardet/langrussianmodel.py,sha256=THqJOhSxiTQcHboDNSc5yofc2koXXQFHFyjtyuntUfM,131180 +pip/_vendor/chardet/langthaimodel.py,sha256=R1wXHnUMtejpw0JnH_JO8XdYasME6wjVqp1zP7TKLgg,103312 +pip/_vendor/chardet/langturkishmodel.py,sha256=rfwanTptTwSycE4-P-QasPmzd-XVYgevytzjlEzBBu8,95946 +pip/_vendor/chardet/latin1prober.py,sha256=S2IoORhFk39FEFOlSFWtgVybRiP6h7BlLldHVclNkU8,5370 +pip/_vendor/chardet/mbcharsetprober.py,sha256=AR95eFH9vuqSfvLQZN-L5ijea25NOBCoXqw8s5O9xLQ,3413 +pip/_vendor/chardet/mbcsgroupprober.py,sha256=h6TRnnYq2OxG1WdD5JOyxcdVpn7dG0q-vB8nWr5mbh4,2012 +pip/_vendor/chardet/mbcssm.py,sha256=SY32wVIF3HzcjY3BaEspy9metbNSKxIIB0RKPn7tjpI,25481 +pip/_vendor/chardet/metadata/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/chardet/metadata/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/chardet/metadata/__pycache__/languages.cpython-310.pyc,, +pip/_vendor/chardet/metadata/languages.py,sha256=41tLq3eLSrBEbEVVQpVGFq9K7o1ln9b1HpY1l0hCUQo,19474 +pip/_vendor/chardet/sbcharsetprober.py,sha256=nmyMyuxzG87DN6K3Rk2MUzJLMLR69MrWpdnHzOwVUwQ,6136 +pip/_vendor/chardet/sbcsgroupprober.py,sha256=hqefQuXmiFyDBArOjujH6hd6WFXlOD1kWCsxDhjx5Vc,4309 +pip/_vendor/chardet/sjisprober.py,sha256=IIt-lZj0WJqK4rmUZzKZP4GJlE8KUEtFYVuY96ek5MQ,3774 +pip/_vendor/chardet/universaldetector.py,sha256=DpZTXCX0nUHXxkQ9sr4GZxGB_hveZ6hWt3uM94cgWKs,12503 +pip/_vendor/chardet/utf8prober.py,sha256=IdD8v3zWOsB8OLiyPi-y_fqwipRFxV9Nc1eKBLSuIEw,2766 +pip/_vendor/chardet/version.py,sha256=A4CILFAd8MRVG1HoXPp45iK9RLlWyV73a1EtwE8Tvn8,242 +pip/_vendor/colorama/__init__.py,sha256=pCdErryzLSzDW5P-rRPBlPLqbBtIRNJB6cMgoeJns5k,239 +pip/_vendor/colorama/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/colorama/__pycache__/ansi.cpython-310.pyc,, +pip/_vendor/colorama/__pycache__/ansitowin32.cpython-310.pyc,, +pip/_vendor/colorama/__pycache__/initialise.cpython-310.pyc,, +pip/_vendor/colorama/__pycache__/win32.cpython-310.pyc,, +pip/_vendor/colorama/__pycache__/winterm.cpython-310.pyc,, 
+pip/_vendor/colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522 +pip/_vendor/colorama/ansitowin32.py,sha256=yV7CEmCb19MjnJKODZEEvMH_fnbJhwnpzo4sxZuGXmA,10517 +pip/_vendor/colorama/initialise.py,sha256=PprovDNxMTrvoNHFcL2NZjpH2XzDc8BLxLxiErfUl4k,1915 +pip/_vendor/colorama/win32.py,sha256=bJ8Il9jwaBN5BJ8bmN6FoYZ1QYuMKv2j8fGrXh7TJjw,5404 +pip/_vendor/colorama/winterm.py,sha256=2y_2b7Zsv34feAsP67mLOVc-Bgq51mdYGo571VprlrM,6438 +pip/_vendor/distlib/__init__.py,sha256=y-rKDBB99QJ3N1PJGAXQo89ou615aAeBjV2brBxKgM8,581 +pip/_vendor/distlib/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/compat.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/database.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/index.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/locators.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/manifest.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/markers.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/metadata.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/resources.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/scripts.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/util.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/version.cpython-310.pyc,, +pip/_vendor/distlib/__pycache__/wheel.cpython-310.pyc,, +pip/_vendor/distlib/compat.py,sha256=tfoMrj6tujk7G4UC2owL6ArgDuCKabgBxuJRGZSmpko,41259 +pip/_vendor/distlib/database.py,sha256=hBO2dgvDF7W3BqX8Ecns6p_RPerCaIbNKbdUOuJ1a14,51456 +pip/_vendor/distlib/index.py,sha256=UfcimNW19AB7IKWam4VaJbXuCBvArKfSxhV16EwavzE,20739 +pip/_vendor/distlib/locators.py,sha256=4D2hEcHePNuW4mXEZ3Cuw12eW-vbO-4WuAlbf4h5K7w,51963 +pip/_vendor/distlib/manifest.py,sha256=nQEhYmgoreaBZzyFzwYsXxJARu3fo4EkunU163U16iE,14811 +pip/_vendor/distlib/markers.py,sha256=TpHHHLgkzyT7YHbwj-2i6weRaq-Ivy2-MUnrDkjau-U,5058 +pip/_vendor/distlib/metadata.py,sha256=vatoxFdmBr6ie-sTVXVNPOPG3uwMDWJTnEECnm7xDCw,39109 +pip/_vendor/distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820 +pip/_vendor/distlib/scripts.py,sha256=tjSwENINeV91ROZxec5zTSMRg2jEeKc4enyCHDzNvEE,17720 +pip/_vendor/distlib/util.py,sha256=31dPXn3Rfat0xZLeVoFpuniyhe6vsbl9_QN-qd9Lhlk,66262 +pip/_vendor/distlib/version.py,sha256=WG__LyAa2GwmA6qSoEJtvJE8REA1LZpbSizy8WvhJLk,23513 +pip/_vendor/distlib/wheel.py,sha256=pj5VVCjqZMcHvgizORWwAFPS7hOk61CZ59dxP8laQ4E,42943 +pip/_vendor/distro.py,sha256=O1EeHMq1-xAO373JI2_6pYEtd09yEkxtmrYkdY-9S-w,48414 +pip/_vendor/html5lib/__init__.py,sha256=BYzcKCqeEii52xDrqBFruhnmtmkiuHXFyFh-cglQ8mk,1160 +pip/_vendor/html5lib/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-310.pyc,, +pip/_vendor/html5lib/__pycache__/_inputstream.cpython-310.pyc,, +pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-310.pyc,, +pip/_vendor/html5lib/__pycache__/_utils.cpython-310.pyc,, +pip/_vendor/html5lib/__pycache__/constants.cpython-310.pyc,, +pip/_vendor/html5lib/__pycache__/html5parser.cpython-310.pyc,, +pip/_vendor/html5lib/__pycache__/serializer.cpython-310.pyc,, +pip/_vendor/html5lib/_ihatexml.py,sha256=ifOwF7pXqmyThIXc3boWc96s4MDezqRrRVp7FwDYUFs,16728 +pip/_vendor/html5lib/_inputstream.py,sha256=jErNASMlkgs7MpOM9Ve_VdLDJyFFweAjLuhVutZz33U,32353 +pip/_vendor/html5lib/_tokenizer.py,sha256=04mgA2sNTniutl2fxFv-ei5bns4iRaPxVXXHh_HrV_4,77040 +pip/_vendor/html5lib/_trie/__init__.py,sha256=nqfgO910329BEVJ5T4psVwQtjd2iJyEXQ2-X8c1YxwU,109 +pip/_vendor/html5lib/_trie/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/html5lib/_trie/__pycache__/_base.cpython-310.pyc,, 
+pip/_vendor/html5lib/_trie/__pycache__/py.cpython-310.pyc,, +pip/_vendor/html5lib/_trie/_base.py,sha256=CaybYyMro8uERQYjby2tTeSUatnWDfWroUN9N7ety5w,1013 +pip/_vendor/html5lib/_trie/py.py,sha256=wXmQLrZRf4MyWNyg0m3h81m9InhLR7GJ002mIIZh-8o,1775 +pip/_vendor/html5lib/_utils.py,sha256=Dx9AKntksRjFT1veBj7I362pf5OgIaT0zglwq43RnfU,4931 +pip/_vendor/html5lib/constants.py,sha256=Ll-yzLU_jcjyAI_h57zkqZ7aQWE5t5xA4y_jQgoUUhw,83464 +pip/_vendor/html5lib/filters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/html5lib/filters/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-310.pyc,, +pip/_vendor/html5lib/filters/__pycache__/base.cpython-310.pyc,, +pip/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-310.pyc,, +pip/_vendor/html5lib/filters/__pycache__/lint.cpython-310.pyc,, +pip/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-310.pyc,, +pip/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-310.pyc,, +pip/_vendor/html5lib/filters/__pycache__/whitespace.cpython-310.pyc,, +pip/_vendor/html5lib/filters/alphabeticalattributes.py,sha256=lViZc2JMCclXi_5gduvmdzrRxtO5Xo9ONnbHBVCsykU,919 +pip/_vendor/html5lib/filters/base.py,sha256=z-IU9ZAYjpsVsqmVt7kuWC63jR11hDMr6CVrvuao8W0,286 +pip/_vendor/html5lib/filters/inject_meta_charset.py,sha256=egDXUEHXmAG9504xz0K6ALDgYkvUrC2q15YUVeNlVQg,2945 +pip/_vendor/html5lib/filters/lint.py,sha256=jk6q56xY0ojiYfvpdP-OZSm9eTqcAdRqhCoPItemPYA,3643 +pip/_vendor/html5lib/filters/optionaltags.py,sha256=8lWT75J0aBOHmPgfmqTHSfPpPMp01T84NKu0CRedxcE,10588 +pip/_vendor/html5lib/filters/sanitizer.py,sha256=m6oGmkBhkGAnn2nV6D4hE78SCZ6WEnK9rKdZB3uXBIc,26897 +pip/_vendor/html5lib/filters/whitespace.py,sha256=8eWqZxd4UC4zlFGW6iyY6f-2uuT8pOCSALc3IZt7_t4,1214 +pip/_vendor/html5lib/html5parser.py,sha256=anr-aXre_ImfrkQ35c_rftKXxC80vJCREKe06Tq15HA,117186 +pip/_vendor/html5lib/serializer.py,sha256=_PpvcZF07cwE7xr9uKkZqh5f4UEaI8ltCU2xPJzaTpk,15759 +pip/_vendor/html5lib/treeadapters/__init__.py,sha256=A0rY5gXIe4bJOiSGRO_j_tFhngRBO8QZPzPtPw5dFzo,679 +pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-310.pyc,, +pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-310.pyc,, +pip/_vendor/html5lib/treeadapters/genshi.py,sha256=CH27pAsDKmu4ZGkAUrwty7u0KauGLCZRLPMzaO3M5vo,1715 +pip/_vendor/html5lib/treeadapters/sax.py,sha256=BKS8woQTnKiqeffHsxChUqL4q2ZR_wb5fc9MJ3zQC8s,1776 +pip/_vendor/html5lib/treebuilders/__init__.py,sha256=AysSJyvPfikCMMsTVvaxwkgDieELD5dfR8FJIAuq7hY,3592 +pip/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/html5lib/treebuilders/__pycache__/base.cpython-310.pyc,, +pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-310.pyc,, +pip/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-310.pyc,, +pip/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-310.pyc,, +pip/_vendor/html5lib/treebuilders/base.py,sha256=z-o51vt9r_l2IDG5IioTOKGzZne4Fy3_Fc-7ztrOh4I,14565 +pip/_vendor/html5lib/treebuilders/dom.py,sha256=22whb0C71zXIsai5mamg6qzBEiigcBIvaDy4Asw3at0,8925 +pip/_vendor/html5lib/treebuilders/etree.py,sha256=w5ZFpKk6bAxnrwD2_BrF5EVC7vzz0L3LMi9Sxrbc_8w,12836 +pip/_vendor/html5lib/treebuilders/etree_lxml.py,sha256=9gqDjs-IxsPhBYa5cpvv2FZ1KZlG83Giusy2lFmvIkE,14766 +pip/_vendor/html5lib/treewalkers/__init__.py,sha256=OBPtc1TU5mGyy18QDMxKEyYEz0wxFUUNj5v0-XgmYhY,5719 +pip/_vendor/html5lib/treewalkers/__pycache__/__init__.cpython-310.pyc,, 
+pip/_vendor/html5lib/treewalkers/__pycache__/base.cpython-310.pyc,, +pip/_vendor/html5lib/treewalkers/__pycache__/dom.cpython-310.pyc,, +pip/_vendor/html5lib/treewalkers/__pycache__/etree.cpython-310.pyc,, +pip/_vendor/html5lib/treewalkers/__pycache__/etree_lxml.cpython-310.pyc,, +pip/_vendor/html5lib/treewalkers/__pycache__/genshi.cpython-310.pyc,, +pip/_vendor/html5lib/treewalkers/base.py,sha256=ouiOsuSzvI0KgzdWP8PlxIaSNs9falhbiinAEc_UIJY,7476 +pip/_vendor/html5lib/treewalkers/dom.py,sha256=EHyFR8D8lYNnyDU9lx_IKigVJRyecUGua0mOi7HBukc,1413 +pip/_vendor/html5lib/treewalkers/etree.py,sha256=xo1L5m9VtkfpFJK0pFmkLVajhqYYVisVZn3k9kYpPkI,4551 +pip/_vendor/html5lib/treewalkers/etree_lxml.py,sha256=_b0LAVWLcVu9WaU_-w3D8f0IRSpCbjf667V-3NRdhTw,6357 +pip/_vendor/html5lib/treewalkers/genshi.py,sha256=4D2PECZ5n3ZN3qu3jMl9yY7B81jnQApBQSVlfaIuYbA,2309 +pip/_vendor/idna/__init__.py,sha256=KJQN1eQBr8iIK5SKrJ47lXvxG0BJ7Lm38W4zT0v_8lk,849 +pip/_vendor/idna/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/codec.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/compat.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/core.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/idnadata.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/intranges.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/package_data.cpython-310.pyc,, +pip/_vendor/idna/__pycache__/uts46data.cpython-310.pyc,, +pip/_vendor/idna/codec.py,sha256=6ly5odKfqrytKT9_7UrlGklHnf1DSK2r9C6cSM4sa28,3374 +pip/_vendor/idna/compat.py,sha256=0_sOEUMT4CVw9doD3vyRhX80X19PwqFoUBs7gWsFME4,321 +pip/_vendor/idna/core.py,sha256=RFIkY-HhFZaDoBEFjGwyGd_vWI04uOAQjnzueMWqwOU,12795 +pip/_vendor/idna/idnadata.py,sha256=fzMzkCea2xieVxcrjngJ-2pLsKQNejPCZFlBajIuQdw,44025 +pip/_vendor/idna/intranges.py,sha256=YBr4fRYuWH7kTKS2tXlFjM24ZF1Pdvcir-aywniInqg,1881 +pip/_vendor/idna/package_data.py,sha256=szxQhV0ZD0nKJ84Kuobw3l8q4_KeCyXjFRdpwIpKZmw,21 +pip/_vendor/idna/uts46data.py,sha256=o-D7V-a0fOLZNd7tvxof6MYfUd0TBZzE2bLR5XO67xU,204400 +pip/_vendor/msgpack/__init__.py,sha256=2gJwcsTIaAtCM0GMi2rU-_Y6kILeeQuqRkrQ22jSANc,1118 +pip/_vendor/msgpack/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/msgpack/__pycache__/_version.cpython-310.pyc,, +pip/_vendor/msgpack/__pycache__/exceptions.cpython-310.pyc,, +pip/_vendor/msgpack/__pycache__/ext.cpython-310.pyc,, +pip/_vendor/msgpack/__pycache__/fallback.cpython-310.pyc,, +pip/_vendor/msgpack/_version.py,sha256=JpTcnRd3YUioA24NDtDZbLW0Nhl2yA-N1Rq2lLDBB-g,20 +pip/_vendor/msgpack/exceptions.py,sha256=dCTWei8dpkrMsQDcjQk74ATl9HsIBH0ybt8zOPNqMYc,1081 +pip/_vendor/msgpack/ext.py,sha256=4l356Y4sVEcvCla2dh_cL57vh4GMhZfa3kuWHFHYz6A,6088 +pip/_vendor/msgpack/fallback.py,sha256=L5jriXysURbf6rPbbHbvXgvoFrKZiryIBmujMTcrf3A,34475 +pip/_vendor/packaging/__about__.py,sha256=ugASIO2w1oUyH8_COqQ2X_s0rDhjbhQC3yJocD03h2c,661 +pip/_vendor/packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497 +pip/_vendor/packaging/__pycache__/__about__.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/_structures.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/markers.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/requirements.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/tags.cpython-310.pyc,, +pip/_vendor/packaging/__pycache__/utils.cpython-310.pyc,, 
+pip/_vendor/packaging/__pycache__/version.cpython-310.pyc,, +pip/_vendor/packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488 +pip/_vendor/packaging/_musllinux.py,sha256=_KGgY_qc7vhMGpoqss25n2hiLCNKRtvz9mCrS7gkqyc,4378 +pip/_vendor/packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431 +pip/_vendor/packaging/markers.py,sha256=AJBOcY8Oq0kYc570KuuPTkvuqjAlhufaE2c9sCUbm64,8487 +pip/_vendor/packaging/requirements.py,sha256=NtDlPBtojpn1IUC85iMjPNsUmufjpSlwnNA-Xb4m5NA,4676 +pip/_vendor/packaging/specifiers.py,sha256=LRQ0kFsHrl5qfcFNEEJrIFYsnIHQUJXY9fIsakTrrqE,30110 +pip/_vendor/packaging/tags.py,sha256=lmsnGNiJ8C4D_Pf9PbM0qgbZvD9kmB9lpZBQUZa3R_Y,15699 +pip/_vendor/packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200 +pip/_vendor/packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665 +pip/_vendor/pep517/__init__.py,sha256=Y1bATL2qbFNN6M_DQa4yyrwqjpIiL-j9T6kBmR0DS14,130 +pip/_vendor/pep517/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pep517/__pycache__/build.cpython-310.pyc,, +pip/_vendor/pep517/__pycache__/check.cpython-310.pyc,, +pip/_vendor/pep517/__pycache__/colorlog.cpython-310.pyc,, +pip/_vendor/pep517/__pycache__/compat.cpython-310.pyc,, +pip/_vendor/pep517/__pycache__/dirtools.cpython-310.pyc,, +pip/_vendor/pep517/__pycache__/envbuild.cpython-310.pyc,, +pip/_vendor/pep517/__pycache__/meta.cpython-310.pyc,, +pip/_vendor/pep517/__pycache__/wrappers.cpython-310.pyc,, +pip/_vendor/pep517/build.py,sha256=2bar6EdjwIz2Dlfy94qdxn3oA9mVnnny40mfoT5f-qI,3457 +pip/_vendor/pep517/check.py,sha256=bCORq1WrHjhpTONa-zpAqG0EB9rHNuhO1ORu6DsDuL8,6084 +pip/_vendor/pep517/colorlog.py,sha256=Tk9AuYm_cLF3BKTBoSTJt9bRryn0aFojIQOwbfVUTxQ,4098 +pip/_vendor/pep517/compat.py,sha256=NmLImE5oiDT3gbEhJ4w7xeoMFcpAPrGu_NltBytSJUY,1253 +pip/_vendor/pep517/dirtools.py,sha256=2mkAkAL0mRz_elYFjRKuekTJVipH1zTn4tbf1EDev84,1129 +pip/_vendor/pep517/envbuild.py,sha256=zFde--rmzjXMLXcm7SA_3hDtgk5VCTA8hjpk88RbF6E,6100 +pip/_vendor/pep517/in_process/__init__.py,sha256=MyWoAi8JHdcBv7yXuWpUSVADbx6LSB9rZh7kTIgdA8Y,563 +pip/_vendor/pep517/in_process/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pep517/in_process/__pycache__/_in_process.cpython-310.pyc,, +pip/_vendor/pep517/in_process/_in_process.py,sha256=D3waguyNSGcwosociD5USfcycYr2RCzCjYtxX5UHQmQ,11201 +pip/_vendor/pep517/meta.py,sha256=8mnM5lDnT4zXQpBTliJbRGfesH7iioHwozbDxALPS9Y,2463 +pip/_vendor/pep517/wrappers.py,sha256=impq7Cz_LL1iDF1iiOzYWB4MaEu6O6Gps7TJ5qsJz1Q,13429 +pip/_vendor/pkg_resources/__init__.py,sha256=NnpQ3g6BCHzpMgOR_OLBmYtniY4oOzdKpwqghfq_6ug,108287 +pip/_vendor/pkg_resources/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pkg_resources/__pycache__/py31compat.cpython-310.pyc,, +pip/_vendor/pkg_resources/py31compat.py,sha256=CRk8fkiPRDLsbi5pZcKsHI__Pbmh_94L8mr9Qy9Ab2U,562 +pip/_vendor/platformdirs/__init__.py,sha256=Aizpxewwd4nY63Gqw-Od1Rso9Ah4bSoc6rkx-GBRu2Y,12676 +pip/_vendor/platformdirs/__main__.py,sha256=ZmsnTxEOxtTvwa-Y_Vfab_JN3X4XCVeN8X0yyy9-qnc,1176 +pip/_vendor/platformdirs/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/__main__.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/android.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/api.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/macos.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/unix.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/version.cpython-310.pyc,, +pip/_vendor/platformdirs/__pycache__/windows.cpython-310.pyc,, 
+pip/_vendor/platformdirs/android.py,sha256=xhlD4NmrKCARe5lgnpBGYo4lOYxEOBOByNDNYy91gEE,4012 +pip/_vendor/platformdirs/api.py,sha256=MXKHXOL3eh_-trSok-JUTjAR_zjmmKF3rjREVABjP8s,4910 +pip/_vendor/platformdirs/macos.py,sha256=-3UXQewbT0yMhMdkzRXfXGAntmLIH7Qt4a9Hlf8I5_Y,2655 +pip/_vendor/platformdirs/unix.py,sha256=b4aVYTz0qZ50HntwOXo8r6tp82jAa3qTjxw-WlnC2yc,6910 +pip/_vendor/platformdirs/version.py,sha256=bXzLJCe23FNQRQrf7ZRWKejxWnct_wft7dxdkMGT33E,80 +pip/_vendor/platformdirs/windows.py,sha256=ISruopR5UGBePC0BxCxXevkZYfjJsIZc49YWU5iYfQ4,6439 +pip/_vendor/progress/__init__.py,sha256=1HejNZtv2ouUNQeStUDAtZrtwkz_3FmYKQ476hJ7zOs,5294 +pip/_vendor/progress/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/progress/__pycache__/bar.cpython-310.pyc,, +pip/_vendor/progress/__pycache__/colors.cpython-310.pyc,, +pip/_vendor/progress/__pycache__/counter.cpython-310.pyc,, +pip/_vendor/progress/__pycache__/spinner.cpython-310.pyc,, +pip/_vendor/progress/bar.py,sha256=GbedY0oZ-Q1duXjmvVLO0tSf-uTSH7hJ3zzyI91Esws,2942 +pip/_vendor/progress/colors.py,sha256=cCYXQnYFYVmQKKmYEbQ_lj6SPSFzdw4FN98F2x2kR-U,2655 +pip/_vendor/progress/counter.py,sha256=zYt9DWH0_05s8Q9TrJwHVud-WwsyyaR3PwYtk5hxwwQ,1613 +pip/_vendor/progress/spinner.py,sha256=u5ElzW94XEiLGH-aAlr54VJtKfeK745xr6UfGvvflzU,1461 +pip/_vendor/pygments/__init__.py,sha256=CAmA9UthykwxvtutUcH0IxqtiyQcSg6CmYdM-jKlcRY,3002 +pip/_vendor/pygments/__main__.py,sha256=X7rGLMUC54EXgO14FZ9goKXZDmhPzKXTsUglmb_McIU,353 +pip/_vendor/pygments/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/__main__.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/cmdline.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/console.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/filter.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/formatter.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/lexer.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/modeline.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/plugin.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/regexopt.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/scanner.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/sphinxext.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/style.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/token.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/unistring.cpython-310.pyc,, +pip/_vendor/pygments/__pycache__/util.cpython-310.pyc,, +pip/_vendor/pygments/cmdline.py,sha256=XpsyWgErcSqHC7rXiYKLF3Y61Uy8SR2DNQDDhZGuezg,23408 +pip/_vendor/pygments/console.py,sha256=QZXBUAkyl4dPLQ1e6XHjQu3mmXBWvuGQwsQT2q1mtCY,1697 +pip/_vendor/pygments/filter.py,sha256=35iMZiB1rcuogxokm92kViB2DPXPp_wWoxWuMmwvvzY,1938 +pip/_vendor/pygments/filters/__init__.py,sha256=-veOimzCyYGEARru2Dfo6ofSYcZ8tGsIVuMprtaZQ24,40292 +pip/_vendor/pygments/filters/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pygments/formatter.py,sha256=zSBbX2U_OOriy7SJvSTK6OAxjuXtROWxQlNpJEJZjBA,2917 +pip/_vendor/pygments/formatters/__init__.py,sha256=fjkYDy5-F998XczKi0ymHFayr5ObIRLHF8cgp9k8kpA,5119 +pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/bbcode.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/groff.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/html.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/img.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/irc.cpython-310.pyc,, 
+pip/_vendor/pygments/formatters/__pycache__/latex.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/other.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/pangomarkup.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/rtf.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/svg.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/terminal.cpython-310.pyc,, +pip/_vendor/pygments/formatters/__pycache__/terminal256.cpython-310.pyc,, +pip/_vendor/pygments/formatters/_mapping.py,sha256=3A1rYSjYN9MLduCFWy2_mYhllPVpwlw55anRYnPXX8w,6516 +pip/_vendor/pygments/formatters/bbcode.py,sha256=cSKMOioUnE4TzvCCsK4IbJ6G78W07ZwHtkz4V1Wte0U,3314 +pip/_vendor/pygments/formatters/groff.py,sha256=ULgMKvGeLswX0KZn3IBp0p0U3rruiSHBtpl6O5qbqLs,5005 +pip/_vendor/pygments/formatters/html.py,sha256=0jM7Jc4xA4tsjmPq35uklm_En_OVdcNb0__SEXp2pDQ,35330 +pip/_vendor/pygments/formatters/img.py,sha256=r4iag_jCfyv_LhIt-1fRDeVEEoAfVJzkD9nZChIwiS8,21819 +pip/_vendor/pygments/formatters/irc.py,sha256=gi_IeIZeNaTfTMtvseLigZdS6lNicN7r7O7rnI6myo0,5871 +pip/_vendor/pygments/formatters/latex.py,sha256=qZUerrHt2Nn2aB4gJcdqj99qBkIxl_1v1ukYsf230Gk,18930 +pip/_vendor/pygments/formatters/other.py,sha256=Q01LtkqPZ8m_EYdgMVzXPUGjHoL00lXI3By97wzytYU,5073 +pip/_vendor/pygments/formatters/pangomarkup.py,sha256=ZpjALTSuGFwviJd5kOYwr-1NgqxCX3XRJrjXC7x1UbQ,2212 +pip/_vendor/pygments/formatters/rtf.py,sha256=qh7-z_wbUsTY6z7fZUGrYECYBlWB0wEdBwIZVEVybL0,5014 +pip/_vendor/pygments/formatters/svg.py,sha256=T7Jj004I3JUPOr48aAhQ368K2qWCciUyMQ2tdU-LB-4,7335 +pip/_vendor/pygments/formatters/terminal.py,sha256=cRD5hitINOkYlGZo9ma252vpJYPSGNgLivrsm6zGyec,4674 +pip/_vendor/pygments/formatters/terminal256.py,sha256=Bvz9zZL3UWc94TDm1GhKMI4x0BTit0XplhyRL0zmtkw,11753 +pip/_vendor/pygments/lexer.py,sha256=ECXWlEsbRnKs_njozZns6BGQ4riTMzct_BzAr3zV6dY,31937 +pip/_vendor/pygments/lexers/__init__.py,sha256=6Ds0GVBP3jrIU02wmjRdpoL4eFGhwT2IVD1zf3cV5_Y,11307 +pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-310.pyc,, +pip/_vendor/pygments/lexers/__pycache__/python.cpython-310.pyc,, +pip/_vendor/pygments/lexers/_mapping.py,sha256=jAxmvh5wvNkD-p3Fh6E7hY_B0sGbcxWRfseT6iq7ex4,70032 +pip/_vendor/pygments/lexers/python.py,sha256=LXnk43Lcngqn9xj6eRqdk2f73oF4kHZWiwgHMM_RlVM,52776 +pip/_vendor/pygments/modeline.py,sha256=37fen3cf1moCz4vMVJqX41eAQCmj8pzUchikgPcHp-U,986 +pip/_vendor/pygments/plugin.py,sha256=zGSig3S7QX-3o6RDxd4_Uvice_t25l_BN9aQQ9k8vmU,1727 +pip/_vendor/pygments/regexopt.py,sha256=mj8Fgu3sT0d5PZwRwDLexEvVOQbuHeosubQnqVwgiqs,3072 +pip/_vendor/pygments/scanner.py,sha256=nGoHy-Npk2ylUd4bws_CJN1hK785Xqo8e0teRmNX2jo,3091 +pip/_vendor/pygments/sphinxext.py,sha256=FZ2puvLe2Bztqtj6UJvQd7D8TvtOZ1GsfRJObvH59tE,4630 +pip/_vendor/pygments/style.py,sha256=lGyan5bU42q1kGMfFqafwL3g1j5EurTvfkv8vdP7NzQ,6257 +pip/_vendor/pygments/styles/__init__.py,sha256=Qx2zq6ufbDNE2cTp51M-s9zW-sDE-KLIqFw31qr3Bhg,3252 +pip/_vendor/pygments/styles/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pygments/token.py,sha256=lNPgeaQTzu2DEUi6n_lxAIU7uy4DVj8LMI3nSVnTjks,6143 +pip/_vendor/pygments/unistring.py,sha256=Xs0FzOzE0l0iWRoTlcgi-Q_kAMdF5Gt5FL_goGKJc98,63188 +pip/_vendor/pygments/util.py,sha256=s9n8BQXIxG3lIwCPWv5-ci8yhaqq5JbEVK9v8Z-8_3I,9123 +pip/_vendor/pyparsing/__init__.py,sha256=jXheGTFT1b6r_4WxuOE0uVUqiouLJ3WHzOScpLieRgQ,9107 +pip/_vendor/pyparsing/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pyparsing/__pycache__/actions.cpython-310.pyc,, 
+pip/_vendor/pyparsing/__pycache__/common.cpython-310.pyc,, +pip/_vendor/pyparsing/__pycache__/core.cpython-310.pyc,, +pip/_vendor/pyparsing/__pycache__/exceptions.cpython-310.pyc,, +pip/_vendor/pyparsing/__pycache__/helpers.cpython-310.pyc,, +pip/_vendor/pyparsing/__pycache__/results.cpython-310.pyc,, +pip/_vendor/pyparsing/__pycache__/testing.cpython-310.pyc,, +pip/_vendor/pyparsing/__pycache__/unicode.cpython-310.pyc,, +pip/_vendor/pyparsing/__pycache__/util.cpython-310.pyc,, +pip/_vendor/pyparsing/actions.py,sha256=60v7mETOBzc01YPH_qQD5isavgcSJpAfIKpzgjM3vaU,6429 +pip/_vendor/pyparsing/common.py,sha256=lFL97ooIeR75CmW5hjURZqwDCTgruqltcTCZ-ulLO2Q,12936 +pip/_vendor/pyparsing/core.py,sha256=GtQsD06HlwKPc7M8K8hyOuOW-cRnd87AxAHq-ad5lEk,212248 +pip/_vendor/pyparsing/diagram/__init__.py,sha256=h0gsUwmo5N3shgvfXVQTtqvTpUAv-ZdQjSQ6IUJmsxY,22165 +pip/_vendor/pyparsing/diagram/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/pyparsing/exceptions.py,sha256=H4D9gqMavqmAFSsdrU_J6bO-jA-T-A7yvtXWZpooIUA,9030 +pip/_vendor/pyparsing/helpers.py,sha256=kqpIZFG-y0fQ3g_TmloYllo9we6YCYiewZMXIK0y5wc,38299 +pip/_vendor/pyparsing/results.py,sha256=4D-oURF1cLeL7k0d3zMqUuWH_gTjop_OrZwik9O0HXU,25339 +pip/_vendor/pyparsing/testing.py,sha256=szs8AKZREZMhL0y0vsMfaTVAnpqPHetg6VKJBNmc4QY,13388 +pip/_vendor/pyparsing/unicode.py,sha256=IR-ioeGY29cZ49tG8Ts7ITPWWNP5G2DcZs58oa8zn44,10381 +pip/_vendor/pyparsing/util.py,sha256=kq772O5YSeXOSdP-M31EWpbH_ayj7BMHImBYo9xPD5M,6805 +pip/_vendor/requests/__init__.py,sha256=6IUFQM6K9V2NIu4fe4LtUsN21-TFbw_w3EfPpdUN-qc,5130 +pip/_vendor/requests/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/__version__.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/_internal_utils.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/adapters.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/api.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/auth.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/certs.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/compat.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/cookies.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/exceptions.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/help.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/hooks.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/models.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/packages.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/sessions.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/status_codes.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/structures.cpython-310.pyc,, +pip/_vendor/requests/__pycache__/utils.cpython-310.pyc,, +pip/_vendor/requests/__version__.py,sha256=q8miOQaomOv3S74lK4eQs1zZ5jwcnOusyEU-M2idhts,441 +pip/_vendor/requests/_internal_utils.py,sha256=Zx3PnEUccyfsB-ie11nZVAW8qClJy0gx1qNME7rgT18,1096 +pip/_vendor/requests/adapters.py,sha256=WazYJQ_b2LHhNDb_y0hscNlWVsSe5ca5I3pymPrer5w,21861 +pip/_vendor/requests/api.py,sha256=hjuoP79IAEmX6Dysrw8t032cLfwLHxbI_wM4gC5G9t0,6402 +pip/_vendor/requests/auth.py,sha256=OMoJIVKyRLy9THr91y8rxysZuclwPB-K1Xg1zBomUhQ,10207 +pip/_vendor/requests/certs.py,sha256=nXRVq9DtGmv_1AYbwjTu9UrgAcdJv05ZvkNeaoLOZxY,465 +pip/_vendor/requests/compat.py,sha256=N1281mkcTluMjKqCSLf88LR6HNOygEhS1TbR9LLsoVY,2114 +pip/_vendor/requests/cookies.py,sha256=Y-bKX6TvW3FnYlE6Au0SXtVVWcaNdFvuAwQxw-G0iTI,18430 +pip/_vendor/requests/exceptions.py,sha256=VcpBXOL-9JYhNbK8OZxCIImBgpQSXJlUelDPf1f-pmM,3446 +pip/_vendor/requests/help.py,sha256=dyhe3lcmHXnFCzDiZVjcGmVvvO_jtsfAm-AC542ndw8,3972 
+pip/_vendor/requests/hooks.py,sha256=QReGyy0bRcr5rkwCuObNakbYsc7EkiKeBwG4qHekr2Q,757 +pip/_vendor/requests/models.py,sha256=7pzscX_47qxx7-zEaBWGxMoB33Vdf6HLoUKZh1ktEvM,35116 +pip/_vendor/requests/packages.py,sha256=njJmVifY4aSctuW3PP5EFRCxjEwMRDO6J_feG2dKWsI,695 +pip/_vendor/requests/sessions.py,sha256=Zu-Y9YPlwTIsyFx1hvIrc3ziyeFpuFPqcOuSuz8BNWs,29835 +pip/_vendor/requests/status_codes.py,sha256=gT79Pbs_cQjBgp-fvrUgg1dn2DQO32bDj4TInjnMPSc,4188 +pip/_vendor/requests/structures.py,sha256=msAtr9mq1JxHd-JRyiILfdFlpbJwvvFuP3rfUQT_QxE,3005 +pip/_vendor/requests/utils.py,sha256=siud-FQ6xgKFbL49DRvAb3PMQMMHoeCL_TCmuHh9AUU,33301 +pip/_vendor/resolvelib/__init__.py,sha256=UL-B2BDI0_TRIqkfGwLHKLxY-LjBlomz7941wDqzB1I,537 +pip/_vendor/resolvelib/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/resolvelib/__pycache__/providers.cpython-310.pyc,, +pip/_vendor/resolvelib/__pycache__/reporters.cpython-310.pyc,, +pip/_vendor/resolvelib/__pycache__/resolvers.cpython-310.pyc,, +pip/_vendor/resolvelib/__pycache__/structs.cpython-310.pyc,, +pip/_vendor/resolvelib/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/resolvelib/compat/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/resolvelib/compat/__pycache__/collections_abc.cpython-310.pyc,, +pip/_vendor/resolvelib/compat/collections_abc.py,sha256=uy8xUZ-NDEw916tugUXm8HgwCGiMO0f-RcdnpkfXfOs,156 +pip/_vendor/resolvelib/providers.py,sha256=roVmFBItQJ0TkhNua65h8LdNny7rmeqVEXZu90QiP4o,5872 +pip/_vendor/resolvelib/reporters.py,sha256=fW91NKf-lK8XN7i6Yd_rczL5QeOT3sc6AKhpaTEnP3E,1583 +pip/_vendor/resolvelib/resolvers.py,sha256=2wYzVGBGerbmcIpH8cFmgSKgLSETz8jmwBMGjCBMHG4,17592 +pip/_vendor/resolvelib/structs.py,sha256=IVIYof6sA_N4ZEiE1C1UhzTX495brCNnyCdgq6CYq28,4794 +pip/_vendor/rich/__init__.py,sha256=wF1th4JGBCVC02xfaw8j6P2MrFcJaQJL72scKtEmDYQ,5804 +pip/_vendor/rich/__main__.py,sha256=vd1PP-o7_1un-ThdgMU9LHV-D8z56yz_-fryczn38eE,8810 +pip/_vendor/rich/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/__main__.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_cell_widths.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_emoji_codes.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_emoji_replace.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_extension.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_inspect.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_log_render.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_loop.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_lru_cache.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_palettes.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_pick.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_ratio.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_spinners.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_stack.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_timer.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_windows.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/_wrap.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/abc.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/align.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/ansi.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/bar.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/box.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/cells.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/color.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/color_triplet.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/columns.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/console.cpython-310.pyc,, 
+pip/_vendor/rich/__pycache__/constrain.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/containers.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/control.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/default_styles.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/diagnose.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/emoji.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/errors.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/file_proxy.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/filesize.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/highlighter.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/json.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/jupyter.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/layout.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/live.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/live_render.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/logging.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/markup.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/measure.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/padding.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/pager.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/palette.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/panel.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/pretty.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/progress.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/progress_bar.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/prompt.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/protocol.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/region.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/repr.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/rule.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/scope.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/screen.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/segment.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/spinner.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/status.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/style.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/styled.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/syntax.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/table.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/tabulate.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/terminal_theme.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/text.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/theme.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/themes.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/traceback.cpython-310.pyc,, +pip/_vendor/rich/__pycache__/tree.cpython-310.pyc,, +pip/_vendor/rich/_cell_widths.py,sha256=2n4EiJi3X9sqIq0O16kUZ_zy6UYMd3xFfChlKfnW1Hc,10096 +pip/_vendor/rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235 +pip/_vendor/rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064 +pip/_vendor/rich/_extension.py,sha256=Xt47QacCKwYruzjDi-gOBq724JReDj9Cm9xUi5fr-34,265 +pip/_vendor/rich/_inspect.py,sha256=vq6BjewwEvddjcBTr_lCcjYQBsKi92aTNpcXyaA5ERA,7444 +pip/_vendor/rich/_log_render.py,sha256=1ByI0PA1ZpxZY3CGJOK54hjlq4X-Bz_boIjIqCd8Kns,3225 +pip/_vendor/rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236 +pip/_vendor/rich/_lru_cache.py,sha256=M7H1ZQF32o6SxrpOur9zTIhEHlNXT9XnrcdhruUmG5I,1246 +pip/_vendor/rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063 +pip/_vendor/rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423 +pip/_vendor/rich/_ratio.py,sha256=2lLSliL025Y-YMfdfGbutkQDevhcyDqc-DtUYW9mU70,5472 
+pip/_vendor/rich/_spinners.py,sha256=huT1biTlwyp9Lm8S7bLfVzg1psUaIH5xHDwTaWEHVh0,26521 +pip/_vendor/rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351 +pip/_vendor/rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417 +pip/_vendor/rich/_windows.py,sha256=nBO71icHMIHlzT7hg6fkoIdh1mT-5MvDdPDwunkshyw,2065 +pip/_vendor/rich/_wrap.py,sha256=OtnSxnERkuNlSM1d_MYtNg8KIYTcTBk3peg16dCZH_U,1804 +pip/_vendor/rich/abc.py,sha256=ON-E-ZqSSheZ88VrKX2M3PXpFbGEUUZPMa_Af0l-4f0,890 +pip/_vendor/rich/align.py,sha256=2zRHV8SzR5eP-vQkSDgjmgsBLBluCBwykgejAW6oRD0,10425 +pip/_vendor/rich/ansi.py,sha256=QaVVkfvVL6C3OsuWI9iQ-iJFkMsMohjYlxgMLnVTEPo,6676 +pip/_vendor/rich/bar.py,sha256=a7UD303BccRCrEhGjfMElpv5RFYIinaAhAuqYqhUvmw,3264 +pip/_vendor/rich/box.py,sha256=o0ywz1iW0WjGLPrRVDAZPh1CVPEgAOaWsn8Bf3sf43g,9069 +pip/_vendor/rich/cells.py,sha256=NadN20gFxE8Aj-2S3Drn7qgn-ZpsRZcNnTNtweRL7rA,4285 +pip/_vendor/rich/color.py,sha256=SD3yTf3t8japb-jOv8GYCMCDqyzpipzXS_0rAXhSlU4,17285 +pip/_vendor/rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054 +pip/_vendor/rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131 +pip/_vendor/rich/console.py,sha256=bioCy8012eZ8PIOBxMyyqxYPltKk2pGEG9jmwylNCQk,81236 +pip/_vendor/rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288 +pip/_vendor/rich/containers.py,sha256=aKgm5UDHn5Nmui6IJaKdsZhbHClh_X7D-_Wg8Ehrr7s,5497 +pip/_vendor/rich/control.py,sha256=qxg6Yjd78XuF0VxthlT8O4dpvpACYwKkBfm2S4-IvHA,5298 +pip/_vendor/rich/default_styles.py,sha256=At42PcWzmnYWcx5fUOKyOUpI8HK5m4ItZqxkgHToaMs,7614 +pip/_vendor/rich/diagnose.py,sha256=4L8SZfbqjIRotzJ39QzD9-d4I80FyV1mNKHryg1eArE,183 +pip/_vendor/rich/emoji.py,sha256=omTF9asaAnsM4yLY94eR_9dgRRSm1lHUszX20D1yYCQ,2501 +pip/_vendor/rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642 +pip/_vendor/rich/file_proxy.py,sha256=fHeReSO3VJ7IbH_9ri-OrPYbFC3UYOzeTNjngiiWOcY,1613 +pip/_vendor/rich/filesize.py,sha256=oQJnM5_7ygkpzt3GtNq5l3F6gmB7YahBA5vpdQVKLwI,2511 +pip/_vendor/rich/highlighter.py,sha256=AdhjC0meTYswZ_xKgka0cRYdNjLABLUzHAbyF3QpPWo,4894 +pip/_vendor/rich/json.py,sha256=RCm4lXBXrjvXHpqrWPH8wdGP0jEo4IohLmkddlhRY18,5051 +pip/_vendor/rich/jupyter.py,sha256=4sxNAwJs4g3dYfWy_enPw9fp0Tdn-82tV4T9uh9vAOM,3025 +pip/_vendor/rich/layout.py,sha256=b64KMDP2EPiC103P-v-_VZKGY13oWiiGS418P_KRRlc,14048 +pip/_vendor/rich/live.py,sha256=OKxMaFU5sFfuR--cJftGYjSvg1VPQri1U_DNZUjCsvI,13711 +pip/_vendor/rich/live_render.py,sha256=zElm3PrfSIvjOce28zETHMIUf9pFYSUA5o0AflgUP64,3667 +pip/_vendor/rich/logging.py,sha256=YNcCSK6pCo2Wg6JKqScAe6VgFqebHBnS5nDnBO4gXAA,10868 +pip/_vendor/rich/markup.py,sha256=hsVW_k1TIvj5OPPQ12ihAii9HSVa8N1TStvA5B2GGpo,8058 +pip/_vendor/rich/measure.py,sha256=Z74XvzIgLZm0xH-QIo1uX5d4oahavHe8D8MKyxLNqPQ,5258 +pip/_vendor/rich/padding.py,sha256=kTFGsdGe0os7tXLnHKpwTI90CXEvrceeZGCshmJy5zw,4970 +pip/_vendor/rich/pager.py,sha256=VK_2EfH0JduZWdyV-KZma06bvi_V5PWmHG6W7BoiaTg,838 +pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396 +pip/_vendor/rich/panel.py,sha256=O6ORyIhDcOLSEasTjpcDvmhvIcppPGCeQoXpoycIUT8,8637 +pip/_vendor/rich/pretty.py,sha256=HAB68BpYysaL1EXeV4X5Tt-U2hDlcLpbFz06fkojWWE,32572 +pip/_vendor/rich/progress.py,sha256=jcgi7aMnQ_YjSpAmQkalwtNsgVn9i56SeZGprr7tuOk,35926 +pip/_vendor/rich/progress_bar.py,sha256=ELiBaxJOgsRYKpNIrot7BC0bFXvmf8cTd6nxI02BbK0,7762 +pip/_vendor/rich/prompt.py,sha256=gKVd13YWv6jedzwcRPZGUINBjC-xcJhJ_xz_NvMW80c,11307 
+pip/_vendor/rich/protocol.py,sha256=Vx6n4fEoSDhzSup8t3KH0iK2RWyssIOks5E0S1qw1GA,1401 +pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166 +pip/_vendor/rich/repr.py,sha256=1A0U0_ibG_bZbw71pUBIctO9Az-CQUuyOTbiKcJOwyw,4309 +pip/_vendor/rich/rule.py,sha256=cPK6NYo4kzh-vM_8a-rXajXplsbaHa6ahErYvGSsrJ0,4197 +pip/_vendor/rich/scope.py,sha256=HX13XsJfqzQHpPfw4Jn9JmJjCsRj9uhHxXQEqjkwyLA,2842 +pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591 +pip/_vendor/rich/segment.py,sha256=MBBAWaHyqCQFCfiNbrTW4BGaFR1uU31XktJ1S3Taqb4,23916 +pip/_vendor/rich/spinner.py,sha256=V6dW0jIk5IO0_2MyxyftQf5VjCHI0T2cRhJ4F31hPIQ,4312 +pip/_vendor/rich/status.py,sha256=gJsIXIZeSo3urOyxRUjs6VrhX5CZrA0NxIQ-dxhCnwo,4425 +pip/_vendor/rich/style.py,sha256=AD1I7atfclsFCtGeL8ronH1Jj-02WLp9ZQ2VYqmpBjM,26469 +pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258 +pip/_vendor/rich/syntax.py,sha256=pJAD08ywowg5xVwTGCqUOMpDYskjoMoDYEV-hryEX5s,26994 +pip/_vendor/rich/table.py,sha256=oQAEBaV4zMUPyg_tSA93_GrCirdIf-osolxf9wb3pEo,36757 +pip/_vendor/rich/tabulate.py,sha256=nl0oeNbiXectEgTHyj3K7eN4NZMISpaogpOdZyEOGbs,1700 +pip/_vendor/rich/terminal_theme.py,sha256=E0nI_ycFpvflamt-KVCY4J52LmUjRi1Y6ICB-Ef3gMo,1459 +pip/_vendor/rich/text.py,sha256=auX3LpY-I6PBiNyxB3o3LyMEx7lna2cx9IbNQJDwtw8,44424 +pip/_vendor/rich/theme.py,sha256=GKNtQhDBZKAzDaY0vQVQQFzbc0uWfFe6CJXA-syT7zQ,3627 +pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102 +pip/_vendor/rich/traceback.py,sha256=hAU3IR295eFuup_px2NU4aCEWu7KQs1qpZbnqoHCtR0,25935 +pip/_vendor/rich/tree.py,sha256=JxyWbc27ZuwoLQnd7I-rSsRsqI9lzaVKlfTLJXla9U0,9122 +pip/_vendor/six.py,sha256=TOOfQi7nFGfMrIvtdr6wX4wyHH8M7aknmuLfo2cBBrM,34549 +pip/_vendor/tenacity/__init__.py,sha256=GLLsTFD4Bd5VDgTR6mU_FxyOsrxc48qONorVaRebeD4,18257 +pip/_vendor/tenacity/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/tenacity/__pycache__/_asyncio.cpython-310.pyc,, +pip/_vendor/tenacity/__pycache__/_utils.cpython-310.pyc,, +pip/_vendor/tenacity/__pycache__/after.cpython-310.pyc,, +pip/_vendor/tenacity/__pycache__/before.cpython-310.pyc,, +pip/_vendor/tenacity/__pycache__/before_sleep.cpython-310.pyc,, +pip/_vendor/tenacity/__pycache__/nap.cpython-310.pyc,, +pip/_vendor/tenacity/__pycache__/retry.cpython-310.pyc,, +pip/_vendor/tenacity/__pycache__/stop.cpython-310.pyc,, +pip/_vendor/tenacity/__pycache__/tornadoweb.cpython-310.pyc,, +pip/_vendor/tenacity/__pycache__/wait.cpython-310.pyc,, +pip/_vendor/tenacity/_asyncio.py,sha256=HEb0BVJEeBJE9P-m9XBxh1KcaF96BwoeqkJCL5sbVcQ,3314 +pip/_vendor/tenacity/_utils.py,sha256=-y68scDcyoqvTJuJJ0GTfjdSCljEYlbCYvgk7nM4NdM,1944 +pip/_vendor/tenacity/after.py,sha256=dlmyxxFy2uqpLXDr838DiEd7jgv2AGthsWHGYcGYsaI,1496 +pip/_vendor/tenacity/before.py,sha256=7XtvRmO0dRWUp8SVn24OvIiGFj8-4OP5muQRUiWgLh0,1376 +pip/_vendor/tenacity/before_sleep.py,sha256=ThyDvqKU5yle_IvYQz_b6Tp6UjUS0PhVp6zgqYl9U6Y,1908 +pip/_vendor/tenacity/nap.py,sha256=fRWvnz1aIzbIq9Ap3gAkAZgDH6oo5zxMrU6ZOVByq0I,1383 +pip/_vendor/tenacity/retry.py,sha256=62R71W59bQjuNyFKsDM7hE2aEkEPtwNBRA0tnsEvgSk,6645 +pip/_vendor/tenacity/stop.py,sha256=sKHmHaoSaW6sKu3dTxUVKr1-stVkY7lw4Y9yjZU30zQ,2790 +pip/_vendor/tenacity/tornadoweb.py,sha256=E8lWO2nwe6dJgoB-N2HhQprYLDLB_UdSgFnv-EN6wKE,2145 +pip/_vendor/tenacity/wait.py,sha256=e_Saa6I2tsNLpCL1t9897wN2fGb0XQMQlE4bU2t9V2w,6691 +pip/_vendor/tomli/__init__.py,sha256=z1Elt0nLAqU5Y0DOn9p__8QnLWavlEOpRyQikdYgKro,230 +pip/_vendor/tomli/__pycache__/__init__.cpython-310.pyc,, 
+pip/_vendor/tomli/__pycache__/_parser.cpython-310.pyc,, +pip/_vendor/tomli/__pycache__/_re.cpython-310.pyc,, +pip/_vendor/tomli/_parser.py,sha256=50BD4o9YbzFAGAYyZLqZC8F81DQ7iWWyJnrHNwBKa6A,22415 +pip/_vendor/tomli/_re.py,sha256=5GPfgXKteg7wRFCF-DzlkAPI2ilHbkMK2-JC49F-AJQ,2681 +pip/_vendor/typing_extensions.py,sha256=1uqi_RSlI7gos4eJB_NEV3d5wQwzTUQHd3_jrkbTo8Q,87149 +pip/_vendor/urllib3/__init__.py,sha256=j3yzHIbmW7CS-IKQJ9-PPQf_YKO8EOAey_rMW0UR7us,2763 +pip/_vendor/urllib3/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/_collections.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/_version.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/connection.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/connectionpool.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/exceptions.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/fields.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/filepost.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/poolmanager.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/request.cpython-310.pyc,, +pip/_vendor/urllib3/__pycache__/response.cpython-310.pyc,, +pip/_vendor/urllib3/_collections.py,sha256=pyASJJhW7wdOpqJj9QJA8FyGRfr8E8uUUhqUvhF0728,11372 +pip/_vendor/urllib3/_version.py,sha256=_NdMUQaeBvFHAX2z3zAIX2Wum58A6rVtY1f7ByHsQ4g,63 +pip/_vendor/urllib3/connection.py,sha256=6zokyboYYKm9VkyrQvVVLgxMyCZK7n9Vmg_2ZK6pbhc,20076 +pip/_vendor/urllib3/connectionpool.py,sha256=eQ1jWJ2dDdRADuCj9Yx7RCpzY2iM8P32jGHbjYBkAIk,39308 +pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957 +pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-310.pyc,, +pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632 +pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922 +pip/_vendor/urllib3/contrib/appengine.py,sha256=lfzpHFmJiO82shClLEm3QB62SYgHWnjpZOH_2JhU5Tc,11034 +pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=ej9gGvfAb2Gt00lafFp45SIoRz-QwrQ4WChm6gQmAlM,4538 +pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=DD4pInv_3OEEGffEFynBoirc8ldR789sLmGSKukzA0E,16900 +pip/_vendor/urllib3/contrib/securetransport.py,sha256=4qUKo7PUV-vVIqXmr2BD-sH7qplB918jiD5eNsRI9vU,34449 +pip/_vendor/urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097 +pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217 +pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579 +pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440 
+pip/_vendor/urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/urllib3/packages/__pycache__/six.cpython-310.pyc,, +pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-310.pyc,, +pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417 +pip/_vendor/urllib3/packages/six.py,sha256=1LVW7ljqRirFlfExjwl-v1B7vSAUNTmzGMs-qays2zg,34666 +pip/_vendor/urllib3/poolmanager.py,sha256=xfVcBtEBc8Xwa8jURSqdS7QmXvUuMHhjL1sjFOY-rUk,20001 +pip/_vendor/urllib3/request.py,sha256=ZFSIqX0C6WizixecChZ3_okyu7BEv0lZu1VT0s6h4SM,5985 +pip/_vendor/urllib3/response.py,sha256=hGhGBh7TkEkh_IQg5C1W_xuPNrgIKv5BUXPyE-q0LuE,28203 +pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155 +pip/_vendor/urllib3/util/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/connection.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/proxy.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/queue.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/request.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/response.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/retry.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssl_match_hostname.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/ssltransport.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/timeout.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/url.cpython-310.pyc,, +pip/_vendor/urllib3/util/__pycache__/wait.cpython-310.pyc,, +pip/_vendor/urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901 +pip/_vendor/urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605 +pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498 +pip/_vendor/urllib3/util/request.py,sha256=NnzaEKQ1Pauw5MFMV6HmgEMHITf0Aua9fQuzi2uZzGc,4123 +pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510 +pip/_vendor/urllib3/util/retry.py,sha256=eUKOZ16Ya_Tu3_sXF5KVhLJmHQF7YXOCX-MWRoZVzqs,22011 +pip/_vendor/urllib3/util/ssl_.py,sha256=X4-AqW91aYPhPx6-xbf66yHFQKbqqfC_5Zt4WkLX1Hc,17177 +pip/_vendor/urllib3/util/ssl_match_hostname.py,sha256=w01jCYuwvQ038p9mhc1P1gF8IiTN1qHakThpoukOlbw,5751 +pip/_vendor/urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895 +pip/_vendor/urllib3/util/timeout.py,sha256=QSbBUNOB9yh6AnDn61SrLQ0hg5oz0I9-uXEG91AJuIg,10003 +pip/_vendor/urllib3/util/url.py,sha256=QVEzcbHipbXyCWwH6R4K4TR-N8T4LM55WEMwNUTBmLE,14047 +pip/_vendor/urllib3/util/wait.py,sha256=3MUKRSAUJDB2tgco7qRUskW0zXGAWYvRRE4Q1_6xlLs,5404 +pip/_vendor/vendor.txt,sha256=H-9fScoah7nx4K8O4Uft0l5iH2P_mVo4RqyuMVOTJEc,496 +pip/_vendor/webencodings/__init__.py,sha256=qOBJIuPy_4ByYH6W_bNgJF-qYQ2DoU-dKsDu5yRWCXg,10579 +pip/_vendor/webencodings/__pycache__/__init__.cpython-310.pyc,, +pip/_vendor/webencodings/__pycache__/labels.cpython-310.pyc,, +pip/_vendor/webencodings/__pycache__/mklabels.cpython-310.pyc,, +pip/_vendor/webencodings/__pycache__/tests.cpython-310.pyc,, +pip/_vendor/webencodings/__pycache__/x_user_defined.cpython-310.pyc,, 
+pip/_vendor/webencodings/labels.py,sha256=4AO_KxTddqGtrL9ns7kAPjb0CcN6xsCIxbK37HY9r3E,8979 +pip/_vendor/webencodings/mklabels.py,sha256=GYIeywnpaLnP0GSic8LFWgd0UVvO_l1Nc6YoF-87R_4,1305 +pip/_vendor/webencodings/tests.py,sha256=OtGLyjhNY1fvkW1GvLJ_FV9ZoqC9Anyjr7q3kxTbzNs,6563 +pip/_vendor/webencodings/x_user_defined.py,sha256=yOqWSdmpytGfUgh_Z6JYgDNhoc-BAHyyeeT15Fr42tM,4307 +pip/py.typed,sha256=EBVvvPRTn_eIpz5e5QztSCdrMX7Qwd7VP93RSoIlZ2I,286 diff --git a/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/REQUESTED b/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/WHEEL b/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..c4ad521282bcf87dc1b9c67535cef48755462217 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +pip = pip._internal.cli.main:main +pip3 = pip._internal.cli.main:main +pip3.10 = pip._internal.cli.main:main + diff --git a/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pip-22.0.2.dist-info/top_level.txt @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 b/venv/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..47f28619b3aab72df5480d0140d9f43ea3859d30 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14afb3129b1a8b50bc40a3b0820c7f1152ea9bc10121aab152943f7057472886 +size 2686065 diff --git a/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/LICENSE b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..c467ac089a1caa6610d1379fe6a48132c15dcd5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Tsuyoshi Hombashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights 
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/METADATA b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..c4d360b4a8bb101f56769598a05172be56fb258d --- /dev/null +++ b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/METADATA @@ -0,0 +1,240 @@ +Metadata-Version: 2.1 +Name: typepy +Version: 1.3.2 +Summary: typepy is a Python library for variable type checker/validator/converter at a run time. +Home-page: https://github.com/thombashi/typepy +Author: Tsuyoshi Hombashi +Author-email: tsuyoshi.hombashi@gmail.com +License: MIT License +Project-URL: Documentation, https://typepy.rtfd.io/ +Project-URL: Source, https://github.com/thombashi/typepy +Project-URL: Tracker, https://github.com/thombashi/typepy/issues +Project-URL: Changlog, https://github.com/thombashi/typepy/releases +Keywords: library,type-checking,type-conversion,validator +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: mbstrdecoder <2,>=1.0.0 +Provides-Extra: datetime +Requires-Dist: python-dateutil <3.0.0,>=2.8.0 ; extra == 'datetime' +Requires-Dist: pytz >=2018.9 ; extra == 'datetime' +Requires-Dist: packaging ; extra == 'datetime' +Provides-Extra: test +Requires-Dist: pytest >=6.0.1 ; extra == 'test' +Requires-Dist: tcolorpy ; extra == 'test' +Requires-Dist: python-dateutil <3.0.0,>=2.8.0 ; extra == 'test' +Requires-Dist: pytz >=2018.9 ; extra == 'test' +Requires-Dist: packaging ; extra == 'test' + +.. contents:: **typepy** + :backlinks: top + :depth: 2 + +Summary +========= +`typepy `__ is a Python library for variable type checker/validator/converter at a run time. + +.. 
image:: https://badge.fury.io/py/typepy.svg + :target: https://badge.fury.io/py/typepy + :alt: PyPI package version + +.. image:: https://anaconda.org/conda-forge/typepy/badges/version.svg + :target: https://anaconda.org/conda-forge/typepy + :alt: conda-forge package version + +.. image:: https://img.shields.io/pypi/pyversions/typepy.svg + :target: https://pypi.org/project/typepy + :alt: Supported Python versions + +.. image:: https://img.shields.io/pypi/implementation/typepy.svg + :target: https://pypi.org/project/typepy + :alt: Supported Python implementations + +.. image:: https://github.com/thombashi/typepy/workflows/Tests/badge.svg + :target: https://github.com/thombashi/typepy/actions?query=workflow%3ATests + :alt: Linux/macOS/Windows CI status + +.. image:: https://coveralls.io/repos/github/thombashi/typepy/badge.svg?branch=master + :target: https://coveralls.io/github/thombashi/typepy?branch=master + :alt: Test coverage + +.. image:: https://github.com/thombashi/typepy/actions/workflows/github-code-scanning/codeql/badge.svg + :target: https://github.com/thombashi/typepy/actions/workflows/github-code-scanning/codeql + :alt: CodeQL + +Features +========== +- checking a value type +- validate a value for a type +- convert a value from one type to the other type + +The correspondence between Python types and ``typepy`` classes are as follows: + +.. table:: Supported Types + + ================================================ ======================================================================================================= + Python Type typepy: Type Class + ================================================ ======================================================================================================= + ``bool`` `Bool `__ + ``datetime`` `DateTime `__ + ``dict`` `Dictionary `__ + ``float``/``decimal.Decimal`` (not infinity/NaN) `RealNumber `__ + ``float``/``decimal.Decimal`` (infinity) `Infinity `__ + ``float``/``decimal.Decimal`` (NaN) `Nan `__ + ``int`` `Integer `__ + ``list`` `List `__ + ``None`` `None `__ + ``str`` (not null) `String `__ + ``str`` (null) `NullString `__ + ``str`` (IP address) `IpAddress `__ + ================================================ ======================================================================================================= + +Installation +============ + +Installation: pip +------------------------------ +:: + + pip install typepy + +Install additional dependency packages with the following command if using ``typepy.DateTime`` class + +:: + + pip install typepy[datetime] + +Installation: conda +------------------------------ +:: + + conda install -c conda-forge typepy + +Installation: apt +------------------------------ +:: + + sudo add-apt-repository ppa:thombashi/ppa + sudo apt update + sudo apt install python3-typepy + + +Dependencies +============ +- Python 3.7+ +- `Python package dependencies (automatically installed) `__ + +Optional dependencies +---------------------------------- +These packages can be installed via ``pip install typepy[datetime]``: + +- `python-dateutil `__ +- `pytz `__ + +Usage +======= +Type Check Method +---------------------- +:Examples: + .. code-block:: pycon + + >>> from typepy import Integer + >>> Integer(1).is_type() + True + >>> Integer(1.1).is_type() + False + + +Type Validation Method +-------------------------------------------- +:Examples: + .. code-block:: pycon + + >>> from typepy import Integer + >>> Integer(1).validate() + >>> try: + ... Integer(1.1).validate() + ... except TypeError as e: + ... 
# validate() raised TypeError when the value unmatched the type class + ... print(e) + ... + invalid value type: expected=INTEGER, actual= + + +Type Conversion Methods +-------------------------------------------- + +convert method +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:Examples: + .. code-block:: pycon + + >>> from typepy import Integer, TypeConversionError + >>> Integer("1").convert() + 1 + >>> try: + ... Integer(1.1).convert() + ... except TypeConversionError as e: + ... # convert() raised TypeConversionError when conversion failed + ... print(e) + ... + failed to convert from float to INTEGER + +try_convert method +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:Examples: + .. code-block:: pycon + + >>> from typepy import Integer + >>> Integer("1").try_convert() + 1 + >>> print(Integer(1.1).try_convert()) # try_convert() returned None when conversion failed + None + +force_convert +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:Examples: + .. code-block:: pycon + + >>> from typepy import Integer, TypeConversionError + >>> Integer("1").force_convert() # force_convert() forcibly convert the value + 1 + >>> Integer(1.1).force_convert() + 1 + >>> try: + ... Integer("abc").force_convert() + ... except TypeConversionError as e: + ... # force_convert() raised TypeConversionError when the value was not convertible + ... print(e) + ... + failed to force_convert to int: type= + + +For more information +-------------------------------------------- +Type check/validate/convert results differed according to +``strict_level`` value which can pass to typepy class constructors as an argument. +More information can be found in the +`API reference `__. + +Documentation +=============== +https://typepy.rtfd.io/ + diff --git a/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/RECORD b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..1c817ea2f7d30d515237e02a6c044b4b8cfdf439 --- /dev/null +++ b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/RECORD @@ -0,0 +1,107 @@ +typepy-1.3.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +typepy-1.3.2.dist-info/LICENSE,sha256=vrvfBSShR_iaYV__U9eb3JDLx2MVUPtLclzT873NJPY,1074 +typepy-1.3.2.dist-info/METADATA,sha256=cGfwlvtjr-CbCpbIGheSP46P82OT7ki6Gof2jZEdhKo,9309 +typepy-1.3.2.dist-info/RECORD,, +typepy-1.3.2.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 +typepy-1.3.2.dist-info/top_level.txt,sha256=JS3pVzz8HrmCDbSyNrOs7vCirWUXl5es6HfIxEtbP2M,7 +typepy/__init__.py,sha256=dbTD5m3Nf_tMD4lWG7FAxqdUMC0dZgtgale52Oe57f0,1129 +typepy/__pycache__/__init__.cpython-310.pyc,, +typepy/__pycache__/__version__.cpython-310.pyc,, +typepy/__pycache__/_common.cpython-310.pyc,, +typepy/__pycache__/_const.cpython-310.pyc,, +typepy/__pycache__/_function.cpython-310.pyc,, +typepy/__pycache__/_typecode.cpython-310.pyc,, +typepy/__pycache__/error.cpython-310.pyc,, +typepy/__version__.py,sha256=bZfSgZ3naje3Nx6ysBkhI-QAm434QZYGFu90lJBPKUI,201 +typepy/_common.py,sha256=NV8Cr2hVr4zs7kkU2pZjPOXON8YQgn8JJptYh9j-JR0,401 +typepy/_const.py,sha256=CPuhx_t7xV5QdCJ6UKGvglDmZBoJIEIt5kSFV7pxGUo,312 +typepy/_function.py,sha256=MsE-BOyhhSPkupgDFPMRlz2YQB9pF0c-fnxhYDROglo,1096 +typepy/_typecode.py,sha256=Oi_zWK5ULiyCjmjb1Urtb3bm-J2LYqyX2TqiCmizuPs,397 +typepy/checker/__init__.py,sha256=Aj1kUaY7OQZd61SW_sktpim10dFkkPD5v8CkQ4mqEe4,1000 +typepy/checker/__pycache__/__init__.cpython-310.pyc,, +typepy/checker/__pycache__/_bool.cpython-310.pyc,, +typepy/checker/__pycache__/_bytes.cpython-310.pyc,, 
+typepy/checker/__pycache__/_checker.cpython-310.pyc,, +typepy/checker/__pycache__/_common.cpython-310.pyc,, +typepy/checker/__pycache__/_datetime.cpython-310.pyc,, +typepy/checker/__pycache__/_dictionary.cpython-310.pyc,, +typepy/checker/__pycache__/_infinity.cpython-310.pyc,, +typepy/checker/__pycache__/_integer.cpython-310.pyc,, +typepy/checker/__pycache__/_interface.cpython-310.pyc,, +typepy/checker/__pycache__/_ipaddress.cpython-310.pyc,, +typepy/checker/__pycache__/_list.cpython-310.pyc,, +typepy/checker/__pycache__/_nan.cpython-310.pyc,, +typepy/checker/__pycache__/_none.cpython-310.pyc,, +typepy/checker/__pycache__/_realnumber.cpython-310.pyc,, +typepy/checker/__pycache__/_string.cpython-310.pyc,, +typepy/checker/_bool.py,sha256=O6EITPb7OsdZOApFcdRMJADFV0N59D3ulsJkAPFzW9o,1198 +typepy/checker/_bytes.py,sha256=mYmu2ksd0h9qoBsO3SaVm9ple8edQOpV6s3RErjHscA,646 +typepy/checker/_checker.py,sha256=TxeIAXVIBXukmd1881PeXGNW7ZNapsPA9vS53wwVKck,2485 +typepy/checker/_common.py,sha256=PpinqXPbViDkTZ-HSXqemU01EeeSvaoUDuCwndPGbRc,539 +typepy/checker/_datetime.py,sha256=TffEQDoClczSF6P4o29yMbzLl-yzIAIccWXplBybFtY,1159 +typepy/checker/_dictionary.py,sha256=qXhH9plLosj967PEGCTgZvRbHZoreomnPQV6SCu1GC8,885 +typepy/checker/_infinity.py,sha256=CRgTptMZLRHzPjp93ZtgHpYM3WpV-D4p7aLt1ZyttNM,916 +typepy/checker/_integer.py,sha256=eBzHsd4xWprtzaHJglRvET_Dhi0KqrstlWyT5eCKJ88,2002 +typepy/checker/_interface.py,sha256=IiMShH1pAWhl_6JlUxtb10ofao2sUspUlGxDC1nnZps,315 +typepy/checker/_ipaddress.py,sha256=hqPXXD50x-ndLKj-DPjfLCZscN1CXawMTvOodSc1bsc,1070 +typepy/checker/_list.py,sha256=iGkycr08dVONSkerYl51WfYyidlzI3JfN5LJ_zwKU5U,985 +typepy/checker/_nan.py,sha256=SBTyHoTKtQO-wIMZ5lW-y88CGk5MLIoteig29lIQQtc,826 +typepy/checker/_none.py,sha256=xM_PEJQx1WpPUYAN6Bwgl-_IqAmhrV1oie8XIbm_C5Y,617 +typepy/checker/_realnumber.py,sha256=jtA-rv19NUBRjwAS7owNbXehJLB10dS2RCt8sLbIv5Y,1905 +typepy/checker/_string.py,sha256=Wyte6y2c2RYDCvi9BF5exzFXGscgtBrDPY3K5rhrzYs,2055 +typepy/converter/__init__.py,sha256=aN7I5tHOx93voqOeakOoad6JF07mdAKnEk0kknduo3Q,824 +typepy/converter/__pycache__/__init__.cpython-310.pyc,, +typepy/converter/__pycache__/_bool.cpython-310.pyc,, +typepy/converter/__pycache__/_bytes.cpython-310.pyc,, +typepy/converter/__pycache__/_datetime.cpython-310.pyc,, +typepy/converter/__pycache__/_dictionary.cpython-310.pyc,, +typepy/converter/__pycache__/_integer.cpython-310.pyc,, +typepy/converter/__pycache__/_interface.cpython-310.pyc,, +typepy/converter/__pycache__/_ipaddress.cpython-310.pyc,, +typepy/converter/__pycache__/_list.cpython-310.pyc,, +typepy/converter/__pycache__/_nop.cpython-310.pyc,, +typepy/converter/__pycache__/_realnumber.cpython-310.pyc,, +typepy/converter/__pycache__/_string.cpython-310.pyc,, +typepy/converter/_bool.py,sha256=ROvCowqO6nBk_Ywxcc6SUIvDVcO9acWUfsZ1fCo4Dig,1306 +typepy/converter/_bytes.py,sha256=L8e4DJ3qVqLkc2g9zlD2EMyJHYwBL4aB2EyH9w50-B4,291 +typepy/converter/_datetime.py,sha256=AaSsLhJtaNXB6TzaL98m2dpu61ZQBhbJ5MnF_sTiw5U,5382 +typepy/converter/_dictionary.py,sha256=v2ZCaSNq2U-QOgF86BTGfr9fcD0DM8UD2Oe0yFRGVAA,655 +typepy/converter/_integer.py,sha256=9ivAktrr4UQ2Yj4GVZ8Bij_Q11lqGpCxSiTU0VWtGQs,1015 +typepy/converter/_interface.py,sha256=TcqsYIsnbM3LX20k0vx7eCZnxk_Wyo6fnbBvlw4C5RY,661 +typepy/converter/_ipaddress.py,sha256=KrGcw-kn8oD0fnc3R6yaqrbgkXJWdr8XDAqFt6HJoog,843 +typepy/converter/_list.py,sha256=35ERzQ7mQXO0g5ax2Rvk93nC5FYVuAzNEQtAcZKp92E,426 +typepy/converter/_nop.py,sha256=DOkVEKioITGa_pPpgj14VClVj0ELLOjX0sgLN4nl-WI,222 
+typepy/converter/_realnumber.py,sha256=7oPwNB8zWtDR6rB1JsRE4JD0CfXFGGGp-LCOmKlDPiw,1265 +typepy/converter/_string.py,sha256=Hj1G3tq0n6Jrbt3RCFAihbMlASm-QmQKgaWcsMxIbYw,498 +typepy/error.py,sha256=9tKHKExk8rOLQGtLfQQewBBaqnS39egkJYpiC7G1pWo,178 +typepy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +typepy/type/__init__.py,sha256=BVyVbzsx2snaim_h-ibWpXELDkRxTqjLCE-xK5FliAg,743 +typepy/type/__pycache__/__init__.cpython-310.pyc,, +typepy/type/__pycache__/_base.cpython-310.pyc,, +typepy/type/__pycache__/_binary.cpython-310.pyc,, +typepy/type/__pycache__/_bool.cpython-310.pyc,, +typepy/type/__pycache__/_bytes.cpython-310.pyc,, +typepy/type/__pycache__/_datetime.cpython-310.pyc,, +typepy/type/__pycache__/_dictionary.cpython-310.pyc,, +typepy/type/__pycache__/_infinity.cpython-310.pyc,, +typepy/type/__pycache__/_integer.cpython-310.pyc,, +typepy/type/__pycache__/_ipaddress.cpython-310.pyc,, +typepy/type/__pycache__/_list.cpython-310.pyc,, +typepy/type/__pycache__/_nan.cpython-310.pyc,, +typepy/type/__pycache__/_none.cpython-310.pyc,, +typepy/type/__pycache__/_realnumber.cpython-310.pyc,, +typepy/type/__pycache__/_string.cpython-310.pyc,, +typepy/type/_base.py,sha256=v9Robzl8j7vFkJoqikzOK9Y31m7OF9QZwvx3hMFRxn4,3552 +typepy/type/_binary.py,sha256=1LV28p-B7q7KTqIWPd2ONPrQH8AmkGOV8imH_a95V_M,794 +typepy/type/_bool.py,sha256=QHYMUKTDTKEeXFilPqoVGFi9cz69qfqrSSPJhRQMGbM,844 +typepy/type/_bytes.py,sha256=i58k-iFWQUMTbp6fJ998KWKoEazvSWwly6LKmB2lD08,792 +typepy/type/_datetime.py,sha256=0hiq7E2DkfOzgQZY7TQ6y8B7edFCqQ7CUMZAOTkmXSI,835 +typepy/type/_dictionary.py,sha256=0UnNrzvsisNQxD6eCcCpNtwRLdXfxSDUgcA9M9geeJ0,849 +typepy/type/_infinity.py,sha256=8pnVfBwlKIEo35tbvSqiCOYsnDpZ1TA_F0tn3oDFAhQ,829 +typepy/type/_integer.py,sha256=HBgXgCfT7TtbHNigVK__-Lbaocb7CY29n6WzkWLbYjY,864 +typepy/type/_ipaddress.py,sha256=MroUImyV-POnQTvmpMpOGH7-PAwAFQz-HU5rjTV9A0c,843 +typepy/type/_list.py,sha256=_KBDpeKDdIEb3dLMJv8vkHjAY9awiKz7RoMQdnCMF6o,807 +typepy/type/_nan.py,sha256=9jbhUqOLXstqr8-FuBKikImEOk5C5RO0WtqpRbj27zU,804 +typepy/type/_none.py,sha256=VGIXkKgdONkcu2CXGPfgusoo-VL-xkEV3rKldPDAbac,859 +typepy/type/_realnumber.py,sha256=_dNKpzDr8FaImBbx84Yr2fsJNBicRE_1QdRu8NrzDq4,913 +typepy/type/_string.py,sha256=Dd0mOTQ_uxXQND-l-AFkzwNFK8EwGg_C-EyQw4TxOXg,1505 diff --git a/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/WHEEL b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..8245b7ba766af5b5d3536966c639c373c6fc5f57 --- /dev/null +++ b/venv/lib/python3.10/site-packages/typepy-1.3.2.dist-info/top_level.txt @@ -0,0 +1 @@ +typepy