diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1f645a494ade557512ceaeb994f05663a910b43b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .reshape_meg_2d import reshape_meg_2d_parallel + +from .deepspeed_checkpoint import DeepSpeedCheckpoint + +from .utils import (get_layer_ckpt_name_for_rank, get_model_ckpt_name_for_rank, get_zero_ckpt_name_for_rank) + +from .reshape_utils import (merge_state) + +from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor) + +from .zero_checkpoint import ZeROCheckpoint + +from .universal_checkpoint import enable_universal_checkpoint, SubparamShape + +from .constants import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6401ca8cc9e2c522b7320aedaf48c2c572eb508e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e9e3ae5b7a5448855adfadbc31eac873e86e2e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/deepspeed_checkpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/deepspeed_checkpoint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..235da1e3d43adb7aad9e1cc3bb0c523bf3d8266c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/deepspeed_checkpoint.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_3d_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_3d_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b888cd440fc537fbc9ce20ab84d6798925dbc04b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_3d_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_meg_2d.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_meg_2d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8ffa8544c8a002ba2ee02675824acb1cad16107 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_meg_2d.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9888056c501877127770be23da8f4fbc77b7f72 Binary 
files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/universal_checkpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/universal_checkpoint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e27528727d818a0b22941c6aaf226177f8e66f31 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/universal_checkpoint.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f50374ca27e760da67261f189a9ed1cce08a2707 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/constants.py b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..046bc242002fc3702fc35191d38fe3f41d506fdb --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/constants.py @@ -0,0 +1,87 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Various symbolic constants used for model checkpointing +""" + +######################################### +# Optimizer checkpoint keys +######################################### +OPTIMIZER_STATE_DICT = "optimizer_state_dict" +FP32_GROUPS = "fp32_groups" +FP32_FLAT_GROUPS = 'fp32_flat_groups' + +BASE_OPTIMIZER_STATE = 'base_optimizer_state' +BASE_OPTIMIZER_STATE_STEP = 'base_optimizer_state_step' +SINGLE_PARTITION_OF_FP32_GROUPS = "single_partition_of_fp32_groups" +PARAM_GROUPS = 'param_groups' +GROUP_PADDINGS = 'group_paddings' +PARTITION_COUNT = 'partition_count' +ZERO_STAGE = 'zero_stage' +CLIP_GRAD = 'clip_grad' +FP32_WEIGHT_KEY = "fp32" +LOSS_SCALER = 'loss_scaler' + +######################################### +# Module checkpoint keys +######################################### +PARAM = 'param' +PARAM_SHAPES = 'param_shapes' +BUFFER_NAMES = 'buffer_names' +FROZEN_PARAM_SHAPES = 'frozen_param_shapes' +FROZEN_PARAM_FRAGMENTS = 'frozen_param_fragments' + +######################################### +# Checkpoint naming constants +######################################### +MODEL_FILE_PREFIX = 'mp_rank_' +ZERO_FILE_PREFIX = 'zero_pp_rank_' +OPTIM_FILE_SUFFIX = '_optim_states.pt' +MODEL_FILE_SUFFIX = '_model_states.pt' +LAYER_FILE_PREFIX = 'layer_' +BF16_ZERO_FILE_PREFIX = 'bf16_' + ZERO_FILE_PREFIX +FP16_ZERO_FILE_PREFIX = 'fp16_' + ZERO_FILE_PREFIX + +######################################### +# Checkpoint utility keys +######################################### +DS_VERSION = 'ds_version' + +######################################### +# Universal Checkpoint keys +######################################### +UNIVERSAL_CHECKPOINT_INFO = 'universal_checkpoint_info' +UNIVERSAL_CHECKPOINT_VERSION_KEY = 'universal_checkpoint_version' +# Reserve version 0.1 for the hardcoded logic used in BLOOM-176B training +UNIVERSAL_CHECKPOINT_VERSION_VALUE = 0.2 + +# Vocabulary padding +VOCAB_TENSOR = 'vocab_tensor' +PADDED_VOCAB_SIZE = 'padded_vocab_size' +ORIGINAL_VOCAB_SIZE = 'original_vocab_size' + +# Parameter splitting/merging 
+PARAM_SLICE_MAPPINGS = 'param_slice_mappings' +CAT_DIM = "cat_dim" +# Following is a special case where a parameter effectively contains sub parameters. +# As an example, consider Megatron-DeepSpeed GPT SWIGLU implementation (mlp.h_to_4h). +# In this case, a single parameter is allocated contiguously, but used as separate parameters. +# When using universal checkpoint, we have to normalize the representation of the full parameter. +# We normalize it by concatenating all slices of the sub params and then concatenating the sub params. +# All concat operations are done on CAT_DIM (currently, no support for different concat dims for sub params and TP slicing). +# Similarly, load_hp_checkpoint_state has to take the needed actions when loading from universal. +PARAM_N_SUB_PARAMS = "param_n_sub_params" + +SUB_PARAM_SHAPE = "sub_param_shape" + +# Regex list of parameters that require special handling +VOCABULARY_PARAMETER_PATTERNS = 'vocabulary_parameter_patterns' +PIPELINE_REPLICATED_PARAMETER_PATTERNS = 'pipeline_replicated_parameter_patterns' +PARAMETER_TO_AVERAGE_PATTERNS = 'parameter_to_average_patterns' +PARAMETER_WITH_ROW_PARALLELISM_PATTERNS = 'parameter_with_row_parallelism_patterns' +TP_REPLICATED_PARAMETER_PATTERNS = 'tp_replicated_parameter_patterns' +PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0 = 'parameter_with_2_sub_params_cat_dim_0' +PARAMETER_WITH_SUB_PARAMS = 'parameter_with_sub_params' +SUB_PARAMS_SHAPE = 'sub_params_shape' diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/deepspeed_checkpoint.py b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/deepspeed_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..8312dddd2fa60ae5ad549129044005a3c5a82e2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/deepspeed_checkpoint.py @@ -0,0 +1,294 @@ +# Copyright (c) Microsoft Corporation.
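# Editor's illustrative sketch (not part of the DeepSpeed sources added in this diff):
# the sub-param normalization described in the constants comment above, assuming a fused
# SWIGLU weight (mlp.h_to_4h) split across 2 TP ranks. Shapes and variable names here are
# hypothetical; the real logic lives in ds_to_universal.merge_tp_slices() and
# universal_checkpoint.load_hp_checkpoint_state() further down in this diff.
import torch

tp_degree, n_sub_params, cat_dim = 2, 2, 0
# Each TP rank holds a contiguous [gate; up] slice of the fused weight.
tp_slices = [torch.randn(8, 4) for _ in range(tp_degree)]

# Split every TP slice into its sub params, concatenate each sub param across TP ranks,
# then concatenate the merged sub params -- all on CAT_DIM.
chunked = [s.chunk(n_sub_params, dim=cat_dim) for s in tp_slices]
full_param = torch.cat(
    [torch.cat([c[i] for c in chunked], dim=cat_dim) for i in range(n_sub_params)],
    dim=cat_dim)
assert full_param.shape == (tp_degree * 8, 4)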
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +from typing import Dict +import torch + +from .reshape_3d_utils import model_3d_desc +from .reshape_utils import (basic_folder_validation, merge_state, partition_data, get_files, get_files_with_prefix) + +from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX) + +from .reshape_meg_2d import reshape_meg_2d_parallel, meg_2d_parallel_map +from .zero_checkpoint import ZeROCheckpoint +from .constants import * + +EMBEDDING_LAYER_INDEX = 0 +FINAL_LAYER_NORM_INDEX = -1 +ARGS_KEY = 'args' +CHECKPOINT_INFO_KEY = 'checkpoint_info' +ITERATION_KEY = 'iteration' + +SEQUENTIAL_LAYERS = [ + 'input_layernorm.weight', 'input_layernorm.bias', 'self_attention.dense.bias', 'post_attention_layernorm.weight', + 'post_attention_layernorm.bias', 'mlp.dense_4h_to_h.bias', 'position_embeddings.weight' +] + +LAYER_CONCAT_DIM = {'self_attention.dense.weight': 1, 'mlp.dense_4h_to_h.weight': 1} + + +class DeepSpeedCheckpoint(object): + + def __init__(self, dir, tp_degree=None, pp_degree=None, dp_degree=None): + self.dir = dir + + pipeline_parallel = len(get_files_with_prefix(get_files(dir), LAYER_FILE_PREFIX)) > 0 + + self._validate_folder(dir, pipeline_parallel) + + self.zero_checkpoint = ZeROCheckpoint(dir) + + self.file_list = get_files(dir) + self.layer_files = get_files_with_prefix(self.file_list, LAYER_FILE_PREFIX) + self.mp_rank_files = get_files_with_prefix(self.file_list, MODEL_FILE_PREFIX) + + self.layer_keys = self._get_layer_keys() + self.layer_count = len(self.layer_keys) + + self.tp_degree = self.zero_checkpoint.get_src_tp_degree() if tp_degree is None else tp_degree + self.pp_degree = self.zero_checkpoint.get_src_pp_degree() if pp_degree is None else pp_degree + self.dp_degree = self.zero_checkpoint.get_src_dp_degree() if dp_degree is None else dp_degree + + self.original_world_size = self.zero_checkpoint.get_src_tp_degree() * self.zero_checkpoint.get_src_pp_degree( + ) * self.zero_checkpoint.get_src_dp_degree() + self.world_size = self.tp_degree * self.pp_degree * self.dp_degree + + self.old_2d_map = meg_2d_parallel_map(self.zero_checkpoint.get_src_pp_degree(), + self.zero_checkpoint.get_src_tp_degree()) + self.old_2d_map.simple_init() + self.new_2d_map = reshape_meg_2d_parallel(old_pp_degree=self.zero_checkpoint.get_src_pp_degree(), + old_tp_degree=self.zero_checkpoint.get_src_tp_degree(), + new_pp_degree=self.pp_degree, + new_tp_degree=self.tp_degree) + + if self.is_change_pp_degree() or self.is_change_tp_degree() or self.is_change_dp_degree(): + self.zero_checkpoint.reshape(model_3d_desc(self.pp_degree, self.tp_degree, self.dp_degree)) + + self.global_state = {} + + self._sanity_check() + self.pp_to_transformer_map = self._build_pp_transformer_map() + self.transformer_file_map = self._build_transformer_file_map() + self.tp_to_embedding_map = self._build_tp_other_layer_map(EMBEDDING_LAYER_INDEX) + self.tp_to_final_norm_map = self._build_tp_other_layer_map(FINAL_LAYER_NORM_INDEX) + self._build_global_state() + + def is_change_tp_degree(self): + return self.tp_degree != self.zero_checkpoint.get_src_tp_degree() + + def is_change_pp_degree(self): + return self.pp_degree != self.zero_checkpoint.get_src_pp_degree() + + def is_change_dp_degree(self): + return self.dp_degree != self.zero_checkpoint.get_src_dp_degree() + + def show_2d_mapping(self): + print(f'reshaped 2d map ---- begin') + + for i in range(self.pp_degree): + for j in range(self.tp_degree): + file_list = self.get_2d_parallel_files(pp_index=i, tp_index=j) + 
print(f'[{i}, {j}] = {file_list}') + + print(f'reshaped 2d map ---- end') + + def show_tp_embedding_map(self): + self._dump_mapping(self.tp_to_embedding_map, 'tp_to_embedding_layers') + + def show_tp_final_norm_map(self): + self._dump_mapping(self.tp_to_final_norm_map, 'tp_to_final_norm_layers') + + def show_pp_transformer_map(self): + self._dump_mapping(self.pp_to_transformer_map, 'pp_to_transformer_layers') + + def show_transformer_file_map(self): + self._dump_mapping(self.transformer_file_map, 'rank_to_transformer_files') + + def _build_global_state(self): + sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu')) + self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0) + self.global_state[ARGS_KEY] = sd.get(ARGS_KEY, None) + + def get_zero_checkpoint_state(self, pp_index, tp_index, dp_index) -> dict: + return self.zero_checkpoint.get_state_for_rank(pp_index=pp_index, + tp_index=tp_index, + dp_index=dp_index, + keys_to_ignore=[PARAM_SHAPES]) + + def get_zero_files(self, pp_index, tp_index, dp_index) -> list: + return self.zero_checkpoint.get_files_for_rank(pp_index=pp_index, tp_index=tp_index, dp_index=dp_index) + + def get_embedding_layer_id(self): + return self.layer_keys[EMBEDDING_LAYER_INDEX] + + def get_final_norm_layer_id(self): + return self.layer_keys[FINAL_LAYER_NORM_INDEX] + + def get_iteration(self): + if not ITERATION_KEY in self.global_state: + sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu')) + self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0) + + return self.global_state[ITERATION_KEY] + + def get_embedding_state(self, tp_index: int) -> Dict: + assert tp_index in self.tp_to_embedding_map.keys() + sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in self.tp_to_embedding_map[tp_index]] + sd = self._merge_state_dicts(sd_list) + return sd + + def get_embedding_files(self, tp_index: int) -> list: + assert tp_index in self.tp_to_embedding_map.keys() + return self.tp_to_embedding_map[tp_index] + + def _get_checkpoint_value(self, key): + if not key in self.global_state: + sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu')) + self.global_state[key] = sd.get(key, None) + + return self.global_state[key] + + def get_args(self): + return self._get_checkpoint_value(ARGS_KEY) + + def get_checkpoint_info(self, info_key=CHECKPOINT_INFO_KEY): + return self._get_checkpoint_value(info_key) + + def get_2d_parallel_state(self, tp_index: int, pp_index: int) -> dict: + assert tp_index < self.tp_degree + assert pp_index < self.pp_degree + fname_list = self.get_2d_parallel_files(tp_index=tp_index, pp_index=pp_index) + sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in fname_list] + + merged_sd = None + for sd in sd_list: + if merged_sd is None: + merged_sd = sd + else: + merged_sd = merge_state(merged_sd, sd) + + return merged_sd + + def get_transformer_state(self, tp_index: int, pp_index: int) -> list: + assert tp_index < self.tp_degree + assert pp_index < self.pp_degree + t_list = [] + for fname_list in self.transformer_file_map[(tp_index, pp_index)]: + sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in fname_list] + sd = self._merge_state_dicts(sd_list) + t_list.append(sd) + return t_list + + def get_pp_transformer_map(self, pp_index: int) -> list: + assert pp_index < self.pp_degree + return self.pp_to_transformer_map[pp_index] + + def get_final_norm_state(self, tp_index: int) -> Dict: + assert tp_index in self.tp_to_final_norm_map.keys() + sd = 
torch.load(self.tp_to_final_norm_map[tp_index][0], map_location=torch.device('cpu')) + return sd + + def get_final_norm_files(self, tp_index: int) -> list: + assert tp_index in self.tp_to_final_norm_map.keys() + return self.tp_to_final_norm_map[tp_index] + + def _build_tp_other_layer_map(self, layer_index: int): + data_map = {} + if len(self.layer_files) < 1: + return data_map + assert layer_index <= len(self.layer_files) + layer_files = get_files_with_prefix(self.layer_files, self.layer_keys[layer_index]) + layer_file_partitions = partition_data(layer_files, self.tp_degree) + data_map = {i: flist for i, flist in enumerate(layer_file_partitions)} + return data_map + + def get_2d_parallel_files(self, tp_index: int, pp_index: int) -> list: + assert tp_index < self.tp_degree + assert pp_index < self.pp_degree + file_indices = self.new_2d_map.get_data(pp_index=pp_index, tp_index=tp_index) + return [self.mp_rank_files[i] for i in file_indices] + + def _build_pp_transformer_map(self): + data_map = {} + if self.pp_degree > 0: + transformer_layers = self.layer_keys[1:-1] + layers_per_pp = len(transformer_layers) // self.pp_degree + data_map = { + i: transformer_layers[i * layers_per_pp:(i + 1) * layers_per_pp] + for i in range(0, self.pp_degree) + } + return data_map + + def _dump_mapping(self, data_map, map_tag=None): + if map_tag is not None: + print(f'Dump mapping: {map_tag}') + for k, v in data_map.items(): + print(f'{k} = {v}') + + def _build_transformer_file_map(self): + transformer_layer_keys = self.layer_keys[1:-1] + file_map = {} + # XXX: this is not guaranteed + layers_per_pp = 1 + if self.pp_degree > 0: + layers_per_pp = len(transformer_layer_keys) // self.pp_degree + #print(f"{transformer_layer_keys} {layers_per_pp}") + for key_index, layer_key in enumerate(transformer_layer_keys): + pp_index = key_index // layers_per_pp + layer_files = get_files_with_prefix(self.layer_files, layer_key) + layer_file_partitions = partition_data(layer_files, self.tp_degree) + for tp_index in range(self.tp_degree): + map_key = (tp_index, pp_index) + if not map_key in file_map.keys(): + file_map[map_key] = [] + file_map[map_key].append(layer_file_partitions[tp_index]) + + return file_map + + def _sanity_check(self): + assert len(self.mp_rank_files) % self.tp_degree == 0 + assert self.zero_checkpoint.num_files % (self.pp_degree * self.tp_degree) == 0 + assert self.zero_checkpoint.num_files % (self.tp_degree) == 0 + # XXX: fix me - isn't always the case + # only true with --pp-partition-method 'type:transformer|embedding' \ + # assert (len(self.layer_keys) - 2) % self.pp_degree == 0 + + def validate_files(self): + for file in self.file_list: + if not os.path.isfile(file): + print(f'Error: {file} is not existent') + + def _get_layer_keys(self): + key_set = set() + key_len = len(LAYER_FILE_PREFIX) + 2 + for file_path in self.layer_files: + _, fname = os.path.split(file_path) + key_set.add(fname[:key_len]) + return sorted(list(key_set)) + + def _merge_state_dicts(self, sd_list): + merged_sd = {} + for key in sd_list[0].keys(): + if not key in SEQUENTIAL_LAYERS: + cat_dim = LAYER_CONCAT_DIM.get(key, 0) + merged_sd[key] = torch.cat([sd[key] for sd in sd_list], dim=cat_dim) + else: + merged_sd[key] = sd_list[0][key] + + return merged_sd + + def _validate_folder(self, dir, pipeline_parallel): + basic_folder_validation(dir) + + file_list = get_files(dir) + file_prefix_list = [MODEL_FILE_PREFIX] + if pipeline_parallel: + file_prefix_list.extend([LAYER_FILE_PREFIX, f'{LAYER_FILE_PREFIX}01']) + for file_prefix in 
file_prefix_list: + ckpt_files = get_files_with_prefix(file_list, file_prefix) + assert len( + ckpt_files + ) > 0, f'{dir} seems a bogus DeepSpeed checkpoint folder: Cannot find {file_prefix}* files in there.' diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/ds_to_universal.py b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/ds_to_universal.py new file mode 100644 index 0000000000000000000000000000000000000000..b1a8276589b62fdb2b04919ec0b77829842854ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/ds_to_universal.py @@ -0,0 +1,401 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from functools import partial +from itertools import chain +import argparse +import glob +import itertools +from concurrent.futures import ProcessPoolExecutor +import os +import re +import shutil +import torch +import tqdm +#from pprint import pprint + +from deepspeed.checkpoint import DeepSpeedCheckpoint +from deepspeed.checkpoint import ( + OPTIMIZER_STATE_DICT, + BASE_OPTIMIZER_STATE, + SINGLE_PARTITION_OF_FP32_GROUPS, + PARAM_GROUPS, + PARAM_SLICE_MAPPINGS, + PARAM_SHAPES, + PARAM, + CAT_DIM, + PARAM_N_SUB_PARAMS, + SUB_PARAM_SHAPE, + VOCAB_TENSOR, + UNIVERSAL_CHECKPOINT_INFO, + VOCABULARY_PARAMETER_PATTERNS, + PIPELINE_REPLICATED_PARAMETER_PATTERNS, + TP_REPLICATED_PARAMETER_PATTERNS, + PARAMETER_TO_AVERAGE_PATTERNS, + PARAMETER_WITH_ROW_PARALLELISM_PATTERNS, + PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0, + PARAMETER_WITH_SUB_PARAMS, + SubparamShape, +) + + +def parse_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument('--input_folder', type=str, required=True, help='Input DeepSpeed Checkpoint folder') + parser.add_argument('--output_folder', type=str, required=True, help='Output DeepSpeed checkpoint folder') + parser.add_argument('--num_extract_workers', + default=4, + type=int, + help='How many parallel processes to extract zero shards') + parser.add_argument( + '--num_merge_workers', + default=2, + type=int, + help= + 'How many parallel processes to merge tp slices (more memory intensive, use much fewer than --num_extract_workers))' + ) + parser.add_argument('--keep_temp_folder', + action='store_true', + help='Preserve temporary folder of intermediate checkpoint slice files. 
Useful for debugging.') + parser.add_argument('--no_strict', + dest='strict', + action='store_false', + help='Do not perform validity checks on converted checkpoint.') + args = parser.parse_args() + print(f'args = {args}') + return args + + +def _create_checkpoint_paths(base_folder, iteration, tp_degree, pp_degree): + path_list = [] + iter_folder = f'iter_{iteration:07d}' + for i in range(0, tp_degree): + path_list.append([]) + for j in range(0, pp_degree): + rank_folder = f'mp_rank_{i:02d}' if pp_degree == 1 else f'mp_rank_{i:02d}_{j:03d}' + ckpt_path = os.path.join(rank_folder, 'model_optim_rng.pt') + path_list[i].append(os.path.join(base_folder, iter_folder, ckpt_path)) + + return path_list + + +def _save_checkpoint(file_path, chkpt_sd): + dir, _ = os.path.split(file_path) + os.makedirs(dir, exist_ok=True) + torch.save(chkpt_sd, file_path) + + +def extract_zero_shards(dir, ds_checkpoint, indices_3D): + pp_index, tp_index, dp_index = indices_3D + sd = ds_checkpoint.get_zero_checkpoint_state(pp_index=pp_index, tp_index=tp_index, dp_index=dp_index) + + # pprint(f"Processing {dp_index=} {pp_index=}, {tp_index=}") + + optim_sd = sd[OPTIMIZER_STATE_DICT] + param_slice_mappings = optim_sd[PARAM_SLICE_MAPPINGS] + universal_checkpoint_info = ds_checkpoint.get_checkpoint_info(UNIVERSAL_CHECKPOINT_INFO) + pipeline_replicated_params = universal_checkpoint_info.get(PIPELINE_REPLICATED_PARAMETER_PATTERNS, []) + # print(f'{pipeline_replicated_params=}') + + # dict + state_groups = optim_sd[BASE_OPTIMIZER_STATE]["state"] + # list + fp32_groups = optim_sd[SINGLE_PARTITION_OF_FP32_GROUPS] + param_groups_cnt = len(state_groups) + + for param_group_id in range(param_groups_cnt): + + flat_state = dict( + exp_avg=state_groups[param_group_id]["exp_avg"], + exp_avg_sq=state_groups[param_group_id]["exp_avg_sq"], + fp32=fp32_groups[param_group_id], + ) + + if "step" in state_groups[param_group_id]: + flat_state["step"] = state_groups[param_group_id]["step"] + + for name, fragment_mapping in param_slice_mappings[param_group_id].items(): + if pp_index > 0 and any(re.match(pattern, name) for pattern in pipeline_replicated_params): + # Skip tied weights that are replicated in first and last pp stages + continue + + # pprint(f"dpt{dp_index}{pp_index}{tp_index} {param_group_id} {name} => {fragment_mapping.start}:{fragment_mapping.numel}") + for state_key in flat_state.keys(): + dump_param_fragment(dir, tp_index, dp_index, state_key, flat_state[state_key], name, + fragment_mapping.start, fragment_mapping.numel) + + +cnt = 0 + + +def dp_index_to_str(dp_index): + return f"{dp_index:0>2d}" + + +def dump_param_fragment(dir, tp_index, dp_index, state_name, state_flat_tensor, param_name, offset, numel): + + global cnt # temp hack + + param_base_path = os.path.join(dir, param_name, str(tp_index)) + os.makedirs(param_base_path, exist_ok=True) + + cnt += 1 + + path = os.path.join(param_base_path, f"{state_name}.{dp_index_to_str(dp_index)}") + + #print(f"{param_name}: {offset}: {numel} => {path}") + + # State might be a python int or a tensor + if state_name != "step" and torch.is_tensor(state_flat_tensor): + state_flat_tensor = state_flat_tensor.narrow(0, offset, numel).clone() + _save_checkpoint(path, state_flat_tensor) + + +def _merge_zero_shards(param_base_path, state, tp_degree, slice_shape): + slices = [] + for tp_index in range(tp_degree): + prefix_path = os.path.join(param_base_path, str(tp_index), f"{state}") + paths = glob.glob(f"{prefix_path}.*") + + if len(paths) == 0: + continue + + pattern = 
re.compile(f"{prefix_path}\\.([0-9]+)") + dp_indices = set() + for p in paths: + m = pattern.match(p) + if m: + dp_indices.add(int(m.group(1))) + else: + raise ValueError(f"Cannot parse dp_rank from {p}") + + paths = [f"{prefix_path}.{dp_index_to_str(dp_index)}" for dp_index in sorted(list(dp_indices))] + shards = [torch.load(p) for p in paths] + + if state == "step": + assert all(v == shards[0] for v in shards), "All shards must have the same step value" + slice = shards[0] + else: + slice = torch.cat(shards, dim=0).reshape(slice_shape) + + slices.append(slice) + return slices + + +def merge_tp_slices(ds_checkpoint, dir, slice_dir, tp_degree, name_and_shape): + + name, shape = name_and_shape + slice_base_path = os.path.join(slice_dir, name) + param_base_path = os.path.join(dir, name) + + universal_checkpoint_info = ds_checkpoint.get_checkpoint_info(UNIVERSAL_CHECKPOINT_INFO) + replicated_parameters = universal_checkpoint_info.get(TP_REPLICATED_PARAMETER_PATTERNS, []) + parameters_to_average = universal_checkpoint_info.get(PARAMETER_TO_AVERAGE_PATTERNS, []) + parameters_with_row_parallelism = universal_checkpoint_info.get(PARAMETER_WITH_ROW_PARALLELISM_PATTERNS, []) + vocabulary_parameters = universal_checkpoint_info.get(VOCABULARY_PARAMETER_PATTERNS, []) + parameters_with_2_sub_params_cat_dim_0 = universal_checkpoint_info.get(PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0, []) + parameter_with_sub_params = universal_checkpoint_info.get(PARAMETER_WITH_SUB_PARAMS, []) + + unmatched_patterns = set(replicated_parameters + parameters_to_average + parameters_with_row_parallelism + + vocabulary_parameters + parameters_with_2_sub_params_cat_dim_0) + unmatched_patterns.update(chain.from_iterable(SubparamShape(**s).patterns for s in parameter_with_sub_params)) + + def get_matched_pattern(patterns_, name_): + matched_ = [pattern_ for pattern_ in patterns_ if re.match(pattern_, name_)] + assert len(matched_) <= 1, f'Got more than one matching patterns={matched_} for {name_}' + if matched_: + pattern_ = matched_[0] + unmatched_patterns.discard(pattern_) + return pattern_ + return None + + def get_matched_sub_params_pattern(name_): + for subparam_shape_dict in parameter_with_sub_params: + subparam_shape = SubparamShape(**subparam_shape_dict) + for pattern_ in subparam_shape.patterns: + if re.match(pattern_, name_): + unmatched_patterns.discard(pattern_) + return subparam_shape + return None + + matched_sub_params_shape = get_matched_sub_params_pattern(name) + + step_merged = _merge_zero_shards(slice_base_path, "step", tp_degree, shape) + if step_merged: + _save_checkpoint(os.path.join(param_base_path, f"step.pt"), step_merged[0]) + + for state in ("fp32", "exp_avg", "exp_avg_sq"): + slices = _merge_zero_shards(slice_base_path, state, tp_degree, shape) + final_path = os.path.join(param_base_path, f"{state}.pt") + + #print(f"Expected shape: {shape}") + #print(f"Fragment sizes:", list(frag.shape for frag in slices)) + ckpt_dict = {} + if get_matched_pattern(replicated_parameters, name): + if len(slices) > 1: + assert all([slices[0].equal(other_slice) for other_slice in slices[1:]]) + param = slices[0] + # print(f'replicate {name} using first slice') + elif get_matched_pattern(parameters_to_average, name): + param = sum(slices) / len(slices) + # print(f'merge {name} using average') + elif get_matched_pattern(parameters_with_2_sub_params_cat_dim_0, name): + cat_dim = 0 + chunked_slices = [torch.chunk(s, 2, dim=cat_dim) for s in slices] + merged_chunks_0 = torch.cat([s[0] for s in chunked_slices], dim=cat_dim) + 
merged_chunks_1 = torch.cat([s[1] for s in chunked_slices], dim=cat_dim) + param = torch.cat([merged_chunks_0, merged_chunks_1], dim=cat_dim) + ckpt_dict[CAT_DIM] = cat_dim + ckpt_dict[PARAM_N_SUB_PARAMS] = 2 + elif matched_sub_params_shape: + merged_chunks = [] + partition_dim = matched_sub_params_shape.partition_dim + + sub_dim_sizes = matched_sub_params_shape.shape[partition_dim] + if not isinstance(sub_dim_sizes, tuple): + sub_dim_sizes = (sub_dim_sizes, ) + + partition_shape = [sum(d) if isinstance(d, tuple) else d for d in matched_sub_params_shape.shape] + partition_shape = [d // tp_degree if i == partition_dim else d for i, d in enumerate(partition_shape)] + slices = [s.view(partition_shape) for s in slices] + + offset = 0 + for sub_dim_size in sub_dim_sizes: + part_sub_dim_size = sub_dim_size // tp_degree + merged_chunks.append( + torch.cat([s.narrow(partition_dim, offset, part_sub_dim_size) for s in slices], dim=partition_dim)) + offset += part_sub_dim_size + param = torch.cat(merged_chunks, dim=partition_dim) + ckpt_dict[SUB_PARAM_SHAPE] = matched_sub_params_shape + else: + cat_dim = 1 if get_matched_pattern(parameters_with_row_parallelism, name) else 0 + # print(f"merge {name} with CAT DIM: {cat_dim}") + param = torch.cat(slices, dim=cat_dim) + ckpt_dict[CAT_DIM] = cat_dim + + if get_matched_pattern(vocabulary_parameters, name): + #print(f"Before {param.shape=}") + # strip padding + original_vocab_size = universal_checkpoint_info['original_vocab_size'] + param = param[:original_vocab_size, :] + ckpt_dict[VOCAB_TENSOR] = True + #print(f"After {param.shape=}") + + #print(f"Final shape: {param.shape}") + ckpt_dict[PARAM] = param + _save_checkpoint(final_path, ckpt_dict) + + return unmatched_patterns + + +def _do_parallel_work(do_work, work_chunks, num_workers): + results = [] + if num_workers > 1: + with ProcessPoolExecutor(max_workers=num_workers) as executor: + future_list = [executor.submit(do_work, work) for work in work_chunks] + for f in tqdm.tqdm(future_list): + results.append(f.result()) + else: + # No parallel pass for unit testing + # We can't create child processes in tests + for work in tqdm.tqdm(work_chunks): + results.append(do_work(work)) + return results + + +def _extract_zero_shard_files(args, ds_checkpoint, temp_dir): + _3d_range_list = list( + itertools.product(range(ds_checkpoint.pp_degree), range(ds_checkpoint.tp_degree), + range(ds_checkpoint.dp_degree))) + #pprint(f'{_3d_range_list=}') + + do_work = partial(extract_zero_shards, temp_dir, ds_checkpoint) + _do_parallel_work(do_work, _3d_range_list, args.num_extract_workers) + + +def _merge_tp_slice_files(args, ds_checkpoint, slice_shapes, temp_dir): + zero_output_folder = os.path.join(args.output_folder, "zero") + do_work = partial(merge_tp_slices, ds_checkpoint, zero_output_folder, temp_dir, ds_checkpoint.tp_degree) + unmatched_patterns_lists = _do_parallel_work(do_work, list(slice_shapes.items()), args.num_merge_workers) + + # verify that all patterns were used + # if a pattern was not used by any of the workers, then it was not used at all -> assert/alert + sets = [set(lst) for lst in unmatched_patterns_lists] + unmatched_patterns = list(set.intersection(*sets)) + if args.strict: + assert not unmatched_patterns, f'Unused patterns={unmatched_patterns} while merging tp slices' + elif unmatched_patterns: + print(f'Warning: Unused patterns={unmatched_patterns} while merging tp slices') + + +def _save_optimizer_state(args, ds_checkpoint): + sharded_states = [BASE_OPTIMIZER_STATE, PARAM_SLICE_MAPPINGS, 
SINGLE_PARTITION_OF_FP32_GROUPS] + sd = ds_checkpoint.get_zero_checkpoint_state(pp_index=0, tp_index=0, dp_index=0) + + optim_sd = sd[OPTIMIZER_STATE_DICT] + output_sd = {k: v for k, v in optim_sd.items() if k not in sharded_states} + output_sd[PARAM_GROUPS] = optim_sd[BASE_OPTIMIZER_STATE][PARAM_GROUPS] + zero_output_folder = os.path.join(args.output_folder, "zero") + output_file_path = os.path.join(zero_output_folder, f"optimizer_state.pt") + _save_checkpoint(output_file_path, output_sd) + + +def _check_for_required_state(ds_checkpoint): + universal_checkpoint_info = ds_checkpoint.get_checkpoint_info(UNIVERSAL_CHECKPOINT_INFO) + assert universal_checkpoint_info is not None, f'Required {UNIVERSAL_CHECKPOINT_INFO} state is missing in checkpoint. Verify that client creates this state.' + + +def main(args): + print(f'Convert DeepSpeed Checkpoint to Universal Checkpoint') + + print(f'Converting DeepSpeed checkpoint in {args.input_folder} to Universal checkpoint in {args.output_folder}') + + ds_checkpoint = DeepSpeedCheckpoint(args.input_folder) + _check_for_required_state(ds_checkpoint) + + iteration = ds_checkpoint.get_iteration() + #_create_latest_file(args.output_folder, iteration) + checkpoint_paths = _create_checkpoint_paths(args.output_folder, iteration, ds_checkpoint.tp_degree, + ds_checkpoint.pp_degree) + + slice_shapes = [] + for mp_rank_file in ds_checkpoint.mp_rank_files: + mp_sd = torch.load(mp_rank_file, map_location=torch.device('cpu')) + slice_shapes += mp_sd[PARAM_SHAPES] + + # fix back to normal flat dict, merge duplicates for tp>1 + slice_shapes = dict((k, v) for d in slice_shapes for k, v in d.items()) + temp_dir = os.path.join(args.output_folder, 'tmp') + + print('*** 1. Extracting ZeRO fragments') + _extract_zero_shard_files(args, ds_checkpoint, temp_dir) + + print('*** 2. Merging slices .....') + _merge_tp_slice_files(args, ds_checkpoint, slice_shapes, temp_dir) + + print('*** 3. Saving common optimizer states') + _save_optimizer_state(args, ds_checkpoint) + + if not args.keep_temp_folder: + shutil.rmtree(temp_dir, ignore_errors=True) + + # Copy mp* files into output folder + for f in glob.glob(os.path.join(args.input_folder, 'mp*')): + shutil.copy2(f, args.output_folder) + + # Update latest to output folder + checkpoint_root_folder, step_folder = os.path.split(args.output_folder) + latest_file = os.path.join(checkpoint_root_folder, 'latest_universal') + with open(latest_file, "w") as f: + f.write(step_folder) + + print('*** Done!') + + +if __name__ == "__main__": + args = parse_arguments() + main(args) diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_3d_utils.py b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_3d_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..02b3947624a12153aeb83967e515b18002890a40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_3d_utils.py @@ -0,0 +1,111 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .reshape_utils import (get_files, get_files_with_prefix, partition_data, get_zero_files) + +from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX) + +from .reshape_meg_2d import (reshape_meg_2d_parallel, meg_2d_parallel_map) + +PP_DIM = 'PP' +TP_DIM = 'TP' +DP_DIM = 'DP' + + +class model_3d_desc(object): + + def __init__(self, pp_degree=1, tp_degree=1, dp_degree=1): + self.pp_degree = pp_degree + self.tp_degree = tp_degree + self.dp_degree = dp_degree + + def reshape(self, target_3d_desc, verbose=False): + valid_reshape, reshape_errors = self.can_reshape(target_3d_desc) + assert valid_reshape, ','.join(reshape_errors) + tgt_2d_map = reshape_meg_2d_parallel(old_pp_degree=self.pp_degree, + old_tp_degree=self.tp_degree, + new_pp_degree=target_3d_desc.pp_degree, + new_tp_degree=target_3d_desc.tp_degree, + verbose=verbose) + + flat_3d_map = flatten_dp_dimension(meg_2d_map=tgt_2d_map, + src_2d_size=self.pp_degree * self.tp_degree, + dp_degree=self.dp_degree) + + return unflatten_dp_dimension(meg_2d_map=flat_3d_map, dp_degree=target_3d_desc.dp_degree) + + def get_desc(self): + return f'{PP_DIM},{TP_DIM},{DP_DIM} = ({self.pp_degree}, {self.tp_degree}, {self.dp_degree})' + + def world_size(self): + return self.pp_degree * self.tp_degree * self.dp_degree + + def is_valid(self, pp_index, tp_index, dp_index): + err_msg = [] + valid = True + for index, degree, dim_name in [(pp_index, self.pp_degree, PP_DIM), (tp_index, self.tp_degree, TP_DIM), + (dp_index, self.dp_degree, DP_DIM)]: + if index >= degree: + valid = False + err_msg.append(f'{dim_name} indexing error: index {index} >= degree {degree}') + + return valid, err_msg + + def can_reshape(self, target_3d_desc): + err_msg = [] + if target_3d_desc.pp_degree > self.pp_degree: + err_msg.append( + f'Expansion reshape not supported - {PP_DIM}: {self.pp_degree} ---> {target_3d_desc.pp_degree}') + + if target_3d_desc.tp_degree > self.tp_degree: + err_msg.append( + f'Expansion reshape not supported - {TP_DIM}: {self.tp_degree} ---> {target_3d_desc.tp_degree}') + + if target_3d_desc.dp_degree > self.dp_degree: + err_msg.append( + f'Expansion reshape not supported - {DP_DIM}: {self.dp_degree} ---> {target_3d_desc.dp_degree}') + + return len(err_msg) == 0, err_msg + + +def get_model_3d_descriptor(dir): + file_list = get_files(dir) + zero_file_list = get_zero_files(dir) + num_pp0_files = len(get_files_with_prefix(file_list, f'{LAYER_FILE_PREFIX}01')) + if num_pp0_files > 0: + tp_degree = num_pp0_files + pp_degree = len(get_files_with_prefix(file_list, MODEL_FILE_PREFIX)) // tp_degree + dp_degree = max(1, len(zero_file_list) // (pp_degree * tp_degree)) + else: + tp_degree = len(get_files_with_prefix(file_list, MODEL_FILE_PREFIX)) + dp_degree = max(1, len(zero_file_list) // tp_degree) + pp_degree = 1 + + return model_3d_desc(pp_degree, tp_degree, dp_degree) + + +def flatten_dp_dimension(meg_2d_map, src_2d_size, dp_degree): + new_meg_2d_map = meg_2d_parallel_map(meg_2d_map.pp_degree, meg_2d_map.tp_degree) + for pp_index in range(meg_2d_map.pp_degree): + for tp_index in range(meg_2d_map.tp_degree): + dp0_indices = meg_2d_map.get_data(pp_index, tp_index) + for idx in dp0_indices: + dpX_indices = [idx + (i * src_2d_size) for i in range(dp_degree)] + new_meg_2d_map.add_data(pp_index, tp_index, dpX_indices) + return new_meg_2d_map + + +def unflatten_dp_dimension(meg_2d_map, dp_degree): + pp_degree = meg_2d_map.pp_degree + tp_degree = meg_2d_map.tp_degree + meg_2d_map_list = 
[meg_2d_parallel_map(pp_degree=pp_degree, tp_degree=tp_degree) for _ in range(dp_degree)] + for pp_index in range(pp_degree): + for tp_index in range(tp_degree): + flat_dp_indices = meg_2d_map.get_data(pp_index, tp_index) + partitioned_dp_indices = partition_data(flat_dp_indices, dp_degree) + for dp_indices, _2d_map in zip(partitioned_dp_indices, meg_2d_map_list): + _2d_map.add_data(pp_index, tp_index, dp_indices) + + return meg_2d_map_list diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_meg_2d.py b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_meg_2d.py new file mode 100644 index 0000000000000000000000000000000000000000..3bff87f4344f4a1e74b348ae77f9b9a7d9212c6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_meg_2d.py @@ -0,0 +1,222 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .reshape_utils import partition_data + + +class meg_2d_parallel_map(object): + + def __init__(self, pp_degree, tp_degree): + self.pp_degree = pp_degree + self.tp_degree = tp_degree + self.map = {} + + def simple_init(self): + self.map = { + self._make_key(i // self.tp_degree, i % self.tp_degree): [i] + for i in range(self.pp_degree * self.tp_degree) + } + + def add_data(self, pp_index, tp_index, data): + self._validate_indices(pp_index, tp_index) + assert type(data) is list + + key = self._make_key(pp_index, tp_index) + if not key in self.map.keys(): + self.map[key] = [] + self.map[key] += data + + def get_data(self, pp_index=None, tp_index=None): + self._validate_indices(pp_index, tp_index) + pp_indices = list(range(self.pp_degree)) if pp_index is None else [pp_index] + tp_indices = list(range(self.tp_degree)) if tp_index is None else [tp_index] + + result = [] + for i in pp_indices: + for j in tp_indices: + result += self.map[self._make_key(i, j)] + + return result + + def print_data(self, tag): + print(f'{tag}') + for key, value in self.map.items(): + print(f'{key} = {value}') + + def _validate_indices(self, pp_index, tp_index): + assert pp_index is None or pp_index < self.pp_degree + assert tp_index is None or tp_index < self.tp_degree + + def _make_key(self, i, j): + return f'{i},{j}' + + +def _reshape_tp_dimension(old_2d_map, new_tp_degree): + old_pp_degree = old_2d_map.pp_degree + new_2d_map = meg_2d_parallel_map(old_pp_degree, new_tp_degree) + for i in range(old_pp_degree): + ranks_for_pp_index = old_2d_map.get_data(pp_index=i, tp_index=None) + split_ranks = partition_data(ranks_for_pp_index, new_tp_degree) + for j in range(new_tp_degree): + new_2d_map.add_data(i, j, split_ranks[j]) + + return new_2d_map + + +def _reshape_pp_dimension(old_2d_map, new_pp_degree): + old_tp_degree = old_2d_map.tp_degree + new_2d_map = meg_2d_parallel_map(new_pp_degree, old_tp_degree) + for i in range(old_tp_degree): + ranks_for_tp_index = old_2d_map.get_data(pp_index=None, tp_index=i) + split_ranks = partition_data(ranks_for_tp_index, new_pp_degree) + for j in range(new_pp_degree): + new_2d_map.add_data(j, i, split_ranks[j]) + + return new_2d_map + + +def reshape_meg_2d_parallel(old_pp_degree, old_tp_degree, new_pp_degree, new_tp_degree, verbose=False): + assert new_pp_degree <= old_pp_degree + assert new_tp_degree <= old_tp_degree + + old_2d_map = meg_2d_parallel_map(old_pp_degree, old_tp_degree) + old_2d_map.simple_init() + if verbose: + old_2d_map.print_data(f'original_2d_map:') + + if old_tp_degree != new_tp_degree: + new_tp_map = _reshape_tp_dimension(old_2d_map, new_tp_degree) + 
else: + new_tp_map = old_2d_map + if verbose: + new_tp_map.print_data(f'after_tp_reshape:') + + if old_pp_degree != new_pp_degree: + final_map = _reshape_pp_dimension(new_tp_map, new_pp_degree) + else: + final_map = new_tp_map + + if verbose: + final_map.print_data(f'final_2d_map:') + + return final_map + + +def get_mpu_ranks(tp_size=1, pp_size=1, dp_size=1, virtual_pp_size=None): + """ + Initialize model data parallel groups. + + Arguments: + tp_size: number of GPUs used to parallelize model tensor. + pp_size: number of GPUs used to parallelize model pipeline. + dp_size: number of GPUs used to parallelize model data. + + Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we + use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize + the model pipeline. The present function will + create 8 tensor model-parallel groups, 4 pipeline model-parallel groups + and 8 data-parallel groups as: + 8 data_parallel groups: + [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15] + 8 tensor model-parallel groups: + [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15] + 4 pipeline model-parallel groups: + [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15] + Note that for efficiency, the caller should make sure adjacent ranks + are on the same DGX box. For example if we are using 2 DGX-1 boxes + with a total of 16 GPUs, rank 0 to 7 belong to the first box and + ranks 8 to 15 belong to the second box. + """ + + world_size = tp_size * pp_size * dp_size + + print(f"\n\n*** tp={tp_size}, pp={pp_size}, dp={dp_size}, world={world_size}") + + tensor_model_parallel_size = min(tp_size, world_size) + pipeline_model_parallel_size = min(pp_size, world_size) + data_parallel_size = world_size // (tensor_model_parallel_size * pipeline_model_parallel_size) + + num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size + num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size + num_data_parallel_groups = world_size // data_parallel_size + + # Build the data-parallel groups. + all_dp_group_ranks = [] + for i in range(pipeline_model_parallel_size): + start_rank = i * num_pipeline_model_parallel_groups + end_rank = (i + 1) * num_pipeline_model_parallel_groups + for j in range(tensor_model_parallel_size): + ranks = range(start_rank + j, end_rank, tensor_model_parallel_size) + all_dp_group_ranks.append(list(ranks)) + + print("DP", all_dp_group_ranks) + + # Build the model-parallel groups. + all_pp_group_ranks = [] + for i in range(data_parallel_size): + ranks = [data_parallel_group_ranks[i] for data_parallel_group_ranks in all_dp_group_ranks] + all_pp_group_ranks.append(list(ranks)) + + print(f"PP", all_pp_group_ranks) + + # Build the tensor model-parallel groups. + all_tp_group_ranks = [] + for i in range(num_tensor_model_parallel_groups): + ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size) + all_tp_group_ranks.append(list(ranks)) + + print(f"TP", all_tp_group_ranks) + + return all_tp_group_ranks, all_pp_group_ranks, all_dp_group_ranks + + # # Build the pipeline model-parallel groups and embedding groups + # # (first and last rank in each pipeline model-parallel group). 
+ # for i in range(num_pipeline_model_parallel_groups): + # ranks = range(i, world_size, + # num_pipeline_model_parallel_groups) + # print(f"EMB{i}", list(ranks)) + + +def reshape(src, tgt): + """ + reshape([tp_size_src, pp_size_src, dp_size_src], + [tp_size_tgt, pp_size_tgt, dp_size_tgt]) + """ + + print(f"\n\n*** Reshaping: {src} => {tgt}") + + tp_size_src, pp_size_src, dp_size_src = src + tp_size_tgt, pp_size_tgt, dp_size_tgt = tgt + + tp_ranks1, pp_ranks1, dp_ranks1 = get_mpu_ranks(tp_size=tp_size_src, pp_size=pp_size_src, dp_size=dp_size_src) + tp_ranks2, pp_ranks2, dp_ranks2 = get_mpu_ranks(tp_size=tp_size_tgt, pp_size=pp_size_src, dp_size=dp_size_src) + tp_ranks3, pp_ranks3, dp_ranks3 = get_mpu_ranks(tp_size=tp_size_tgt, pp_size=pp_size_tgt, dp_size=dp_size_src) + + # handle tp contraction first + print("\n*** TP contraction:") + + for i, r in enumerate(tp_ranks1): + print(f'{tp_ranks1[i]} => {tp_ranks2[i]}') + + # handle pp contraction next + + print("\n*** PP contraction:") + + for i, r in enumerate(pp_ranks1): + print(f'{pp_ranks2[i]} => {pp_ranks3[i]}') + + +# easy +#reshape([2,2,1],[1,1,1]) + +# probably need more logic to suggest how to pack +#reshape([4,4,1],[2,2,1]) + +#reshape([2,4,2], [8,32,1]) + +# get_mpu_ranks(2,2,2) +# get_mpu_ranks(4,2,1) +# get_mpu_ranks(2,4,1) +# get_mpu_ranks(1,1,8) diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_utils.py b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..137607721ebf91804f40105daf9ea3bf5c2600aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_utils.py @@ -0,0 +1,113 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import re +import torch +from collections import OrderedDict +from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX, MODEL_FILE_PREFIX) + + +def basic_folder_validation(dir): + assert os.path.exists(dir), f'{dir} path does not exist' + assert os.path.isdir(dir), f'{dir} is not a folder' + + +def get_files_with_prefix(all_files, prefix): + file_list = [] + for file_path in all_files: + _, fname = os.path.split(file_path) + if fname.startswith(prefix): + file_list.append(file_path) + + return sorted(file_list) + + +def validate_files(file_list): + for file in file_list: + if not os.path.isfile(file): + print(f'Error: {file} is not existent') + + +def get_files(dir): + file_list = [] + for root, _, files in os.walk(dir): + for file in files: + file_list.append(os.path.join(root, file)) + return file_list + + +def sort_zero_files(files, prefix): + pattern = f"{prefix}([0-9]+)_{MODEL_FILE_PREFIX}([0-9]+)" + rank_pairs = [] + for f in files: + m = re.search(pattern, f) + if m: + dp_rank = int(m.group(1)) + mp_rank = int(m.group(2)) + rank_pairs.append((dp_rank, mp_rank, f)) + else: + raise ValueError(f"Cannot parse dp_rank and mp_rank from {f}") + + sorted_files = sorted(rank_pairs, key=lambda x: (x[0], x[1])) + return [f for _, _, f in sorted_files] + + +def get_zero_files(dir): + file_list = get_files(dir) + for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]: + zero_files = get_files_with_prefix(file_list, prefix) + if len(zero_files) > 0: + return sort_zero_files(zero_files, prefix) + + return [] + + +def partition_data(data_list, num_partitions): + num_elems = len(data_list) + assert num_elems % num_partitions == 0 + partition_size = num_elems // 
num_partitions + partitions_list = [data_list[i:i + partition_size] for i in range(0, num_elems, partition_size)] + return partitions_list + + +def _key_list_to_string(key_list): + return '.'.join(key_list) + + +def merge_state_dict(dict_a, dict_b, key_list): + merged_dict = type(dict_a)({}) + + for key, value in dict_b.items(): + if key in dict_a.keys(): + merged_dict[key] = merge_state(dict_a[key], dict_b[key], [str(key)]) + else: + merged_dict[key] = value + + return merged_dict + + +def merge_state_list(list_a, list_b, key_list): + if len(list_a) != len(list_b): + print(f'{_key_list_to_string(key_list)}') + raise ValueError(f'Cannot merge lists of different lengths, a = {len(list_a)} b = {len(list_b)}') + + return [merge_state(a, b, key_list) for a, b in zip(list_a, list_b)] + + +def merge_state(state_a, state_b, key_list=[]): + if type(state_a) != type(state_b): + key_list_string = _key_list_to_string(key_list) + print(f'key_list = {key_list_string}') + raise ValueError(f'Cannot merge two states of types {type(state_a)} and type {type(state_b)}') + + if type(state_a) in (dict, OrderedDict): + return merge_state_dict(state_a, state_b, key_list) + elif type(state_a) in (list, tuple): + return type(state_a)(merge_state_list(state_a, state_b, key_list)) + elif torch.is_tensor(state_a): + return torch.cat([state_a, state_b], 0) + else: + return state_a diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/universal_checkpoint.py b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/universal_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..064891a8bb5407202b694d8df3ef3b70cc78f3a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/universal_checkpoint.py @@ -0,0 +1,146 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import re +import torch +import types +from typing import List, Tuple, Union +from dataclasses import dataclass +from .constants import (FP32_WEIGHT_KEY, PARAM, VOCAB_TENSOR, CAT_DIM, PARAM_N_SUB_PARAMS, SUB_PARAM_SHAPE) + + +@dataclass +class SubparamShape: + patterns: List[str] + shape: Tuple[Union[Tuple[int], int]] + partition_dim: int + + +def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size): + hp_mapping = self._hp_mapping + hp_mapping.optim_fragment = {} + + hp_keys = [] + for file in os.listdir(folder): + # We expect files named something like "exp_avg.pt", "exp_avg_sq.pt", "fp32.pt" + pattern = r'(.+).pt' + match = re.search(pattern, file) + if match: + hp_keys.append(match.group(1)) + + step = None + for key in hp_keys: + ckpt_file = os.path.join(folder, f"{key}.pt") + ckpt_dict = torch.load(ckpt_file) + + if key == "step": + step = ckpt_dict + continue + + full_hp_param = ckpt_dict[PARAM] + + # need to deal with slices that were averaged. + # the opposite of averaging here becomes an exact copy of the first slice + # I thought of 2 ways: + # implementation a. find a way for a client to pass a dict with patterns + # if any(re.search(pattern, folder) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS): + # tp_rank = 0 + # tp_world_size = 1 + # the other approach is to assume that the saved data is correct and if full_hp_param.shape == + # self.shape that means we automatically copy? + # implementation b. 
+ # this version requires no additional data passed from the client + # if the shapes already match it must be slices that were averaged - so we just hack around those + if full_hp_param.shape == self.shape: + tp_rank = 0 + tp_world_size = 1 + + # special case for word_embeddings weights which get padded differently depending on TP degree. + # the converter to universal currently strips the original padding completely so the saved + # weight is padding-free and we just need to add new padding depending on the target TP + # degree + is_vocab_tensor = ckpt_dict.get(VOCAB_TENSOR, False) + if is_vocab_tensor: + # In the absence of data passed from the user wrt new padded vocab specific to tp degree + # we can again derive that data by reverse engineering the target shapes like so: + padded_target_vocab_size = self.shape[0] * tp_world_size + assert padded_target_vocab_size >= full_hp_param.shape[0], \ + f'Vocab tensor padded size {padded_target_vocab_size} < loaded universal size {full_hp_param.shape[0]}' + if padded_target_vocab_size > full_hp_param.shape[0]: + padding_size = padded_target_vocab_size - full_hp_param.shape[0] + full_hp_param = torch.nn.functional.pad(full_hp_param, (0, 0, 0, padding_size), "constant", 0) + + full_param_numel = full_hp_param.numel() + tp_slice_numel = self.numel() + # if key == FP32_WEIGHT_KEY and 'word_embeddings.weight' in folder: + # print_rank_0(f'{full_hp_param[:10]=}', force=True) + + + assert full_param_numel == tp_world_size * tp_slice_numel, \ + f'Loading {ckpt_file} full param numel {full_param_numel} != tensor slice numel {tp_slice_numel} * tp_world_size {tp_world_size}' + + # print(f"{full_hp_param.shape=} {full_param_numel=} {folder=}") + # print(f"{dst_tensor.shape=} {dst_tensor.numel()=}{folder=}") + + sub_param_shape = ckpt_dict.get(SUB_PARAM_SHAPE, None) + # since when we do many to 1 on tp we cat sometimes on dim=0 and other times on dim=1 we have to do exactly the same in reverse + # special case is when a single parameter is effectively a container for multiple sub parameters + # (more details at PARAM_N_SUB_PARAMS definition) + chunk_dim = ckpt_dict.get(CAT_DIM, 0) + n_sub_params = ckpt_dict.get(PARAM_N_SUB_PARAMS, 1) + if sub_param_shape: + partition_dim = sub_param_shape.partition_dim + sub_dim_sizes = sub_param_shape.shape[partition_dim] + if not isinstance(sub_dim_sizes, tuple): + sub_dim_sizes = (sub_dim_sizes, ) + + partition_shape = [sum(d) if isinstance(d, tuple) else d for d in sub_param_shape.shape] + full_hp_param = full_hp_param.view(partition_shape) + + offset = 0 + merged_chunks = [] + for sub_dim_size in sub_dim_sizes: + sub_params_tp_slice = full_hp_param.narrow(partition_dim, + offset, sub_dim_size).chunk(tp_world_size, + dim=partition_dim)[tp_rank] + merged_chunks.append(sub_params_tp_slice) + offset += sub_dim_size + tp_hp_slice = torch.cat(merged_chunks, dim=partition_dim) + + elif n_sub_params > 1: + sub_params = full_hp_param.chunk(n_sub_params, dim=chunk_dim) + sub_params_tp_slice = [p.chunk(tp_world_size, dim=chunk_dim)[tp_rank] for p in sub_params] + tp_hp_slice = torch.cat(sub_params_tp_slice, dim=chunk_dim) + else: + # this performs the opposite of cat when merging TP slices + tp_hp_slice = full_hp_param.chunk(tp_world_size, chunk_dim)[tp_rank] + + tp_hp_slice = tp_hp_slice.flatten() + + lp_frag_address = hp_mapping.lp_fragment_address + tp_hp_fragment = tp_hp_slice.narrow(0, lp_frag_address.start, lp_frag_address.numel) + + # print(f"{key} SHAPE: {tp_hp_slice.shape=}") + # print(f"{key} SHAPE: 
{dst_tensor.shape=}") + # print(f"{key} SHAPE: {tp_hp_fragment.shape=}") + + if key == FP32_WEIGHT_KEY: + dst_tensor = hp_mapping.get_hp_fragment() + assert dst_tensor.numel() == lp_frag_address.numel, \ + f'Load checkpoint {key} dst numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}' + dst_tensor.data.copy_(tp_hp_fragment.data) + else: + assert tp_hp_fragment.numel() == lp_frag_address.numel, \ + f'Load checkpoint {key} dst numel {tp_hp_fragment.numel()} != src numel {lp_frag_address.numel}' + + hp_mapping.optim_fragment[key] = tp_hp_fragment.clone().detach() + + return step + + +def enable_universal_checkpoint(param_list): + for param in param_list: + param.load_hp_checkpoint_state = types.MethodType(load_hp_checkpoint_state, param) diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/utils.py b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c305e8884e83b842815eb84c5cd9b582c7b53483 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/utils.py @@ -0,0 +1,62 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import torch +from .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX) + + +def get_model_ckpt_name_for_rank(base_folder, mp_rank_str): + ckpt_name = os.path.join( + base_folder, + MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX, + ) + return ckpt_name + + +def get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank): + zero_prefix = f'{ZERO_FILE_PREFIX}{dp_rank}' + mp_rank_string = f'_{MODEL_FILE_PREFIX}{mp_rank:02d}' + zero_ckpt_name = os.path.join( + base_folder, + zero_prefix + mp_rank_string + OPTIM_FILE_SUFFIX, + ) + return zero_ckpt_name + + +def get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank): + ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}' + ckpt_path = os.path.join(base_folder, ckpt_file) + return ckpt_path + + +# We pass cloned tensors to torch.save() to avoid checkpoint bloat that occurs when torch.save() +# saves the underlying storage rather than the slice of the storage corresponding to individual tensors. +# This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers. +# Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size. +# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat. +# See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing +def clone_tensors_for_torch_save(item, device=torch.device('cpu')): + """ + Returns a copy of ``item`` with all enclosed tensors replaced by clones on a specified device. + Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts. + + Parameters: + - ``item``: tensor to clone or (possibly nested) container of tensors to clone. 
+ - ``device``: target device (defaults to 'cpu') + + Returns: + - copy of ``item`` with cloned tensors on target device + """ + if torch.is_tensor(item): + return item.detach().clone().to(device) + elif isinstance(item, list): + return [clone_tensors_for_torch_save(v, device) for v in item] + elif isinstance(item, tuple): + return tuple([clone_tensors_for_torch_save(v, device) for v in item]) + elif isinstance(item, dict): + return type(item)({k: clone_tensors_for_torch_save(v, device) for k, v in item.items()}) + else: + return item diff --git a/venv/lib/python3.10/site-packages/deepspeed/checkpoint/zero_checkpoint.py b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/zero_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..6730b93dfd4fbe8502c69ced3a31592b79d00c32 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/checkpoint/zero_checkpoint.py @@ -0,0 +1,142 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from .constants import (BASE_OPTIMIZER_STATE, GROUP_PADDINGS, OPTIMIZER_STATE_DICT, PARTITION_COUNT) + +from .reshape_utils import (basic_folder_validation, get_zero_files, merge_state) + +from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor) + +GROUP_STATE_KEY = 'state' + + +class ZeROCheckpoint(object): + + def __init__(self, dir): + basic_folder_validation(dir) + self.dir = dir + self.file_list = get_zero_files(dir) + self.num_files = len(self.file_list) + assert self.num_files > 0, f'No ZeRO files found in {dir}' + + self.src_3d = get_model_3d_descriptor(dir) + self.target_3d = model_3d_desc(pp_degree=self.src_3d.pp_degree, + tp_degree=self.src_3d.tp_degree, + dp_degree=self.src_3d.dp_degree) + self._3d_file_map = self.src_3d.reshape(self.target_3d) + + def get_src_world_size(self): + return self.src_3d.world_size() + + def get_src_tp_degree(self): + return self.src_3d.tp_degree + + def get_src_pp_degree(self): + return self.src_3d.pp_degree + + def get_src_dp_degree(self): + return self.src_3d.dp_degree + + def get_file_indices_for_rank(self, pp_index, tp_index, dp_index): + assert dp_index < len(self._3d_file_map), f'DP index {dp_index} >= DP degree {len(self._3d_file_map)}' + dp_2d_map = self._3d_file_map[dp_index] + return dp_2d_map.get_data(pp_index, tp_index) + + def get_files_for_rank(self, pp_index, tp_index, dp_index): + file_idx_list = self.get_file_indices_for_rank(pp_index, tp_index, dp_index) + return [self.file_list[idx] for idx in file_idx_list] + + def get_state_for_rank(self, pp_index, tp_index, dp_index, keys_to_ignore=[], strip_tensor_paddings=True): + state_file_list = self.get_files_for_rank(pp_index, tp_index, dp_index) + merged_sd = None + for state_file in state_file_list: + sd = torch.load(state_file, map_location=torch.device('cpu')) + for key in keys_to_ignore: + sd.pop(key, None) + + if strip_tensor_paddings: + self._strip_tensor_paddings(sd) + + if merged_sd is None: + merged_sd = sd + else: + merged_sd = merge_state(merged_sd, sd) + + self._update_partition_count(merged_sd) + if strip_tensor_paddings: + self._clear_group_paddings(merged_sd) + + return merged_sd + + def print_3d_index_map(self, tag=None): + if tag: + print(f'3D index map: {tag}') + for dp_index, _2d_map in enumerate(self._3d_file_map): + _2d_map.print_data(f'dp = {dp_index}') + + def print_3d_file_map(self, tag=None): + if tag: + print(f'3D file map: {tag}') + for dp_index, _2d_map in enumerate(self._3d_file_map): + for pp_index in _2d_map.pp_degree: 
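+                # for this dp rank, print the checkpoint files mapped to each (pp, tp) coordinate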
+ for tp_index in _2d_map.tp_degree: + file_index_list = _2d_map.get_data(pp_index, tp_index) + file_list = [self.file_list[idx] for idx in file_index_list] + print(f'{pp_index}, {tp_index}, {dp_index} => {file_list}') + + def reshape(self, target_3d_desc: model_3d_desc): + self.target_3d = target_3d_desc + self._3d_file_map = self.src_3d.reshape(self.target_3d) + + def _strip_tensor_paddings(self, sd): + param_group_states = self._get_param_group_states(sd) + if param_group_states is None: + return + + group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS) + if group_paddings is None: + return + + for key, group_state in param_group_states.items(): + if group_paddings[key] == 0: + continue + for state_name, state_value in group_state.items(): + if state_name != "step" and torch.is_tensor(state_value): + raw_length = state_value.numel() - group_paddings[key] + group_state[state_name] = torch.narrow(state_value, 0, 0, raw_length).clone() + else: + group_state[state_name] = state_value + + def _clear_group_paddings(self, sd): + group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS) + if group_paddings: + num_groups = len(group_paddings) + sd[OPTIMIZER_STATE_DICT][GROUP_PADDINGS] = [0] * num_groups + + def _get_optimizer_state(self, sd, state_key): + optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None) + if optimizer_state is None: + return None + + return optimizer_state.get(state_key, None) + + def _get_param_group_states(self, sd): + optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None) + if optimizer_state is None: + return None + + base_optimizer_state = optimizer_state.get(BASE_OPTIMIZER_STATE, None) + if base_optimizer_state is None: + return None + + return base_optimizer_state.get(GROUP_STATE_KEY, None) + + def _update_partition_count(self, sd): + partition_counts = self._get_optimizer_state(sd, PARTITION_COUNT) + if partition_counts: + num_groups = len(partition_counts) + sd[OPTIMIZER_STATE_DICT][PARTITION_COUNT] = [self.target_3d.dp_degree] * num_groups diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/compression/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8c7e1c89387326f7c043f95ff0b2153c4b9f21fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/compression/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .compress import init_compression, redundancy_clean +from .scheduler import compression_scheduler +from .helper import convert_conv1d_to_linear diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/basic_layer.py b/venv/lib/python3.10/site-packages/deepspeed/compression/basic_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..bc2b54951bbe9833fb533384cffbef86b513862f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/compression/basic_layer.py @@ -0,0 +1,840 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import math +from torch import nn +from torch.nn import init +import deepspeed.comm as dist +from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer +from deepspeed.utils import logger + +g_mpu = None + + +class QuantAct(nn.Module): + """ + Class to quantize given activations. 
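+    During training, the quantization range is tracked as a momentum-weighted running min/max of the observed activations.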
Note that when using this function, the input activation quantization range will be fixed for all + tokens/images for inference. This generally will affect some accuracy but achieve better latency performance. + Parameters: + ---------- + act_range_momentum : float, default 0.95 + Momentum for updating the activation quantization range. + quant_mode : str, default 'symmetric' + """ + + def __init__(self, act_range_momentum=0.95, quant_mode='symmetric'): + super(QuantAct, self).__init__() + + self.act_range_momentum = act_range_momentum + self.quant_mode = quant_mode + if quant_mode == 'symmetric': + self.act_function = SymQuantizer.apply + else: + self.act_function = AsymQuantizer.apply + + self.register_buffer('x_min_max', torch.zeros(2)) + + def forward(self, x, num_bits, *args): + """ + x: the activation that we need to quantize + num_bits: the number of bits we need to quantize the activation to + *args: some extra arguments that are useless but needed for align with the interface of other quantization functions + """ + + if self.training: + x_min = x.data.min() + x_max = x.data.max() + + # Initialization + if self.x_min_max[0] == self.x_min_max[1]: + self.x_min_max[0] = x_min + self.x_min_max[1] = x_max + + # if do not need momentum, please set self.act_range_momentum = 0 + self.x_min_max[0] = self.x_min_max[0] * self.act_range_momentum + x_min * (1 - self.act_range_momentum) + self.x_min_max[1] = self.x_min_max[1] * self.act_range_momentum + x_max * (1 - self.act_range_momentum) + + x_q = self.act_function(x, num_bits, self.x_min_max[0], self.x_min_max[1]) + + return x_q + + +class Embedding_Compress(nn.Embedding): + + def __init__(self, *kargs): + super(Embedding_Compress, self).__init__(*kargs) + self.weight.start_bits = None + self.weight.target_bits = None + self.weight.q_period = None + self.weight_quantization_enabled_in_forward = False + self.weight_quantization_enabled = False + + def extra_repr(self): + return 'num_embeddings={}, embedding_dim={}, weight_quantization={}'.format( + self.num_embeddings, self.embedding_dim, self.weight.target_bits) + + def enable_weight_quantization(self, start_bits, target_bits, quantization_period, + weight_quantization_enabled_in_forward, quantization_type, num_groups): + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = quantization_period + self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward + if self.weight_quantization_enabled_in_forward: + logger.warning( + "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************" + ) + if self.weight.target_bits >= 3: + if quantization_type == 'symmetric': + self.weight_quantizer = SymQuantizer.apply + else: + self.weight_quantizer = AsymQuantizer.apply + elif self.weight.target_bits == 2: + assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization' + self.weight_quantizer = TernaryQuantizer.apply + elif self.weight.target_bits == 1: + assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization' + self.weight_quantizer = BinaryQuantizer.apply + # for embedding, we always use token-wise quantization + self.weight_quantize_num_groups = self.weight.size(0) + + def fix_weight_quantization(self): + self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None, + self.weight_quantize_num_groups).data + 
self.weight_quantization_enabled_in_forward = False + return None + + def forward(self, input): + if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled: + weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None, + self.weight_quantize_num_groups) + else: + weight = self.weight + + out = nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type, + self.scale_grad_by_freq, self.sparse) + return out + + +class LinearLayer_Compress(nn.Linear): + """ + Linear layer with compression. + """ + + def __init__(self, *kargs, bias=True): + super(LinearLayer_Compress, self).__init__(*kargs, bias=bias) + self.sparse_pruning_method = None + self.row_pruning_method = None + self.head_pruning_method = None + self.activation_quantization_method = None + self.weight.start_bits = None + self.weight.target_bits = None + self.weight.q_period = None + self.weight_quantization_enabled_in_forward = False + self.weight_quantization_enabled = False + self.sparse_pruning_enabled = False + self.row_pruning_enabled = False + self.head_pruning_enabled = False + self.activation_quantization_enabled = False + + def extra_repr(self): + return 'in_features={}, out_features={}, bias={}, sparse pruning={}, row pruning={}, head pruning={}, activation quantization={}, weight_quantization={}'.format( + self.in_features, self.out_features, self.bias is not None, self.sparse_pruning_method is not None, \ + self.row_pruning_method is not None, self.head_pruning_method is not None, self.activation_quantization_method is not None, self.weight.target_bits) + + def enable_sparse_pruning(self, ratio, method): + # Here, we support two cases: L1 norm based pruning and topk based pruning + self.sparse_pruning_ratio = ratio + self.sparse_pruning_method = method + if method == 'l1': + weight_norm = torch.abs(self.weight.data) + mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False) + mask = mask.view(self.weight.size()) + mask = mask.to(self.weight.device) + elif method == 'topk': + self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size())) + self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device) + init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5)) + mask = None + else: + raise NotImplementedError + + self.register_buffer('sparse_pruning_mask', mask) + + def enable_row_pruning(self, ratio, method): + # Here, we support two cases: L1 norm based pruning and topk based pruning + self.row_pruning_ratio = ratio + self.row_pruning_method = method + + if method == 'l1': + # compute the l1 norm of each column + weight_norm = torch.linalg.norm(self.weight.data, ord=1, dim=1) + mask = TopKBinarizer.apply(weight_norm, self.row_pruning_ratio, False) + mask = mask.view(-1, 1) + mask = mask.to(self.weight.device) + elif method == 'topk': + self.row_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1)) + self.row_mask_scores.data = self.row_mask_scores.data.to(self.weight.device) + init.kaiming_uniform_(self.row_mask_scores, a=math.sqrt(5)) + mask = None + else: + raise NotImplementedError + + self.register_buffer('row_pruning_mask', mask) + + def enable_head_pruning(self, ratio, method, num_heads): + # Here, we support only topk based pruning + self.num_heads = num_heads + self.head_pruning_ratio = ratio + self.head_pruning_method = method + + if method not in ['topk']: + raise NotImplementedError + else: + self.head_pruning_ratio = ratio + self.head_pruning_scores = 
nn.Parameter(torch.Tensor(1, + self.num_heads)) # we apply the pruning to O matrix + self.head_pruning_scores.data = self.head_pruning_scores.data.to(self.weight.device) + init.kaiming_uniform_(self.head_pruning_scores, a=math.sqrt(5)) + + def fix_sparse_pruning_helper(self): + mask = self.get_mask(pruning_type='sparse') + self.weight.data = self.weight.data * mask + del self.sparse_pruning_mask + if self.sparse_pruning_method == 'topk': + del self.sparse_mask_scores + self.sparse_pruning_method = None + self.sparse_pruning_enabled = False + return None + + def fix_row_col_pruning_helper(self, mask=None, dim_reduction=False): + # This function is used for row/col pruning + # particularly, if we have two back-to-back layers, F1 and F2; when + # we remove rows from F1, we also need to remove columns from F2 + # However, if we only have one layer, F1, then we only need to mask pruned + # rows as 0 in F1 + if mask is None: + mask = self.get_mask(pruning_type='row').bool() + if dim_reduction: + start_bits = self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + self.weight = nn.Parameter(self.weight.data[mask.view(-1), :]) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = q_period + if self.bias is not None: + self.bias = nn.Parameter(self.bias.data[mask.view(-1)]) + self.out_features = self.weight.size(0) + else: + self.weight.data = self.weight.data * mask.view(-1, 1) + if self.bias is not None: + self.bias.data = self.bias.data * mask.view(-1) + + del self.row_pruning_mask + if self.row_pruning_method == 'topk': + del self.row_mask_scores + self.row_pruning_method = None + else: + # this is generally for column pruning + start_bits = self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + self.weight = nn.Parameter(self.weight.data[:, mask.view(-1)]) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = q_period + self.in_features = self.weight.size(1) + mask = None + self.row_pruning_enabled = False + return mask + + def fix_head_pruning_helper(self, mask=None, num_heads=None, dim_reduction=False): + # similar as row/col pruning, head pruning also needs to prune QKV which is associated with O matrix + num_heads = num_heads if num_heads else self.num_heads + if mask is None: + if self.head_pruning_method == 'topk': + mask = self.get_mask(pruning_type='head').bool() + if dim_reduction: + shape = self.weight.size(0) + start_bits = self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + self.weight = nn.Parameter(self.weight.data.t().reshape(num_heads, + -1)[mask.view(-1), :].reshape(-1, + shape).t()) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = q_period + else: + + shape = self.weight.size() + self.weight.data = (self.weight.data.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape( + shape[1], shape[0]).t() + + if self.head_pruning_method == 'topk': + del self.head_pruning_scores + self.head_pruning_method = None + else: + raise NotImplementedError + else: + start_bits = self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + shape = self.weight.size(1) + self.weight = nn.Parameter(self.weight.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1, shape)) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = 
q_period + if self.bias is not None: + self.bias = nn.Parameter(self.bias.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1)) + self.head_pruning_enabled = False + return mask + + def get_mask(self, pruning_type='row'): + if pruning_type == 'sparse': + if self.sparse_pruning_method == 'l1': + return self.sparse_pruning_mask.to(self.weight.device) + elif self.sparse_pruning_method == 'topk': + return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False) + else: + raise NotImplementedError + if pruning_type == 'row': + if self.row_pruning_method == 'l1': + return self.row_pruning_mask.to(self.weight.device) + elif self.row_pruning_method == 'topk': + return TopKBinarizer.apply(self.row_mask_scores, self.row_pruning_ratio, False) + else: + raise NotImplementedError + elif pruning_type == 'head': + if self.head_pruning_method == 'topk': + return TopKBinarizer.apply(self.head_pruning_scores, self.head_pruning_ratio, False) + else: + raise NotImplementedError + else: + raise NotImplementedError + + def enable_weight_quantization(self, start_bits, target_bits, quantization_period, + weight_quantization_enabled_in_forward, quantization_type, num_groups): + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = quantization_period + self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward + if self.weight_quantization_enabled_in_forward: + logger.warning( + "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************" + ) + if self.weight.target_bits >= 3: + if quantization_type == 'symmetric': + self.weight_quantizer = SymQuantizer.apply + else: + self.weight_quantizer = AsymQuantizer.apply + elif self.weight.target_bits == 2: + assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization' + self.weight_quantizer = TernaryQuantizer.apply + elif self.weight.target_bits == 1: + assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization' + self.weight_quantizer = BinaryQuantizer.apply + self.weight_quantize_num_groups = num_groups + + def fix_weight_quantization(self): + self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None, + self.weight_quantize_num_groups).data + self.weight_quantization_enabled_in_forward = False + return None + + def enable_activation_quantization(self, bits, quantization_type, range_calibration): + assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now' + self.activation_quantization_bits = bits + self.activation_quantization_method = f"{quantization_type}_{range_calibration}" + if range_calibration == 'static': + self.activation_quantizer = QuantAct(quant_mode=quantization_type) + else: + if quantization_type == 'symmetric': + self.activation_quantizer = SymQuantizer.apply + else: + self.activation_quantizer = AsymQuantizer.apply + + def head_pruning_reshape(self, w, mask): + shape = w.shape + return (w.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape(shape[1], shape[0]).t() + + def forward(self, input, skip_bias_add=False): + + if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled: + weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None, + self.weight_quantize_num_groups) + bias = self.bias + else: + weight = self.weight + bias = self.bias + + if 
self.sparse_pruning_enabled and self.sparse_pruning_method: + mask = self.get_mask(pruning_type='sparse') + weight = weight * mask.view(self.weight.size()) + + if self.row_pruning_enabled and self.row_pruning_method: + mask = self.get_mask(pruning_type='row') + weight = weight * mask.view(-1, 1) + if bias is not None: + bias = bias * mask.view(-1) + + if self.head_pruning_enabled and self.head_pruning_method: + mask = self.get_mask(pruning_type='head') + weight = self.head_pruning_reshape(weight, mask) + + if self.activation_quantization_enabled: + if 'dynamic' in self.activation_quantization_method: + num_groups = input.numel() // input.size(-1) + else: + num_groups = 1 + input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups) + + if skip_bias_add: + # used for mpu linear layers + output = nn.functional.linear(input, weight, None) + return output, bias + else: + output = nn.functional.linear(input, weight, bias) + return output + + +class Conv2dLayer_Compress(nn.Conv2d): + """ + Conv2D layer with compression. + """ + + def __init__(self, *kargs): + super(Conv2dLayer_Compress, self).__init__(*kargs) + self.sparse_pruning_method = None + self.channel_pruning_method = None + self.activation_quantization_method = None + self.weight.start_bits = None + self.weight.target_bits = None + self.weight.q_period = None + self.weight_quantization_enabled_in_forward = False + self.sparse_pruning_enabled = False + self.channel_pruning_enabled = False + self.activation_quantization_enabled = False + + def __repr__(self): + s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}' + ', stride={stride}') + if self.padding != (0, ) * len(self.padding): + s += ', padding={padding}' + if self.dilation != (1, ) * len(self.dilation): + s += ', dilation={dilation}' + if self.output_padding != (0, ) * len(self.output_padding): + s += ', output_padding={output_padding}' + if self.groups != 1: + s += ', groups={groups}' + if self.bias is None: + s += ', bias=False' + if self.padding_mode != 'zeros': + s += ', padding_mode={padding_mode}' + output = s.format(**self.__dict__) + + return output + ' sparse pruning={}, channel pruning={}, activation quantization={}, weight_quantization={}'.format( + self.sparse_pruning_method is not None, self.channel_pruning_method is not None, + self.activation_quantization_method is not None, self.weight.target_bits) + + def enable_sparse_pruning(self, ratio, method): + self.sparse_pruning_ratio = ratio + self.sparse_pruning_method = method + if method == 'l1': + weight_norm = torch.abs(self.weight.data) + mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False) + mask = mask.view(self.weight.size()) + mask = mask.to(self.weight.device) + elif method == 'topk': + self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size())) + self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device) + init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5)) + mask = None + else: + raise NotImplementedError + + self.register_buffer('sparse_pruning_mask', mask) + + def enable_channel_pruning(self, ratio, method): + # Here, we support two cases: L1 norm based pruning and topk based pruning + self.channel_pruning_ratio = ratio + self.channel_pruning_method = method + + if method == 'l1': + # compute the l1 norm of each conv2d kernel (the last three dimension) + weight_norm = torch.linalg.norm(self.weight.data, ord=1, dim=[1, 2, 3]) + mask = TopKBinarizer.apply(weight_norm, 
self.channel_pruning_ratio, False) + mask = mask.view(-1, 1, 1, 1) + mask = mask.to(self.weight.device) + elif method == 'topk': + self.channel_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1, 1, 1)) + self.channel_mask_scores.data = self.channel_mask_scores.data.to(self.weight.device) + init.kaiming_uniform_(self.channel_mask_scores, a=math.sqrt(5)) + mask = None + else: + raise NotImplementedError + + self.register_buffer('channel_pruning_mask', mask) + + def fix_sparse_pruning_helper(self): + mask = self.get_mask(pruning_type='sparse') + self.weight.data = self.weight.data * mask + del self.sparse_pruning_mask + if self.sparse_pruning_method == 'topk': + del self.sparse_mask_scores + self.sparse_pruning_method = None + self.sparse_pruning_enabled = False + return None + + def fix_channel_pruning_helper(self, mask=None, dim_reduction=False): + if mask is None: + if self.channel_pruning_method in ['l1', 'topk']: + mask = self.get_mask(pruning_type='channel').bool() + if dim_reduction: + start_bits = self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + self.weight = nn.Parameter(self.weight.data[mask.view(-1), ...]) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = q_period + if self.bias is not None: + self.bias = nn.Parameter(self.bias.data[mask.view(-1)]) + else: + self.weight.data = self.weight.data * mask.view(-1, 1, 1, 1) + if self.bias is not None: + self.bias.data = self.bias.data * mask.view(-1) + del self.channel_pruning_mask + if self.channel_pruning_method == 'topk': + del self.channel_mask_scores + self.channel_pruning_method = None + else: + raise NotImplementedError + else: + start_bits = self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + self.weight = nn.Parameter(self.weight.data[:, mask.view(-1), ...]) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = q_period + mask = None + self.channel_pruning_enabled = False + return mask + + def get_mask(self, pruning_type='sparse'): + if pruning_type == 'sparse': + if self.sparse_pruning_method == 'l1': + return self.sparse_pruning_mask.to(self.weight.device) + elif self.sparse_pruning_method == 'topk': + return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False) + else: + raise NotImplementedError + elif pruning_type == 'channel': + if self.channel_pruning_method == 'l1': + return self.channel_pruning_mask.to(self.weight.device) + elif self.channel_pruning_method == 'topk': + return TopKBinarizer.apply(self.channel_mask_scores, self.channel_pruning_ratio, False) + else: + raise NotImplementedError + else: + raise NotImplementedError + + def fix_weight_quantization(self): + self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None, + self.weight_quantize_num_groups).data + self.weight_quantization_enabled_in_forward = False + return None + + def enable_weight_quantization(self, start_bits, target_bits, quantization_period, + weight_quantization_enabled_in_forward, quantization_type, num_groups): + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = quantization_period + self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward + if self.weight_quantization_enabled_in_forward: + assert self.weight.target_bits >= 4, 'Only >=4 bits weight quantization are supported during forward pass for 
now' + logger.warning( + "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************" + ) + if quantization_type == 'symmetric': + self.weight_quantizer = SymQuantizer.apply + else: + self.weight_quantizer = AsymQuantizer.apply + self.weight_quantize_num_groups = num_groups + + def enable_activation_quantization(self, bits, quantization_type, range_calibration): + assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now' + self.activation_quantization_bits = bits + self.activation_quantization_method = f"{quantization_type}_{range_calibration}" + if range_calibration == 'static': + self.activation_quantizer = QuantAct(quant_mode=quantization_type) + else: + if quantization_type == 'symmetric': + self.activation_quantizer = SymQuantizer.apply + else: + self.activation_quantizer = AsymQuantizer.apply + + def forward(self, input): + + if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled: + weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None, + self.weight_quantize_num_groups) + bias = self.bias + else: + weight = self.weight + bias = self.bias + + if self.sparse_pruning_enabled and self.sparse_pruning_method: + mask = self.get_mask(pruning_type='sparse') + weight = weight * mask.view(self.weight.size()) + + if self.channel_pruning_enabled: + mask = self.get_mask(pruning_type='channel') + weight = weight * mask.view(-1, 1, 1, 1) + if bias is not None: + bias = bias * mask.view(-1) + + if self.activation_quantization_enabled: + if 'dynamic' in self.activation_quantization_method: + num_groups = input.numel() // input[0].numel() + else: + num_groups = 1 + input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups) + + return nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups) + + +class BNLayer_Compress(nn.BatchNorm2d): + + def fix_channel_pruning_helper(self, mask, dim_reduction=True): + self.weight = nn.Parameter(self.weight.data[mask.view(-1)]) + self.bias = nn.Parameter(self.bias.data[mask.view(-1)]) + self.running_mean = self.running_mean[mask.view(-1)] + self.running_var = self.running_var[mask.view(-1)] + + +def _reduce(input_): + """All-reduce the input tensor across model parallel group.""" + group = g_mpu.get_model_parallel_group() + + # Bypass the function if we are using only 1 GPU. + if dist.get_world_size(group=group) == 1: + return input_ + + # All-reduce. + dist.all_reduce(input_, group=group) + + return input_ + + +def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False): + """Split a tensor along its last dimension. + Arguments: + tensor: input tensor. + num_partitions: number of partitions to split the tensor + contiguous_split_chunks: If True, make each chunk contiguous + in memory. + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + assert tensor.size()[last_dim] % num_partitions == 0 + last_dim_size = tensor.size()[last_dim] // num_partitions + # Split. + tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) + # Note: torch.split does not create contiguous tensors by default. 
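+    # e.g. splitting a (4, 6) tensor into 3 partitions yields three (4, 2) views that
+    # share storage with the input unless contiguous copies are requested below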
+ if contiguous_split_chunks: + return tuple(chunk.contiguous() for chunk in tensor_list) + + return tensor_list + + +def _split(input_): + """Split the tensor along its last dimension and keep the + corresponding slice.""" + group = g_mpu.get_model_parallel_group() + + # Bypass the function if we are using only 1 GPU. + if dist.get_world_size(group=group) == 1: + return input_ + + # Split along last dimension. + world_size = dist.get_world_size(group=group) + input_list = split_tensor_along_last_dim(input_, world_size) + + # Note: torch.split does not create contiguous tensors by default. + rank = dist.get_rank(group=group) + output = input_list[rank].contiguous() + + return output + + +def _gather(input_): + """Gather tensors and concatenate along the last dimension.""" + group = g_mpu.get_model_parallel_group() + + # Bypass the function if we are using only 1 GPU. + if dist.get_world_size(group=group) == 1: + return input_ + + # Size and dimension. + last_dim = input_.dim() - 1 + rank = dist.get_rank(group=group) + world_size = dist.get_world_size(group=group) + + tensor_list = [torch.empty_like(input_) for _ in range(world_size)] + tensor_list[rank] = input_ + dist.all_gather(tensor_list, input_, group=group) + + # Note: torch.cat already creates a contiguous tensor. + output = torch.cat(tensor_list, dim=last_dim).contiguous() + + return output + + +class _CopyToModelParallelRegion(torch.autograd.Function): + """Pass the input to the model parallel region.""" + + @staticmethod + def forward(ctx, input_): + return input_ + + @staticmethod + def backward(ctx, grad_output): + return _reduce(grad_output) + + +class _ReduceFromModelParallelRegion(torch.autograd.Function): + """All-reduce the input from the model parallel region.""" + + @staticmethod + def forward(ctx, input_): + return _reduce(input_) + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +class _ScatterToModelParallelRegion(torch.autograd.Function): + """Split the input and keep only the corresponding chuck to the rank.""" + + @staticmethod + def forward(ctx, input_): + return _split(input_) + + @staticmethod + def backward(ctx, grad_output): + return _gather(grad_output) + + +class _GatherFromModelParallelRegion(torch.autograd.Function): + """Gather the input from model parallel region and concatenate.""" + + @staticmethod + def forward(ctx, input_): + return _gather(input_) + + @staticmethod + def backward(ctx, grad_output): + return _split(grad_output) + + +# ----------------- +# Helper functions. +# ----------------- + + +def copy_to_model_parallel_region(input_): + return _CopyToModelParallelRegion.apply(input_) + + +def reduce_from_model_parallel_region(input_): + return _ReduceFromModelParallelRegion.apply(input_) + + +def scatter_to_model_parallel_region(input_): + return _ScatterToModelParallelRegion.apply(input_) + + +def gather_from_model_parallel_region(input_): + return _GatherFromModelParallelRegion.apply(input_) + + +class ColumnParallelLinear_Compress(LinearLayer_Compress): + + def __init__(self, mpu, input_size, output_size, bias=True, gather_output=True, skip_bias_add=False): + # Keep input parameters + global g_mpu + g_mpu = mpu + self.input_size = input_size + self.output_size = output_size + self.gather_output = gather_output + self.skip_bias_add = skip_bias_add + + # Divide the weight matrix along the last dimension. 
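+        # each rank holds output_size // world_size output features; the full output
+        # can be reassembled by all-gathering the per-rank partitions (gather_output=True)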
+ world_size = mpu.get_model_parallel_world_size() + assert output_size % world_size == 0 + self.output_size_per_partition = output_size // world_size + + super(ColumnParallelLinear_Compress, self).__init__(self.input_size, self.output_size_per_partition, bias=bias) + + def forward(self, input_): + # Set up backprop all-reduce. + input_parallel = copy_to_model_parallel_region(input_) + # Matrix multiply. + if self.skip_bias_add: + output_parallel, bias = super().forward(input_parallel, True) + else: + output_parallel = super().forward(input_parallel) + bias = None + if self.gather_output: + # All-gather across the partitions. + output = gather_from_model_parallel_region(output_parallel) + else: + output = output_parallel + return output, bias + + +class RowParallelLinear_Compress(LinearLayer_Compress): + + def __init__(self, mpu, input_size, output_size, bias=True, input_is_parallel=False, skip_bias_add=False): + # Keep input parameters + global g_mpu + g_mpu = mpu + self.input_size = input_size + self.output_size = output_size + self.input_is_parallel = input_is_parallel + self.skip_bias_add = skip_bias_add + + # Divide the weight matrix along the last dimension. + world_size = mpu.get_model_parallel_world_size() + assert input_size % world_size == 0 + self.input_size_per_partition = input_size // world_size + + super(RowParallelLinear_Compress, self).__init__(self.input_size_per_partition, self.output_size, bias=bias) + + def forward(self, input_): + # Set up backprop all-reduce. + if self.input_is_parallel: + input_parallel = input_ + else: + input_parallel = scatter_to_model_parallel_region(input_) + # Matrix multiply. + output_parallel, bias = super().forward(input_parallel, True) + + # All-reduce across all the partitions. + output_ = reduce_from_model_parallel_region(output_parallel) + if not self.skip_bias_add: + if bias is not None: + output = output_ + bias + else: + output = output_ + output_bias = None + else: + output = output_ + output_bias = bias + return output, output_bias diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/compress.py b/venv/lib/python3.10/site-packages/deepspeed/compression/compress.py new file mode 100644 index 0000000000000000000000000000000000000000..2f0e88beee21c040708f6fc7f279826f6c7d5c60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/compression/compress.py @@ -0,0 +1,239 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import re +from .helper import compression_preparation, fix_compression, recursive_getattr, is_module_compressible +from .config import get_compression_config +from ..runtime.config_utils import dict_raise_error_on_duplicate_keys +from .constants import * +import os +import json + +try: + import neural_compressor as nc +except ImportError as e: + nc = None + + +def check_deepspeed_config(config): + if isinstance(config, dict): + return config + elif os.path.exists(config): + return json.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys) + else: + raise ValueError( + f"Expected a string path to an existing deepspeed config, or a dictionary. 
Received: {config}") + + +def get_module_name(group_name, model, key_word, exist_module_name, mpu=None, verbose=True): + ''' + get the associated module name from the model based on the key_word provided by users + ''' + return_module_name = [] + for name, module in model.named_modules(): + + module_check = is_module_compressible(module, mpu) + + if re.search(key_word, name) is not None and module_check: + if name in exist_module_name and verbose: + # logger.warning + raise ValueError( + f"{name} is already added to compression, please check your config file for {group_name}.") + if name not in exist_module_name: + exist_module_name.add(name) + return_module_name.append(name) + return return_module_name, exist_module_name + + +def get_compress_methods(model, compress_methods, mpu=None): + # extract the compression module for each method in compress_methods + layer_added_compress_methods = [] + for method, method_content in compress_methods.items(): + if LAYER_REDUCTION in method: + continue + # for loop different methods, i.e., weight quantization, activation quantization etc + exist_module_name = set() + shared_parameters = method_content[SHARED_PARAMETERS] # get all the shared parameters + for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items(): + # for loop different groups, i.e., weight quantization group 1, weight quantization group 2 etc + module_name_list = [] + related_module_name_list = [] + if method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]: + # this is used for head/row/channel pruning, if users provide the related module scope, we can shrink the layer dim for them + # otherwise we just mask those as zeros + for key_word, related_key_words in zip(method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE], + method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]): + module_name, exist_module_name = get_module_name(group_name, + model, + key_word, + exist_module_name, + mpu=mpu) + module_name_list.append(module_name) + tmp_related_module_name_list = [] + for rkw in related_key_words: + # related key word can be a list, for instance the QKV for O matrix in Attention + module_name, _ = get_module_name(group_name, model, rkw, set(), mpu=mpu) + tmp_related_module_name_list.append(module_name) + related_module_name_list.append(tmp_related_module_name_list) + else: + for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]: + module_name, exist_module_name = get_module_name(group_name, + model, + key_word, + exist_module_name, + mpu=mpu) + module_name_list.append(module_name) + + if module_name_list: + # combine shared parameters with each group + combined_method_parameters = { + **(method_parameters.copy().pop(DIFFERENT_GROUPS_PARAMETERS)), + **shared_parameters + } + compression_item = [module_name_list, related_module_name_list, {method: combined_method_parameters}] + layer_added_compress_methods.append(compression_item) + return layer_added_compress_methods + + +def init_compression(model, deepspeed_config, teacher_model=None, mpu=None): + """ + Compress a model: replace linear/conv2d layer with deepspeed compression-aware modules + Args: + model (`torch.nn.Module`) + The model to compress. 
+ deepspeed_config (`DeepSpeedConfig`) + The path of ds_config + mpu + The mpu module for Row/Column parallelism + """ + compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config)) + if hasattr(model, 'module'): + c_model = model.module + else: + c_model = model + + # For layer reduction + if compress_methods[LAYER_REDUCTION][LAYER_REDUCTION_ENABLED]: + assert teacher_model is not None, "Teacher model is required for layer reduction" + student_initialization(c_model, teacher_model, deepspeed_config) + + layer_added_compress_methods = get_compress_methods(c_model, compress_methods, mpu=mpu) + compression_preparation(c_model, layer_added_compress_methods, mpu) + + # For sparse pruning snip_momentum method + shared_parameters = compress_methods[SPARSE_PRUNING][SHARED_PARAMETERS] + if shared_parameters[SPARSE_PRUNING_ENABLED] and \ + shared_parameters[SPARSE_PRUNING_METHOD] == SPARSE_PRUNING_METHOD_SNIP_MOMENTUM: + + assert nc is not None, "please ensure the neural_compressor python package is installed by pip or conda if user wants to use snip_momentum sparse pruning" + + from .helper import generate_pruners, register_on_step_begin + from nc import WeightPruningConfig + + config = WeightPruningConfig(target_sparsity=1 - shared_parameters[SPARSE_PRUNING_DENSE_RATIO], + pattern=shared_parameters[SPARSE_PRUNING_BLOCK_PATTERN], + pruning_frequency=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE], + start_step=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET], + end_step=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET_END], + excluded_op_names=shared_parameters[SPARSE_PRUNING_EXCLUDED_MODULES]) + pruners = generate_pruners(config, c_model) + c_model.pruners = pruners + register_on_step_begin(c_model) + + return model + + +def redundancy_clean(model, deepspeed_config, mpu=None): + """ + Remove the redundancy of a model + Args: + model (`torch.nn.Module`) + The model to compress. 
+        deepspeed_config (`DeepSpeedConfig`)
+            The path of the ds_config file (or the config dict itself)
+        mpu
+            The mpu module for Row/Column parallelism
+    """
+    compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
+    if hasattr(model, 'module'):
+        c_model = model.module
+    else:
+        c_model = model
+
+    layer_added_compress_methods_tmp = get_compress_methods(c_model, compress_methods, mpu=mpu)
+    # sort methods
+    order_list = [
+        WEIGHT_QUANTIZATION, SPARSE_PRUNING, ROW_PRUNING, HEAD_PRUNING, CHANNEL_PRUNING, ACTIVATION_QUANTIZATION
+    ]
+    layer_added_compress_methods = sorted(layer_added_compress_methods_tmp,
+                                          key=lambda x: order_list.index(list(x[2].keys())[0]))
+
+    for module_name_lists, related_module_name_lists, compression_technique in layer_added_compress_methods:
+        stored_mask = []
+        need_mask = True if related_module_name_lists else False
+        for i, mnl in enumerate(module_name_lists):
+            for module_name in mnl:
+                mask = fix_compression(c_model, module_name, compression_technique, dim_reduction=need_mask)
+                if need_mask:
+                    stored_mask.append(mask)
+            if need_mask:
+                for rmnl in related_module_name_lists[i]:
+                    for j, module_name in enumerate(rmnl):
+                        mask = fix_compression(c_model,
+                                               module_name,
+                                               compression_technique,
+                                               mask=stored_mask[j],
+                                               dim_reduction=True)
+    return model
+
+
+def student_initialization(student_model, teacher_model, deepspeed_config):
+    '''
+    Given a student model and a teacher model, initialize the student's weights from the selected teacher layers.
+    Args:
+        student_model (`torch.nn.Module`)
+            The model whose weights will be updated
+        teacher_model (`torch.nn.Module`)
+            The model that guides the student's learning
+        deepspeed_config (`DeepSpeedConfig`)
+            The path of the ds_config file (or the config dict itself)
+    '''
+    config = get_compression_config(check_deepspeed_config(deepspeed_config))
+    compress_methods = config[LAYER_REDUCTION]
+
+    module_name_prefix = compress_methods[MODULE_NAME_PREFIX]
+    teacher_layer = compress_methods[TEACHER_LAYER]
+    student_layer = [i for i in range(len(teacher_layer))]
+    other_module_name = compress_methods[OTHER_MODULE_NAME]
+    '''
+    name_prefix (`str`)
+        The prefix name before the layer #.
+        Example 1: bert.encoder.layer, the prefix name for a BERT_base model
+        Example 2: transformer.h, the Hugging Face prefix name for GPT-2
+    teacher_layer (`list of integers`)
+        The teacher layers used for the student's reinitialization
+            Example 1: [1,3,5,7,9] means we match the 2nd/4th/6th/8th/10th layer of the teacher to the first 5 layers of the student
+    student_layer (`list` or None)
+        The student layers to be re-initialized
+            Example 1: None means we reinitialize all the layers
+            Example 2: [0,1,2,3,4] means we reinitialize the first 5 layers
+    other_module_name (`list of string`)
+        The modules used for the student's reinitialization
+            Example 1: ['bert.pooler', 'bert.embeddings', 'classifier'] means we copy the teacher's embedding/pooler/classifier weights to the student
+            Example 2: ['transformer.w', 'transformer.ln_f', 'lm_head'] means we copy the teacher's embedding-related modules to the student
+        Note that teacher_layer should match student_layer
+    '''
+    assert len(student_layer) == len(teacher_layer)
+    for s_name, t_name in zip(student_layer, teacher_layer):
+        s_module = recursive_getattr(student_model, module_name_prefix + '.' + str(s_name))
+        t_module = recursive_getattr(teacher_model, module_name_prefix + '.'
+ str(t_name)) + for s_param, t_param in zip(s_module.parameters(), t_module.parameters()): + s_param.data.copy_(t_param.data) + for name in other_module_name: + s_module = recursive_getattr(student_model, name) + t_module = recursive_getattr(teacher_model, name) + print(name) + for s_param, t_param in zip(s_module.parameters(), t_module.parameters()): + s_param.data.copy_(t_param.data) diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/config.py b/venv/lib/python3.10/site-packages/deepspeed/compression/config.py new file mode 100644 index 0000000000000000000000000000000000000000..e1fa5ef4bdb5f01fd609ae30c63406f001aaf781 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/compression/config.py @@ -0,0 +1,452 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .constants import * +import copy +from ..runtime.config_utils import get_scalar_param, get_list_param + + +def get_compression_config(param_dict): + # + output = {} + + if COMPRESSION_TRAINING not in param_dict.keys(): + param_dict[COMPRESSION_TRAINING] = {} + sub_param_dict = param_dict[COMPRESSION_TRAINING] + output[WEIGHT_QUANTIZATION] = get_weight_quantization(sub_param_dict) + output[ACTIVATION_QUANTIZATION] = get_activation_quantization(sub_param_dict) + output[SPARSE_PRUNING] = get_sparse_pruning(sub_param_dict) + output[ROW_PRUNING] = get_row_pruning(sub_param_dict) + output[HEAD_PRUNING] = get_head_pruning(sub_param_dict) + output[CHANNEL_PRUNING] = get_channel_pruning(sub_param_dict) + + output[LAYER_REDUCTION] = get_layer_reduction(sub_param_dict) + + return output + + +def get_layer_reduction(param_dict): + output = {} + output[LAYER_REDUCTION_ENABLED] = LAYER_REDUCTION_ENABLED_DEFAULT + if get_layer_reduction_enabled(param_dict): + output[LAYER_REDUCTION_ENABLED] = get_layer_reduction_enabled(param_dict) + for key, val in get_layer_reduction_params(param_dict).items(): + output[key] = val + return output + + +def get_layer_reduction_enabled(param_dict): + if LAYER_REDUCTION in param_dict.keys(): + return get_scalar_param(param_dict[LAYER_REDUCTION], LAYER_REDUCTION_ENABLED, LAYER_REDUCTION_ENABLED_DEFAULT) + else: + return False + + +def get_layer_reduction_params(param_dict): + if LAYER_REDUCTION in param_dict.keys(): + layer_reduction_params = copy.copy(param_dict[LAYER_REDUCTION]) + layer_reduction_params.pop(LAYER_REDUCTION_ENABLED) + return layer_reduction_params + else: + return False + + +def get_quantize_enabled(param_dict): + if COMPRESSION_TRAINING not in param_dict.keys(): + return False + + sub_param_dict = param_dict[COMPRESSION_TRAINING] + output = get_weight_quantization_shared_parameters(sub_param_dict) + return output[WEIGHT_QUANTIZE_ENABLED] + + +def get_weight_quantization(param_dict): + output = {} + if WEIGHT_QUANTIZATION not in param_dict.keys(): + param_dict[WEIGHT_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}} + sub_param_dict = param_dict[WEIGHT_QUANTIZATION] + # shared parameters + output[SHARED_PARAMETERS] = get_weight_quantization_shared_parameters(sub_param_dict) + # each sub-groups + if output[SHARED_PARAMETERS][WEIGHT_QUANTIZE_ENABLED]: + assert DIFFERENT_GROUPS in sub_param_dict.keys( + ), f"Weigh Quantization is enabled, {DIFFERENT_GROUPS} must be specified" + output[DIFFERENT_GROUPS] = get_weight_quantization_different_groups(sub_param_dict) + return output + + +def get_weight_quantization_shared_parameters(param_dict): + output = {} + if SHARED_PARAMETERS in param_dict.keys(): + 
sub_param_dict = param_dict[SHARED_PARAMETERS] + output[WEIGHT_QUANTIZE_ENABLED] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ENABLED, + WEIGHT_QUANTIZE_ENABLED_DEFAULT) + output[WEIGHT_QUANTIZE_KERNEL] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_KERNEL, + WEIGHT_QUANTIZE_KERNEL_DEFAULT) + output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_SCHEDULE_OFFSET, + WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT) + output[WEIGHT_QUANTIZE_GROUPS] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_GROUPS, + WEIGHT_QUANTIZE_GROUPS_DEFAULT) + output[WEIGHT_QUANTIZE_VERBOSE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_VERBOSE, + WEIGHT_QUANTIZE_VERBOSE_DEFAULT) + output[WEIGHT_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_TYPE, + WEIGHT_QUANTIZE_TYPE_DEFAULT) + output[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] = get_scalar_param(sub_param_dict, + WEIGHT_QUANTIZE_IN_FORWARD_ENABLED, + WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT) + assert output[WEIGHT_QUANTIZE_TYPE] in [ + WEIGHT_QUANTIZE_SYMMETRIC, WEIGHT_QUANTIZE_ASYMMETRIC + ], f"Invalid weight quantize type. Supported types: [{WEIGHT_QUANTIZE_SYMMETRIC}, {WEIGHT_QUANTIZE_ASYMMETRIC}]" + output[WEIGHT_QUANTIZE_ROUNDING] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ROUNDING, + WEIGHT_QUANTIZE_ROUNDING_DEFAULT) + assert output[WEIGHT_QUANTIZE_ROUNDING] in [ + WEIGHT_QUANTIZE_NEAREST_ROUNDING, WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING + ], f"Invalid weight quantize rounding. Supported types: [{WEIGHT_QUANTIZE_NEAREST_ROUNDING}, {WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING}]" + if WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE in sub_param_dict.keys(): + output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = get_scalar_param( + sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED, + WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT) + output[WEIGHT_QUANTIZE_CHANGE_RATIO] = get_scalar_param( + sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_CHANGE_RATIO, + WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT) + else: + output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT + output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT + else: + output[WEIGHT_QUANTIZE_ENABLED] = WEIGHT_QUANTIZE_ENABLED_DEFAULT + output[WEIGHT_QUANTIZE_KERNEL] = WEIGHT_QUANTIZE_KERNEL_DEFAULT + output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT + output[WEIGHT_QUANTIZE_GROUPS] = WEIGHT_QUANTIZE_GROUPS_DEFAULT + output[WEIGHT_QUANTIZE_VERBOSE] = WEIGHT_QUANTIZE_VERBOSE_DEFAULT + output[WEIGHT_QUANTIZE_TYPE] = WEIGHT_QUANTIZE_TYPE_DEFAULT + output[WEIGHT_QUANTIZE_ROUNDING] = WEIGHT_QUANTIZE_ROUNDING_DEFAULT + output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT + output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT + return output + + +def get_weight_quantization_different_groups(param_dict): + output = {} + sub_param_dict = param_dict[DIFFERENT_GROUPS] + + def get_params(name, group_dict): + assert WEIGHT_QUANTIZE_START_BITS in group_dict.keys( + ), f"{WEIGHT_QUANTIZE_START_BITS} must be specified for weight quantization group {name}" + assert WEIGHT_QUANTIZE_TARGET_BITS in group_dict.keys( + ), f"{WEIGHT_QUANTIZE_TARGET_BITS} must be specified for weight quantization group {name}" + group_dict[WEIGHT_QUANTIZATION_PERIOD] = get_scalar_param(group_dict, WEIGHT_QUANTIZATION_PERIOD, + WEIGHT_QUANTIZATION_PERIOD_DEFAULT) + return 
group_dict + + for k, v in sub_param_dict.items(): + output[k] = {} + output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS]) + output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE, + DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT) + output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param( + sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT) + + return output + + +def get_activation_quantization(param_dict): + output = {} + if ACTIVATION_QUANTIZATION not in param_dict.keys(): + param_dict[ACTIVATION_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}} + sub_param_dict = param_dict[ACTIVATION_QUANTIZATION] + # shared parameters + output[SHARED_PARAMETERS] = get_activation_quantization_shared_parameters(sub_param_dict) + # each sub-groups + if output[SHARED_PARAMETERS][ACTIVATION_QUANTIZATION_ENABLED]: + assert DIFFERENT_GROUPS in sub_param_dict.keys( + ), f"Activation Quantization is enabled, {DIFFERENT_GROUPS} must be specified" + output[DIFFERENT_GROUPS] = get_activation_quantization_different_groups(sub_param_dict) + return output + + +def get_activation_quantization_shared_parameters(param_dict): + output = {} + if SHARED_PARAMETERS in param_dict.keys(): + sub_param_dict = param_dict[SHARED_PARAMETERS] + output[ACTIVATION_QUANTIZATION_ENABLED] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZATION_ENABLED, + ACTIVATION_QUANTIZATION_ENABLED_DEFAULT) + output[ACTIVATION_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_TYPE, + ACTIVATION_QUANTIZE_TYPE_DEFAULT) + assert output[ACTIVATION_QUANTIZE_TYPE] in [ + ACTIVATION_QUANTIZE_SYMMETRIC, ACTIVATION_QUANTIZE_ASYMMETRIC + ], f"Invalid activation quantize type. Supported types: [{ACTIVATION_QUANTIZE_SYMMETRIC}, {ACTIVATION_QUANTIZE_ASYMMETRIC}]" + output[ACTIVATION_QUANTIZE_RANGE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_RANGE, + ACTIVATION_QUANTIZE_RANGE_DEFAULT) + assert output[ACTIVATION_QUANTIZE_RANGE] in [ + ACTIVATION_QUANTIZE_RANGE_DYNAMIC, ACTIVATION_QUANTIZE_RANGE_STATIC + ], f"Invalid activation quantize range calibration. 
Supported types: [{ACTIVATION_QUANTIZE_RANGE_DYNAMIC}, {ACTIVATION_QUANTIZE_RANGE_STATIC}]" + output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, + ACTIVATION_QUANTIZE_SCHEDULE_OFFSET, + ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT) + else: + output[ACTIVATION_QUANTIZATION_ENABLED] = ACTIVATION_QUANTIZATION_ENABLED_DEFAULT + output[ACTIVATION_QUANTIZE_TYPE] = ACTIVATION_QUANTIZE_TYPE_DEFAULT + output[ACTIVATION_QUANTIZE_RANGE] = ACTIVATION_QUANTIZE_RANGE_DEFAULT + output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT + return output + + +def get_activation_quantization_different_groups(param_dict): + output = {} + sub_param_dict = param_dict[DIFFERENT_GROUPS] + + def get_params(name, group_dict): + assert ACTIVATION_QUANTIZE_BITS in group_dict.keys( + ), f"{ACTIVATION_QUANTIZE_BITS} must be specified for activation quantization group {name}" + return group_dict + + for k, v in sub_param_dict.items(): + output[k] = {} + output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS]) + output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE, + DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT) + output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param( + sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT) + + return output + + +def get_sparse_pruning(param_dict): + output = {} + if SPARSE_PRUNING not in param_dict.keys(): + param_dict[SPARSE_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}} + sub_param_dict = param_dict[SPARSE_PRUNING] + # shared parameters + output[SHARED_PARAMETERS] = get_sparse_pruning_shared_parameters(sub_param_dict) + # each sub-groups + if output[SHARED_PARAMETERS][SPARSE_PRUNING_ENABLED] and output[SHARED_PARAMETERS][ + SPARSE_PRUNING_METHOD] != SPARSE_PRUNING_METHOD_SNIP_MOMENTUM: + assert DIFFERENT_GROUPS in sub_param_dict.keys( + ), f"Sparse Pruning is enabled and not snip_momentum method, {DIFFERENT_GROUPS} must be specified" + output[DIFFERENT_GROUPS] = get_sparse_pruning_different_groups(sub_param_dict) + return output + + +def get_sparse_pruning_shared_parameters(param_dict): + output = {} + + if SHARED_PARAMETERS in param_dict.keys(): + sub_param_dict = param_dict[SHARED_PARAMETERS] + output[SPARSE_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_ENABLED, + SPARSE_PRUNING_ENABLED_DEFAULT) + output[SPARSE_PRUNING_METHOD] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_METHOD, + SPARSE_PRUNING_METHOD_DEFAULT) + assert output[SPARSE_PRUNING_METHOD] in [ + SPARSE_PRUNING_METHOD_L1, SPARSE_PRUNING_METHOD_TOPK, SPARSE_PRUNING_METHOD_SNIP_MOMENTUM + ], f"Invalid sparse pruning method. Supported types: [{SPARSE_PRUNING_METHOD_L1}, {SPARSE_PRUNING_METHOD_TOPK}, {SPARSE_PRUNING_METHOD_SNIP_MOMENTUM}]" + output[SPARSE_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_SCHEDULE_OFFSET, + SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT) + if output[SPARSE_PRUNING_METHOD] == SPARSE_PRUNING_METHOD_SNIP_MOMENTUM: + output[SPARSE_PRUNING_BLOCK_PATTERN] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_BLOCK_PATTERN, + SPARSE_PRUNING_BLOCK_PATTERN_DEFAULT) + output[SPARSE_PRUNING_DENSE_RATIO] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_DENSE_RATIO, + SPARSE_PRUNING_DENSE_RATIO_DEFAULT) + assert output[SPARSE_PRUNING_DENSE_RATIO] > 0 and output[ + SPARSE_PRUNING_DENSE_RATIO] < 1, f"Invalid dense_ratio value. 
Must be less than 1" + output[SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE] = get_scalar_param( + sub_param_dict, SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE, SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE_DEFAULT) + output[SPARSE_PRUNING_EXCLUDED_MODULES] = get_list_param(sub_param_dict, SPARSE_PRUNING_EXCLUDED_MODULES, + SPARSE_PRUNING_EXCLUDED_MODULES_DEFAULT) + output[SPARSE_PRUNING_SCHEDULE_OFFSET_END] = get_scalar_param(sub_param_dict, + SPARSE_PRUNING_SCHEDULE_OFFSET_END, + output[SPARSE_PRUNING_SCHEDULE_OFFSET]) + assert output[SPARSE_PRUNING_SCHEDULE_OFFSET] <= output[ + SPARSE_PRUNING_SCHEDULE_OFFSET_END], f"Invalid schedule_offset and schedule_offset_end values" + else: + output[SPARSE_PRUNING_ENABLED] = SPARSE_PRUNING_ENABLED_DEFAULT + output[SPARSE_PRUNING_METHOD] = SPARSE_PRUNING_METHOD_DEFAULT + output[SPARSE_PRUNING_SCHEDULE_OFFSET] = SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT + return output + + +def get_sparse_pruning_different_groups(param_dict): + output = {} + sub_param_dict = param_dict[DIFFERENT_GROUPS] + + def get_params(name, group_dict): + assert SPARSE_PRUNING_DENSE_RATIO in group_dict.keys( + ), f"{SPARSE_PRUNING_DENSE_RATIO} must be specified for sparse pruning group {name}" + return group_dict + + for k, v in sub_param_dict.items(): + output[k] = {} + output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS]) + output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE, + DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT) + output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param( + sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT) + + return output + + +def get_row_pruning(param_dict): + output = {} + if ROW_PRUNING not in param_dict.keys(): + param_dict[ROW_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}} + sub_param_dict = param_dict[ROW_PRUNING] + # shared parameters + output[SHARED_PARAMETERS] = get_row_pruning_shared_parameters(sub_param_dict) + # each sub-groups + if output[SHARED_PARAMETERS][ROW_PRUNING_ENABLED]: + assert DIFFERENT_GROUPS in sub_param_dict.keys( + ), f"Row Pruning is enabled, {DIFFERENT_GROUPS} must be specified" + output[DIFFERENT_GROUPS] = get_row_pruning_different_groups(sub_param_dict) + return output + + +def get_row_pruning_shared_parameters(param_dict): + output = {} + if SHARED_PARAMETERS in param_dict.keys(): + sub_param_dict = param_dict[SHARED_PARAMETERS] + output[ROW_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, ROW_PRUNING_ENABLED, + ROW_PRUNING_ENABLED_DEFAULT) + output[ROW_PRUNING_METHOD] = get_scalar_param(sub_param_dict, ROW_PRUNING_METHOD, ROW_PRUNING_METHOD_DEFAULT) + assert output[ROW_PRUNING_METHOD] in [ + ROW_PRUNING_METHOD_L1, ROW_PRUNING_METHOD_TOPK + ], f"Invalid row pruning method. 
Supported types: [{ROW_PRUNING_METHOD_L1}, {ROW_PRUNING_METHOD_TOPK}]" + output[ROW_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, ROW_PRUNING_SCHEDULE_OFFSET, + ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT) + else: + output[ROW_PRUNING_ENABLED] = ROW_PRUNING_ENABLED_DEFAULT + output[ROW_PRUNING_METHOD] = ROW_PRUNING_METHOD_DEFAULT + output[ROW_PRUNING_SCHEDULE_OFFSET] = ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT + return output + + +def get_row_pruning_different_groups(param_dict): + output = {} + sub_param_dict = param_dict[DIFFERENT_GROUPS] + + def get_params(name, group_dict): + assert ROW_PRUNING_DENSE_RATIO in group_dict.keys( + ), f"{ROW_PRUNING_DENSE_RATIO} must be specified for row pruning group {name}" + return group_dict + + for k, v in sub_param_dict.items(): + output[k] = {} + output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS]) + output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE, + DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT) + output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param( + sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT) + return output + + +def get_head_pruning(param_dict): + output = {} + if HEAD_PRUNING not in param_dict.keys(): + param_dict[HEAD_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}} + sub_param_dict = param_dict[HEAD_PRUNING] + # shared parameters + output[SHARED_PARAMETERS] = get_head_pruning_shared_parameters(sub_param_dict) + # each sub-groups + if output[SHARED_PARAMETERS][HEAD_PRUNING_ENABLED]: + assert DIFFERENT_GROUPS in sub_param_dict.keys( + ), f"Head Pruning is enabled, {DIFFERENT_GROUPS} must be specified" + output[DIFFERENT_GROUPS] = get_head_pruning_different_groups(sub_param_dict) + return output + + +def get_head_pruning_shared_parameters(param_dict): + output = {} + if SHARED_PARAMETERS in param_dict.keys(): + sub_param_dict = param_dict[SHARED_PARAMETERS] + output[HEAD_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, HEAD_PRUNING_ENABLED, + HEAD_PRUNING_ENABLED_DEFAULT) + output[HEAD_PRUNING_METHOD] = get_scalar_param(sub_param_dict, HEAD_PRUNING_METHOD, + HEAD_PRUNING_METHOD_DEFAULT) + assert output[HEAD_PRUNING_METHOD] in [ + HEAD_PRUNING_METHOD_L1, HEAD_PRUNING_METHOD_TOPK + ], f"Invalid head pruning method. 
Supported types: [{HEAD_PRUNING_METHOD_L1}, {HEAD_PRUNING_METHOD_TOPK}]" + output[HEAD_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, HEAD_PRUNING_SCHEDULE_OFFSET, + HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT) + if output[HEAD_PRUNING_ENABLED]: + assert HEAD_PRUNING_NUM_HEADS in sub_param_dict.keys( + ), f"{HEAD_PRUNING_NUM_HEADS} must be specified for head pruning" + output[HEAD_PRUNING_NUM_HEADS] = sub_param_dict[HEAD_PRUNING_NUM_HEADS] + else: + output[HEAD_PRUNING_ENABLED] = HEAD_PRUNING_ENABLED_DEFAULT + output[HEAD_PRUNING_METHOD] = HEAD_PRUNING_METHOD_DEFAULT + output[HEAD_PRUNING_SCHEDULE_OFFSET] = HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT + return output + + +def get_head_pruning_different_groups(param_dict): + output = {} + sub_param_dict = param_dict[DIFFERENT_GROUPS] + + def get_params(name, group_dict): + assert HEAD_PRUNING_DENSE_RATIO in group_dict.keys( + ), f"dense_ratio must be specified for head pruning group {name}" + return group_dict + + for k, v in sub_param_dict.items(): + output[k] = {} + output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS]) + output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE, + DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT) + output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param( + sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT) + return output + + +def get_channel_pruning(param_dict): + output = {} + if CHANNEL_PRUNING not in param_dict.keys(): + param_dict[CHANNEL_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}} + sub_param_dict = param_dict[CHANNEL_PRUNING] + # shared parameters + output[SHARED_PARAMETERS] = get_channel_pruning_shared_parameters(sub_param_dict) + # each sub-groups + if output[SHARED_PARAMETERS][CHANNEL_PRUNING_ENABLED]: + assert DIFFERENT_GROUPS in sub_param_dict.keys( + ), f"Sparse Pruning is enabled, {DIFFERENT_GROUPS} must be specified" + output[DIFFERENT_GROUPS] = get_channel_pruning_different_groups(sub_param_dict) + return output + + +def get_channel_pruning_shared_parameters(param_dict): + output = {} + if SHARED_PARAMETERS in param_dict.keys(): + sub_param_dict = param_dict[SHARED_PARAMETERS] + output[CHANNEL_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_ENABLED, + CHANNEL_PRUNING_ENABLED_DEFAULT) + output[CHANNEL_PRUNING_METHOD] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_METHOD, + CHANNEL_PRUNING_METHOD_DEFAULT) + assert output[CHANNEL_PRUNING_METHOD] in [ + CHANNEL_PRUNING_METHOD_L1, CHANNEL_PRUNING_METHOD_TOPK + ], f"Invalid channel pruning method. 
Supported types: [{CHANNEL_PRUNING_METHOD_L1}, {CHANNEL_PRUNING_METHOD_TOPK}]" + output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_SCHEDULE_OFFSET, + CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT) + else: + output[CHANNEL_PRUNING_ENABLED] = CHANNEL_PRUNING_ENABLED_DEFAULT + output[CHANNEL_PRUNING_METHOD] = CHANNEL_PRUNING_METHOD_DEFAULT + output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT + return output + + +def get_channel_pruning_different_groups(param_dict): + output = {} + sub_param_dict = param_dict[DIFFERENT_GROUPS] + + def get_params(name, group_dict): + assert CHANNEL_PRUNING_DENSE_RATIO in group_dict.keys( + ), f"{CHANNEL_PRUNING_DENSE_RATIO} must be specified for channel pruning group {name}" + return group_dict + + for k, v in sub_param_dict.items(): + output[k] = {} + output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS]) + output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE, + DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT) + output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param( + sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT) + + return output diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/constants.py b/venv/lib/python3.10/site-packages/deepspeed/compression/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..67375d510a4b0a82c8860040042c5e0719507803 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/compression/constants.py @@ -0,0 +1,188 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +######################################### +# Compression Methods +# It has several sub-components +# ######################################### +COMPRESSION_TRAINING = "compression_training" +SHARED_PARAMETERS = "shared_parameters" +DIFFERENT_GROUPS = "different_groups" +TECHNIQUE_ENABLED = "enabled" +TECHNIQUE_SCHEDULE_OFFSET = "schedule_offset" +TECHNIQUE_SCHEDULE_OFFSET_END = "schedule_offset_end" +DIFFERENT_GROUPS_PARAMETERS = "params" +DIFFERENT_GROUPS_MODULE_SCOPE = "modules" +DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT = "*" +DIFFERENT_GROUPS_RELATED_MODULE_SCOPE = "related_modules" +DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT = None +# COMPRESSION_TRAINING_ENABLED = "enabled" +# COMPRESSION_TRAINING_ENABLED_DEFAULT = False + +#### +# Layer Reduction +#### +LAYER_REDUCTION = "layer_reduction" +LAYER_REDUCTION_ENABLED = "enabled" +LAYER_REDUCTION_ENABLED_DEFAULT = False +KEEP_NUMBER_LAYER = "keep_number_layer" +MODULE_NAME_PREFIX = "module_name_prefix" +TEACHER_LAYER = "teacher_layer" +OTHER_MODULE_NAME = "other_module_name" + +#### +# Weight Quantization +#### +WEIGHT_QUANTIZATION = "weight_quantization" + +WEIGHT_QUANTIZATION_PERIOD = "quantization_period" +WEIGHT_QUANTIZATION_PERIOD_DEFAULT = 1 + +WEIGHT_QUANTIZE_IN_FORWARD_ENABLED = "quantize_weight_in_forward" +WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT = False + +WEIGHT_QUANTIZE_ENABLED = TECHNIQUE_ENABLED +WEIGHT_QUANTIZE_ENABLED_DEFAULT = False + +WEIGHT_QUANTIZE_KERNEL = "quantizer_kernel" +WEIGHT_QUANTIZE_KERNEL_DEFAULT = False + +WEIGHT_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET +WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 0 + +WEIGHT_QUANTIZE_GROUPS = "quantize_groups" +WEIGHT_QUANTIZE_GROUPS_DEFAULT = 1 + +WEIGHT_QUANTIZE_VERBOSE = "quantize_verbose" 
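For orientation, the parsing helpers above (get_activation_quantization, get_sparse_pruning, and the *_different_groups variants) all read the same two-level layout of "shared_parameters" plus named "different_groups" entries. Below is a minimal, hypothetical sketch of a matching "compression_training" config section; the key names come from the constants in this diff, while the group names, module patterns, and numeric values are invented for illustration and are not part of the diff.

compression_config_sketch = {
    "compression_training": {
        "sparse_pruning": {
            "shared_parameters": {
                "enabled": True,
                "method": "l1",                 # one of: l1, topk, snip_momentum
                "schedule_offset": 1000
            },
            "different_groups": {
                "sp_group1": {                  # hypothetical group name
                    "params": {"dense_ratio": 0.5},   # required per group for l1/topk
                    "modules": ["attention.self"],    # hypothetical module pattern
                    "related_modules": None
                }
            }
        },
        "activation_quantization": {
            "shared_parameters": {
                "enabled": True,
                "quantization_type": "symmetric",     # or "asymmetric"
                "range_calibration": "dynamic",       # or "static"
                "schedule_offset": 1000
            },
            "different_groups": {
                "aq_group1": {
                    "params": {"bits": 8},            # "bits" is required per group
                    "modules": ["attention.output"]
                }
            }
        }
    }
}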
+WEIGHT_QUANTIZE_VERBOSE_DEFAULT = False + +WEIGHT_QUANTIZE_TYPE = "quantization_type" +WEIGHT_QUANTIZE_TYPE_DEFAULT = "symmetric" +WEIGHT_QUANTIZE_SYMMETRIC = "symmetric" +WEIGHT_QUANTIZE_ASYMMETRIC = "asymmetric" + +WEIGHT_QUANTIZE_ROUNDING = "rounding" +WEIGHT_QUANTIZE_ROUNDING_DEFAULT = "nearest" +WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING = "stochastic" +WEIGHT_QUANTIZE_NEAREST_ROUNDING = "nearest" +# maybe deleted for a cleaner version +WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE = "fp16_mixed_quantize" + +WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED = "enabled" +WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT = False + +WEIGHT_QUANTIZE_CHANGE_RATIO = "quantize_change_ratio" +WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT = 0.001 + +WEIGHT_QUANTIZE_START_BITS = "start_bits" +WEIGHT_QUANTIZE_TARGET_BITS = "target_bits" +### +# Activation Quantization +### +ACTIVATION_QUANTIZATION = "activation_quantization" + +ACTIVATION_QUANTIZATION_ENABLED = TECHNIQUE_ENABLED +ACTIVATION_QUANTIZATION_ENABLED_DEFAULT = False + +ACTIVATION_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET +ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 1000 + +ACTIVATION_QUANTIZE_TYPE = "quantization_type" +ACTIVATION_QUANTIZE_TYPE_DEFAULT = "symmetric" +ACTIVATION_QUANTIZE_SYMMETRIC = "symmetric" +ACTIVATION_QUANTIZE_ASYMMETRIC = "asymmetric" + +ACTIVATION_QUANTIZE_RANGE = 'range_calibration' +ACTIVATION_QUANTIZE_RANGE_DEFAULT = 'dynamic' +ACTIVATION_QUANTIZE_RANGE_STATIC = 'static' +ACTIVATION_QUANTIZE_RANGE_DYNAMIC = 'dynamic' + +ACTIVATION_QUANTIZE_BITS = "bits" +### +# Sparse Pruning +### +SPARSE_PRUNING = "sparse_pruning" + +SPARSE_PRUNING_ENABLED = TECHNIQUE_ENABLED +SPARSE_PRUNING_ENABLED_DEFAULT = False + +SPARSE_PRUNING_METHOD = "method" +SPARSE_PRUNING_METHOD_DEFAULT = "l1" +SPARSE_PRUNING_METHOD_L1 = "l1" +SPARSE_PRUNING_METHOD_TOPK = "topk" +SPARSE_PRUNING_METHOD_SNIP_MOMENTUM = "snip_momentum" + +SPARSE_PRUNING_BLOCK_PATTERN = "block_pattern" +SPARSE_PRUNING_BLOCK_PATTERN_DEFAULT = "4x1" + +SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE = "schedule_offset_stride" +SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE_DEFAULT = 1 + +SPARSE_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET +SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000 + +SPARSE_PRUNING_SCHEDULE_OFFSET_END = TECHNIQUE_SCHEDULE_OFFSET_END +SPARSE_PRUNING_SCHEDULE_OFFSET_END_DEFAULT = SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT + +SPARSE_PRUNING_DENSE_RATIO = "dense_ratio" +SPARSE_PRUNING_DENSE_RATIO_DEFAULT = 0.1 + +SPARSE_PRUNING_EXCLUDED_MODULES = "excluded_modules" +SPARSE_PRUNING_EXCLUDED_MODULES_DEFAULT = [] +### +# Row Pruning +### +ROW_PRUNING = "row_pruning" + +ROW_PRUNING_ENABLED = TECHNIQUE_ENABLED +ROW_PRUNING_ENABLED_DEFAULT = False + +ROW_PRUNING_METHOD = "method" +ROW_PRUNING_METHOD_DEFAULT = "l1" +ROW_PRUNING_METHOD_L1 = "l1" +ROW_PRUNING_METHOD_TOPK = "topk" + +ROW_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET +ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000 + +ROW_PRUNING_DENSE_RATIO = "dense_ratio" + +### +# Head Pruning +### +HEAD_PRUNING = "head_pruning" + +HEAD_PRUNING_ENABLED = TECHNIQUE_ENABLED +HEAD_PRUNING_ENABLED_DEFAULT = False + +HEAD_PRUNING_METHOD = "method" +HEAD_PRUNING_METHOD_DEFAULT = "topk" +HEAD_PRUNING_METHOD_L1 = "l1" +HEAD_PRUNING_METHOD_TOPK = "topk" + +HEAD_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET +HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000 + +HEAD_PRUNING_NUM_HEADS = "num_heads" + +HEAD_PRUNING_DENSE_RATIO = "dense_ratio" + +### +# Channel Pruning +### +CHANNEL_PRUNING = "channel_pruning" + +CHANNEL_PRUNING_ENABLED = 
TECHNIQUE_ENABLED +CHANNEL_PRUNING_ENABLED_DEFAULT = False + +CHANNEL_PRUNING_METHOD = "method" +CHANNEL_PRUNING_METHOD_DEFAULT = "l1" +CHANNEL_PRUNING_METHOD_L1 = "l1" +CHANNEL_PRUNING_METHOD_TOPK = "topk" + +CHANNEL_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET +CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000 + +CHANNEL_PRUNING_DENSE_RATIO = "dense_ratio" diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/helper.py b/venv/lib/python3.10/site-packages/deepspeed/compression/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..e2d66100a4689255eff210465ff9f8cc156b6b0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/compression/helper.py @@ -0,0 +1,322 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from .basic_layer import Embedding_Compress, LinearLayer_Compress, Conv2dLayer_Compress, BNLayer_Compress, ColumnParallelLinear_Compress, RowParallelLinear_Compress +from .constants import * +from deepspeed.utils import logger + +try: + from neural_compressor.compression import pruner as nc_pruner +except ImportError as e: + nc_pruner = None + + +def recursive_getattr(model, module_name): + """ + Recursively get the attribute of a module. + Args: + model (`torch.nn.Module`) + The model to get the attribute from. + module_name (`str`) + The name of the module to get the attribute from. + """ + split_list = module_name.split('.') + output = model + for name in split_list: + output = getattr(output, name) + return output + + +def recursive_setattr(model, module_name, module): + """ + Recursively set the attribute of a module. + Args: + model (`torch.nn.Module`) + The model to set the attribute in. + module_name (`str`) + The name of the module to set the attribute in. + module (`torch.nn.Module`) + The module to set the attribute to. + """ + split_list = module_name.split('.') + output = model + for name in split_list[:-1]: + output = getattr(output, name) + output.__setattr__(split_list[-1], module) + + +def module_replacement(model, module_name, compression_technique=None, mpu=None): + """ + Replace a module with a new module. + Args: + model (`torch.nn.Module`) + The model to replace the module in. + module_name (`str`) + The name of the module to replace. + compression_technique (`str`) + The compression technique to use for the new module. 
+ """ + + # Get the old module + old_module = recursive_getattr(model, module_name) + + need_bias = False + if hasattr(old_module, 'bias') and old_module.bias is not None: + need_bias = True + + # Initialize the new module + if isinstance(old_module, LinearLayer_Compress) or isinstance(old_module, torch.nn.Linear): + if isinstance(old_module, LinearLayer_Compress): + new_module = old_module + else: + new_module = LinearLayer_Compress(old_module.in_features, old_module.out_features, + bias=need_bias).to(device=old_module.weight.device, + dtype=old_module.weight.dtype) + new_module.weight.data = old_module.weight.data + if need_bias: + new_module.bias.data = old_module.bias.data + elif isinstance(old_module, Conv2dLayer_Compress) or isinstance(old_module, torch.nn.Conv2d): + if isinstance(old_module, Conv2dLayer_Compress): + new_module = old_module + else: + new_module = Conv2dLayer_Compress(old_module.in_channels, old_module.out_channels, old_module.kernel_size, old_module.stride, old_module.padding, \ + old_module.dilation, old_module.groups, need_bias, \ + old_module.padding_mode).to(device=old_module.weight.device, dtype=old_module.weight.dtype) + new_module.weight.data = old_module.weight.data + if need_bias: + new_module.bias.data = old_module.bias.data + elif isinstance(old_module, torch.nn.BatchNorm2d): + new_module = BNLayer_Compress(old_module.num_features, old_module.eps, old_module.momentum, old_module.affine, + old_module.track_running_stats).to(old_module.weight.device, + old_module.weight.dtype) + new_module.weight.data = old_module.weight.data + if need_bias: + new_module.bias.data = old_module.bias.data + new_module.running_mean.data = old_module.running_mean.data + new_module.running_var.data = old_module.running_var.data + elif isinstance(old_module, Embedding_Compress) or isinstance(old_module, torch.nn.Embedding): + if isinstance(old_module, Embedding_Compress): + new_module = old_module + else: + new_module = Embedding_Compress(old_module.num_embeddings, old_module.embedding_dim, old_module.padding_idx, old_module.max_norm, old_module.norm_type, \ + old_module.scale_grad_by_freq, old_module.sparse).to(device=old_module.weight.device, dtype=old_module.weight.dtype) + new_module.weight.data = old_module.weight.data + elif mpu is not None and (isinstance(old_module, ColumnParallelLinear_Compress) + or isinstance(old_module, mpu.ColumnParallelLinear)): + if isinstance(old_module, ColumnParallelLinear_Compress): + new_module = old_module + else: + new_module = ColumnParallelLinear_Compress(mpu, + old_module.input_size, + old_module.output_size, + gather_output=old_module.gather_output, + skip_bias_add=old_module.skip_bias_add, + bias=need_bias).to(device=old_module.weight.device, + dtype=old_module.weight.dtype) + new_module.weight.data = old_module.weight.data + if need_bias: + new_module.bias.data = old_module.bias.data + elif mpu is not None and (isinstance(old_module, RowParallelLinear_Compress) + or isinstance(old_module, mpu.RowParallelLinear)): + if isinstance(old_module, RowParallelLinear_Compress): + new_module = old_module + else: + new_module = RowParallelLinear_Compress(mpu, + old_module.input_size, + old_module.output_size, + input_is_parallel=old_module.input_is_parallel, + skip_bias_add=old_module.skip_bias_add, + bias=need_bias).to(device=old_module.weight.device, + dtype=old_module.weight.dtype) + new_module.weight.data = old_module.weight.data + if need_bias: + new_module.bias.data = old_module.bias.data + else: + new_module = None + + if 
compression_technique is not None: + for k, v in compression_technique.items(): + if k == SPARSE_PRUNING: + if v[SPARSE_PRUNING_ENABLED]: + new_module.enable_sparse_pruning(v[SPARSE_PRUNING_DENSE_RATIO], v[SPARSE_PRUNING_METHOD]) + elif k == ROW_PRUNING: + if v[ROW_PRUNING_ENABLED]: + new_module.enable_row_pruning(v[ROW_PRUNING_DENSE_RATIO], v[ROW_PRUNING_METHOD]) + elif k == HEAD_PRUNING: + if v[HEAD_PRUNING_ENABLED]: + new_module.enable_head_pruning(v[HEAD_PRUNING_DENSE_RATIO], v[HEAD_PRUNING_METHOD], + v[HEAD_PRUNING_NUM_HEADS]) + elif k == ACTIVATION_QUANTIZATION: + if v[ACTIVATION_QUANTIZATION_ENABLED]: + new_module.enable_activation_quantization(v[ACTIVATION_QUANTIZE_BITS], v[ACTIVATION_QUANTIZE_TYPE], + v[ACTIVATION_QUANTIZE_RANGE]) + elif k == WEIGHT_QUANTIZATION: + if v[WEIGHT_QUANTIZE_ENABLED]: + new_module.enable_weight_quantization(v[WEIGHT_QUANTIZE_START_BITS], + v[WEIGHT_QUANTIZE_TARGET_BITS], + v[WEIGHT_QUANTIZATION_PERIOD], + v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED], + v[WEIGHT_QUANTIZE_TYPE], v[WEIGHT_QUANTIZE_GROUPS]) + elif k == CHANNEL_PRUNING: + if v[CHANNEL_PRUNING_ENABLED]: + new_module.enable_channel_pruning(v[CHANNEL_PRUNING_DENSE_RATIO], v[CHANNEL_PRUNING_METHOD]) + else: + raise NotImplementedError('Compression technique {} is not implemented'.format(k)) + + # Replace the old module with the new one + recursive_setattr(model, module_name, new_module) + + +def is_module_compressible(module, mpu=None): + ret = isinstance(module, torch.nn.Linear) or \ + isinstance(module, torch.nn.Conv2d) or \ + isinstance(module, torch.nn.Embedding) or \ + isinstance(module, torch.nn.BatchNorm2d) + + if mpu is not None: + ret = ret or isinstance(module, mpu.RowParallelLinear) or isinstance(module, mpu.ColumnParallelLinear) + + return ret + + +def compression_preparation(model, compression_technique_list, mpu): + """ + Prepare the compression techniques of a model. + Args: + model (`torch.nn.Module`) + The model to prepare the compression techniques of. + compression_technique_list (`list`) + The list of compression techniques to prepare the model to. + list[] + """ + # Here we first replace all module with our linear wrapper + for module_name, module in model.named_modules(): + if is_module_compressible(module, mpu): + module_replacement(model, module_name, mpu=mpu) + for module_name_lists, _, compression_technique in compression_technique_list: + for mnl in module_name_lists: + for module_name in mnl: + module_replacement(model, module_name, compression_technique) + + return model + + +def fix_compression(model, module_name, compression_technique, mask=None, dim_reduction=False): + """ + Fix the compression technique of a module. + Args: + model (`torch.nn.Module`) + The model to fix the compression technique of. + module_name (`str`) + The name of the module to fix the compression technique of. + compression_technique (`str`) + The compression technique to fix the module to. 
+ """ + # Here we can make things much simpler by just replacing the module + module = recursive_getattr(model, module_name) + for k, v in compression_technique.items(): + if k == WEIGHT_QUANTIZATION and v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] and v[WEIGHT_QUANTIZE_ENABLED]: + return module.fix_weight_quantization() + elif k == SPARSE_PRUNING and v[SPARSE_PRUNING_ENABLED]: + return module.fix_sparse_pruning_helper() + elif k == ROW_PRUNING and (v[ROW_PRUNING_ENABLED] or mask is not None): + return module.fix_row_col_pruning_helper(mask, dim_reduction=dim_reduction) + elif k == HEAD_PRUNING and (v[HEAD_PRUNING_ENABLED] or mask is not None): + return module.fix_head_pruning_helper(mask, v[HEAD_PRUNING_NUM_HEADS], dim_reduction=dim_reduction) + elif k == CHANNEL_PRUNING and (v[CHANNEL_PRUNING_ENABLED] or mask is not None): + return module.fix_channel_pruning_helper(mask, dim_reduction=dim_reduction) + + +def convert_conv1d_to_linear(model, convert_type): + ''' + This is a help function to convert conv1d to linear (e.g., convert GPT2 from HF) + ''' + if hasattr(model, 'module'): + c_model = model.module + else: + c_model = model + + for name, module in c_model.named_modules(): + if isinstance(module, convert_type): + old_module = recursive_getattr(c_model, name) + new_module = torch.nn.Linear(old_module.weight.data.size(0), + old_module.weight.data.size(1), + bias=True if old_module.bias is not None else False) + new_module.weight.data = old_module.weight.data.t().contiguous() + if new_module.bias is not None: + new_module.bias.data = old_module.bias.data.view(-1) + + recursive_setattr(c_model, name, new_module) + + return model + + +def generate_pruners(config, model): + """Generate pruners. + Args: + config (`neural_compressor.WeightPruningConfig`) + The object to the class WeightPruningConfig. + model (`torch.nn.module`) + The torch module object to be pruned. + """ + assert nc_pruner is not None, "please ensure the neural_compressor python package is installed by pip or conda if user wants to use snip_momentum sparse pruning" + from nc_pruner.utils import process_config, parse_to_prune + from nc_pruner.pruners import get_pruner + assert isinstance(model, torch.nn.Module) + pruners_info = process_config(config) + pruners = [] + for info in pruners_info: + modules = parse_to_prune(info, model) + if modules == {}: + logger.warning("one pruner hooks no layers, please have a check") + + pruners.append(get_pruner(info, modules)) + info['modules'] = [key for key in modules.keys()] + info['len_of_modules'] = len(info['modules']) + logger.info(info) + return pruners + + +def register_on_step_begin(model): + """Mount on_step_begin to the model. + Args: + model (`torch.nn.module`) + The torch module object to be pruned. + """ + + def hook(module, input): + for pruner in module.pruners: + pruner.on_step_begin(0) + + hook_handle = model.register_forward_pre_hook(hook) + return hook_handle + + +def rewrite_optimizer_step(opt: torch.optim.Optimizer): + """Mount on_before/after_optimizer_step to the optimizer. + Args: + model (`torch.opt.Optimizer`) + The torch optimizer object to be hooked. 
+ """ + + def new_step(self, closure=None): + if hasattr(self, "pruners"): + for pruner in self.pruners: + pruner.on_before_optimizer_step() + + if closure is not None: + res = self.orig_step(closure) + else: + res = self.orig_step() + if hasattr(self, "pruners"): + for pruner in self.pruners: + pruner.on_after_optimizer_step() + return res + + opt.orig_step = opt.step + import types + opt.step = types.MethodType(new_step, opt) + return opt diff --git a/venv/lib/python3.10/site-packages/deepspeed/compression/scheduler.py b/venv/lib/python3.10/site-packages/deepspeed/compression/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..85fdb67f642f60c13ed1327e42081fc30dce6a04 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/compression/scheduler.py @@ -0,0 +1,173 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .compress import get_module_name +from .constants import * +from .helper import recursive_getattr +from deepspeed.utils import logger + + +class compression_scheduler(): + ''' + Used to schedule different compression methods + ''' + + def __init__(self, model, compression_config): + self.model = model + self.compression_config = compression_config + self.make_init() + self.training_steps = 0 + self.weight_quantization_enabled = False + + self.verbose = { + WEIGHT_QUANTIZATION: False, + ACTIVATION_QUANTIZATION: False, + SPARSE_PRUNING: False, + HEAD_PRUNING: False, + ROW_PRUNING: False, + CHANNEL_PRUNING: False + } + + def make_init(self): + self.different_compression_methods = {} + for method, method_content in self.compression_config.items(): + if LAYER_REDUCTION in method: + continue + self.different_compression_methods[method] = { + TECHNIQUE_ENABLED: False, + SHARED_PARAMETERS: None, + DIFFERENT_GROUPS: [] + } + exist_module_name = set() + shared_parameters = method_content[SHARED_PARAMETERS] + self.different_compression_methods[method][TECHNIQUE_ENABLED] = shared_parameters[TECHNIQUE_ENABLED] + self.different_compression_methods[method][SHARED_PARAMETERS] = shared_parameters + + for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items(): + module_name_list = [] + for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]: + module_name, exist_module_name = get_module_name(group_name, + self.model, + key_word, + exist_module_name, + verbose=False) + module_name_list.extend(module_name) + if module_name_list: + self.different_compression_methods[method][DIFFERENT_GROUPS].append( + [group_name, module_name_list, + method_parameters.copy().pop('params')]) + + def check_weight_quantization(self): + # check weight quantization + wq = self.different_compression_methods[WEIGHT_QUANTIZATION] + if not wq[TECHNIQUE_ENABLED]: + return + else: + shared_parameters = wq[SHARED_PARAMETERS] + if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]: + for group_name, module_name_list, method_parameters in wq[DIFFERENT_GROUPS]: + for module_name in module_name_list: + module = recursive_getattr(self.model, module_name) + module.weight_quantization_enabled = True + + if not self.verbose[WEIGHT_QUANTIZATION]: + logger.info(f'Weight quantization is enabled at step {self.training_steps}') + self.weight_quantization_enabled = True + self.verbose[WEIGHT_QUANTIZATION] = True + + def check_activation_quantization(self): + # check activation quantization + aq = self.different_compression_methods[ACTIVATION_QUANTIZATION] + if not aq[TECHNIQUE_ENABLED]: + return + else: 
+ shared_parameters = aq[SHARED_PARAMETERS] + if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]: + for group_name, module_name_list, method_parameters in aq[DIFFERENT_GROUPS]: + for module_name in module_name_list: + module = recursive_getattr(self.model, module_name) + module.activation_quantization_enabled = True + if not self.verbose[ACTIVATION_QUANTIZATION]: + logger.info(f'Activation quantization is enabled at step {self.training_steps}') + self.verbose[ACTIVATION_QUANTIZATION] = True + + def check_sparse_pruning(self): + # check sparse pruning + sp = self.different_compression_methods[SPARSE_PRUNING] + if not sp[TECHNIQUE_ENABLED]: + return + else: + shared_parameters = sp[SHARED_PARAMETERS] + if shared_parameters[TECHNIQUE_SCHEDULE_OFFSET] <= self.training_steps <= shared_parameters[ + TECHNIQUE_SCHEDULE_OFFSET_END]: + for group_name, module_name_list, method_parameters in sp[DIFFERENT_GROUPS]: + for module_name in module_name_list: + module = recursive_getattr(self.model, module_name) + module.sparse_pruning_enabled = True + if not self.verbose[SPARSE_PRUNING]: + logger.info(f'Sparse pruning is enabled at step {self.training_steps}') + self.verbose[SPARSE_PRUNING] = True + + def check_head_pruning(self): + # check head pruning + hp = self.different_compression_methods[HEAD_PRUNING] + if not hp[TECHNIQUE_ENABLED]: + return + else: + shared_parameters = hp[SHARED_PARAMETERS] + if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]: + for group_name, module_name_list, method_parameters in hp[DIFFERENT_GROUPS]: + for module_name in module_name_list: + module = recursive_getattr(self.model, module_name) + module.head_pruning_enabled = True + if not self.verbose[HEAD_PRUNING]: + logger.info(f'Head pruning is enabled at step {self.training_steps}') + self.verbose[HEAD_PRUNING] = True + + def check_row_pruning(self): + # check row pruning + rp = self.different_compression_methods[ROW_PRUNING] + if not rp[TECHNIQUE_ENABLED]: + return + else: + shared_parameters = rp[SHARED_PARAMETERS] + if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]: + for group_name, module_name_list, method_parameters in rp[DIFFERENT_GROUPS]: + for module_name in module_name_list: + module = recursive_getattr(self.model, module_name) + module.row_pruning_enabled = True + if not self.verbose[ROW_PRUNING]: + logger.info(f'Row pruning is enabled at step {self.training_steps}') + self.verbose[ROW_PRUNING] = True + + def check_channel_pruning(self): + # check channel pruning + cp = self.different_compression_methods[CHANNEL_PRUNING] + if not cp[TECHNIQUE_ENABLED]: + return + else: + shared_parameters = cp[SHARED_PARAMETERS] + if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]: + for group_name, module_name_list, method_parameters in cp[DIFFERENT_GROUPS]: + for module_name in module_name_list: + module = recursive_getattr(self.model, module_name) + module.channel_pruning_enabled = True + if not self.verbose[CHANNEL_PRUNING]: + logger.info(f'Channel pruning is enabled at step {self.training_steps}') + self.verbose[CHANNEL_PRUNING] = True + + def check_all_modules(self): + # check all different compression methods we have + self.check_weight_quantization() + self.check_activation_quantization() + self.check_sparse_pruning() + self.check_head_pruning() + self.check_row_pruning() + self.check_channel_pruning() + + def step(self, step_zero_check=False): + if not step_zero_check: + self.training_steps += 1 + self.check_all_modules() diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/compression/utils.py b/venv/lib/python3.10/site-packages/deepspeed/compression/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..481e833bdf8ccef9f136155bb83790c52aca8bac --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/compression/utils.py @@ -0,0 +1,222 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from torch import autograd +import math + + +class TopKBinarizer(autograd.Function): + """ + Top-k Binarizer. + Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}` + is among the k% highest values of S. + Implementation is inspired from: + https://github.com/yaozhewei/MLPruning + """ + + @staticmethod + def forward(ctx, inputs: torch.tensor, threshold: float, sigmoid: bool): + """ + Args: + inputs (`torch.FloatTensor`) + The input matrix from which the binarizer computes the binary mask. + threshold (`float`) + The percentage of weights to keep (the rest is pruned). + `threshold` is a float between 0 and 1. + sigmoid (`bool`) + Whether to apply a sigmoid on the threshold + Returns: + mask (`torch.FloatTensor`) + Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is + retained, 0 - the associated weight is pruned). + """ + # Get the subnetwork by sorting the inputs and using the top threshold + if sigmoid: + threshold = torch.sigmoid(threshold).item() + ctx.sigmoid = sigmoid + mask = inputs.clone() + + _, idx = inputs.flatten().sort(descending=True) + j = math.ceil(threshold * inputs.numel()) + + # flat_out and mask access the same memory. + flat_out = mask.flatten() + flat_out[idx[j:]] = 0. + flat_out[idx[:j]] = 1. 
+ ctx.save_for_backward(mask) + + return mask + + @staticmethod + def backward(ctx, gradOutput): + mask, = ctx.saved_tensors + if ctx.sigmoid: + return gradOutput.clone(), ((gradOutput * mask).sum()).view(-1), None + else: + return gradOutput.clone(), None, None + + +class SymQuantizer(torch.autograd.Function): + """ + Symmetric quantization + """ + + @staticmethod + def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1): + """ + Args: + inputs (`torch.FloatTensor`) + The input which needs to be quantized + num_bits (int, >=4) + Number of bits to use for quantization + min_value/max_value (torch.FloatTensor) + Used for static activation quantization + num_groups (int) + How many groups to partition the quantization into + Returns: + quantized_input (`torch.FloatTensor`) + Quantized input + """ + assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None + and num_groups == 1) + q_range = 2**num_bits + input_shape = input.shape + if min_value is None: + input = input.reshape(num_groups, -1) + max_input = torch.amax(torch.abs(input), dim=-1).view(num_groups, -1) + else: + max_input = torch.max(min_value.abs(), max_value).view(-1) + + scale = 2 * max_input / q_range + output = (input / scale).round().clamp(-q_range // 2, q_range // 2 - 1) * scale + output = output.reshape(input_shape).contiguous() + return output + + @staticmethod + def backward(ctx, grad_output): + grad_input = grad_output.clone() + return grad_input, None, None, None, None + + +class AsymQuantizer(torch.autograd.Function): + """ + Asymmetric quantization + """ + + @staticmethod + def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1): + """ + Args: + inputs (`torch.FloatTensor`) + The input which needs to be quantized + num_bits (int, >=4) + Number of bits to use for quantization + min_value/max_value (torch.FloatTensor) + Used for static activation quantization + num_groups (int) + How many groups to partition the quantization into + Returns: + quantized_input (`torch.FloatTensor`) + Quantized input + """ + + assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None + and num_groups == 1) + q_range = 2**num_bits + input_shape = input.shape + if min_value is None: + input = input.reshape(num_groups, -1) + min_value = input.amin(dim=-1, keepdim=True) + max_value = input.amax(dim=-1, keepdim=True) + + scale = (max_value - min_value) / q_range + zero_point = (min_value / scale).round() * scale + + output = ((input - zero_point) / scale).round().clamp(0, q_range - 1) * scale + zero_point + output = output.reshape(input_shape).contiguous() + return output + + @staticmethod + def backward(ctx, grad_output): + grad_input = grad_output.clone() + return grad_input, None, None, None, None + + +class TernaryQuantizer(torch.autograd.Function): + """ + Ternary quantization + """ + + @staticmethod + def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1): + """ + Args: + inputs (`torch.FloatTensor`) + The input which needs to be quantized + num_bits (int) + Dummy variable + min_value/max_value (torch.FloatTensor) + Used for static activation quantization; for now they are dummy variable + num_groups (int) + How many groups to partition the quantization into + Returns: + quantized_input (`torch.FloatTensor`) + Quantized input + """ + + assert (min_value is None and max_value is None) + input_flat = input.reshape(num_groups, -1) + n = input_flat.shape[1] + m = input_flat.norm(p=1, 
dim=1).div(n) + thres = (0.7 * m).view(-1, 1) + pos = (input_flat > thres).type(input.type()) + neg = (input_flat < -thres).type(input.type()) + mask = (input_flat.abs() > thres).type(input.type()) + alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1) + output = alpha * pos - alpha * neg + output = output.reshape(input.shape).contiguous() + return output + + @staticmethod + def backward(ctx, grad_output): + grad_input = grad_output.clone() + return grad_input, None, None, None, None + + +class BinaryQuantizer(torch.autograd.Function): + """ + Binary quantization + """ + + @staticmethod + def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1): + """ + Args: + inputs (`torch.FloatTensor`) + The input which needs to be quantized + num_bits (int) + Dummy variable + min_value/max_value (torch.FloatTensor) + Used for static activation quantization; for now they are dummy variable + num_groups (int) + How many groups to partition the quantization into + Returns: + quantized_input (`torch.FloatTensor`) + Quantized input + """ + + assert (min_value is None and max_value is None) + input_flat = input.reshape(num_groups, -1) + n = input_flat.shape[1] + m = input_flat.norm(p=1, dim=1, keepdim=True).div(n) + output = input_flat.sign().mul(m) + output = output.reshape(input.shape).contiguous() + return output + + @staticmethod + def backward(ctx, grad_output): + grad_input = grad_output.clone() + return grad_input, None, None, None, None diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/launcher/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/launcher/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. 
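The quantizers and the top-k binarizer defined in compression/utils.py above are torch.autograd.Function subclasses with straight-through backward passes, so callers invoke them via .apply(). A small, hypothetical usage sketch; the tensor shapes and bit-widths here are illustrative assumptions, not taken from the diff.

import torch
# assumed import: from deepspeed.compression.utils import SymQuantizer, AsymQuantizer, TopKBinarizer

x = torch.randn(16, 64)
x_sym = SymQuantizer.apply(x, 8)                   # 8-bit symmetric, dynamic range, one group
x_asym = AsymQuantizer.apply(x, 8, None, None, 4)  # 8-bit asymmetric over 4 groups (numel must divide evenly)

scores = torch.randn(16, 64)
mask = TopKBinarizer.apply(scores, 0.3, False)     # binary mask keeping the top 30% of scores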
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa9eb338cb254e62862e0e9d5e78f224dbf126e6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05a046ab20c8a465569ebf6e94b11460798fa02e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/launch.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/launch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8564fa767d69915038ed7712970ebe6784b5b85 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/launch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/launcher_helper.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/launcher_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d110a50ed5d6d6dcba8730a4887ab245d662127 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/launcher_helper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/multinode_runner.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/multinode_runner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b175c12c92b4096b674f1daf8474864f63040c7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/multinode_runner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/runner.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/runner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a06a11b1e28f4a71f83647f7f2e015143a416633 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/runner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/launcher/launch.py b/venv/lib/python3.10/site-packages/deepspeed/launcher/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..079182a3b45b86455c376511e239924ecff931dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/launcher/launch.py @@ -0,0 +1,359 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +DeepSpeed launcher, this is similar to torch's distributed.launch but supports +additional features such as arbitrary gpu exclusion. + +deepspeed.launcher.launch is intended to be run on a single worker node and +will spawn several worker sub-processes depending on how many devices/ranks +are on the worker. 
+""" + +import sys +import subprocess +import os +import json +import base64 +import time +import signal +import psutil +from collections import defaultdict +from typing import Dict +from argparse import ArgumentParser, REMAINDER +from deepspeed.accelerator import get_accelerator +from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT +from ..nebula.constants import DLTS_POD_ENV_PATH +from ..utils import logger, get_numactl_cmd +from ..elasticity import is_torch_elastic_compatible +from .constants import ELASTIC_TRAINING_ID_DEFAULT + +PID_FILE_BASEPATH = "/tmp" + + +def parse_args(): + parser = ArgumentParser(description="DeepSpeed distributed training launch" + " utility that creates multiple distributed" + " processes on a single node") + + # Optional arguments for the launch helper + parser.add_argument("--node_rank", + type=int, + default=0, + help="The rank of the node for multi-node distributed " + "training") + parser.add_argument("--master_addr", + default="127.0.0.1", + type=str, + help="Master node (rank 0)'s address, should be either" + " the IP address or the hostname of node 0, for" + " single node multi-proc training, the" + " --master_addr can simply be 127.0.0.1") + parser.add_argument("--master_port", + default=TORCH_DISTRIBUTED_DEFAULT_PORT, + type=int, + help="Master node (rank 0)'s free port that needs to " + "be used for communication during distributed " + "training") + parser.add_argument("--world_info", default="None", type=str, help="world info base64 encoded dictionary") + + parser.add_argument("--module", + action="store_true", + help="Change each process to interpret the launch " + "script as a Python module, executing with the same " + "behavior as 'python -m'.") + + parser.add_argument("--no_python", + action="store_true", + help="Skip prepending the training script with " + "'python' - just execute it directly.") + + parser.add_argument("--enable_elastic_training", action="store_true", help="Enable elastic training support.") + + parser.add_argument("--min_elastic_nodes", type=int, default=-1, help="Min number of nodes in elastic training.") + + parser.add_argument("--max_elastic_nodes", type=int, default=-1, help="Max number of nodes in elastic training.") + + parser.add_argument("--no_local_rank", + action="store_true", + help="Do not pass local_rank as an argument when calling " + "the user's training script.") + + parser.add_argument("--save_pid", + type=int, + default=0, + help="main launching process pid, for internal pid tracking") + + parser.add_argument("--enable_each_rank_log", + default="None", + type=str, + help="redirect the stdout and stderr from each rank into different log files") + + parser.add_argument("--bind_cores_to_rank", + action="store_true", + help="Bind each rank to different cores of the host. " + "This improves host efficiency especially for CPU backend") + + parser.add_argument("--bind_core_list", + type=str, + default=None, + help="List of cores to bind to with comma separated list of " + "numbers and range. i.e. 1,3-5,7 => [1,3,4,5,7]. 
When not " + "specified, all cores on system would be used rank binding") + + # positional + parser.add_argument("training_script", + type=str, + help="The full path to the single GPU training " + "program/script to be launched in parallel, " + "followed by all the arguments for the " + "training script") + + # rest from the training program + parser.add_argument('training_script_args', nargs=REMAINDER) + return parser.parse_args() + + +# Adapted from https://psutil.readthedocs.io/en/latest/#kill-process-tree +def terminate_process_tree(pid): + process = psutil.Process(pid) + children = process.children(recursive=True) + children.append(process) + for child in children: + try: + child.terminate() + except psutil.NoSuchProcess: + pass + gone, alive = psutil.wait_procs(children, timeout=30) + for p in alive: + p.kill() + + +def main(): + args = parse_args() + current_env = os.environ.copy() + + for k in current_env.keys(): + if "NCCL" in k: + logger.info(f"{args.node_rank} {k}={current_env[k]}") + + if args.world_info == "None": + raise ValueError("world_info can not be None") + world_info = base64.urlsafe_b64decode(args.world_info) + world_info = json.loads(world_info) + + logger.info(f"WORLD INFO DICT: {world_info}") + node_list = list(world_info.keys()) + args.nnodes = len(node_list) + local_node = node_list[args.node_rank] + local_accelerator_ids = world_info[local_node] + num_local_procs = len(local_accelerator_ids) + logger.info(f"nnodes={args.nnodes}, num_local_procs={num_local_procs}, node_rank={args.node_rank}") + + global_rank_mapping = defaultdict(list) + curr_global_rank = 0 + dist_world_size = 0 + for node_id in node_list: + gids = world_info[node_id] + dist_world_size += len(gids) + for gid in gids: + global_rank_mapping[node_id].append(curr_global_rank) + curr_global_rank += 1 + logger.info(f"global_rank_mapping={global_rank_mapping}") + logger.info(f"dist_world_size={dist_world_size}") + + get_accelerator().set_visible_devices_envs(current_env, local_accelerator_ids) + for env in get_accelerator().visible_devices_envs(): + logger.info(f"Setting {env}={current_env[env]}") + + # set PyTorch distributed related environmental variables + current_env["MASTER_ADDR"] = args.master_addr + current_env["MASTER_PORT"] = str(args.master_port) + current_env["WORLD_SIZE"] = str(dist_world_size) + current_env["CROSS_RANK"] = str(args.node_rank) + current_env["CROSS_SIZE"] = str(args.nnodes) + current_env["LOCAL_SIZE"] = str(num_local_procs) + + if args.save_pid: + print(f"launcher pid: {os.getpid()}") + + pid_file = None + if args.save_pid: + launcher_pid = os.getpid() + pid_file = os.path.join(PID_FILE_BASEPATH, f"{args.save_pid}.deepspeed") + assert not os.path.isfile(pid_file), "pid file exists but shouldn't" + with open(pid_file, 'w') as fd: + fd.write(f"{launcher_pid}") + + if not is_torch_elastic_compatible(): + if args.enable_elastic_training: + logger.info(f"Disabling elastic training support as \ + PyTorch version should be greater than 1.11.x") + args.enable_elastic_training = False + + if os.path.exists(DLTS_POD_ENV_PATH): + with open(DLTS_POD_ENV_PATH) as file: + lines = file.readlines() + lines = [line.rstrip() for line in lines] + for line in lines: + if line.startswith('export FC_TASKROLE_NAME') or line.startswith('export FC_TASK_INDEX'): + key_val = line.split()[1] + key, val = key_val.split('=') + current_env[key] = val + + processes = [] + cmd = [] + + if not args.enable_elastic_training: + if args.enable_each_rank_log != "None": + # prepare the log path and the file name 
prefix + if os.path.isfile(args.enable_each_rank_log): + raise ValueError(f"{args.enable_each_rank_log} should not be a file, it should be a directory.") + if not os.path.exists(args.enable_each_rank_log): + try: + os.makedirs(args.enable_each_rank_log) + except Exception as e: + print(e) + raise ValueError(f"unable to create directory {args.enable_each_rank_log} for each rank log.") + log_name_prefix = time.strftime("%Y%m%d%H%M%S", time.localtime()) + + for local_proc in range(0, num_local_procs): + # each process's rank + dist_rank = global_rank_mapping[local_node][local_proc] + local_rank = dist_rank % num_local_procs + current_env["RANK"] = str(dist_rank) + current_env["LOCAL_RANK"] = str(local_rank) + + # spawn the processes + cmd = [] + if args.bind_cores_to_rank: + cores_per_rank, numactl_cmd = get_numactl_cmd(args.bind_core_list, num_local_procs, local_rank) + current_env["OMP_NUM_THREADS"] = f"{cores_per_rank}" + cmd = cmd + numactl_cmd + if not args.no_python: + cmd.append(sys.executable) + cmd.append("-u") + if args.module: + cmd.append("-m") + else: + if args.module: + raise ValueError("Don't use both the '--no_python' flag" + " and the '--module' flag at the same time.") + cmd.append(args.training_script) + # A user may not want to pass local_rank as a keyword arg so we make this optional. + if not args.no_local_rank: + cmd.append(f"--local_rank={local_rank}") + cmd += args.training_script_args + + if args.enable_each_rank_log != "None": + log_file = os.path.join(args.enable_each_rank_log, f"{log_name_prefix}_rank{dist_rank}.log") + log_fd = open(log_file, 'w') + process = subprocess.Popen(cmd, env=current_env, stdout=log_fd, stderr=log_fd) + else: + process = subprocess.Popen(cmd, env=current_env) + # logs the command from processes + logger.info(f"process {process.pid} spawned with command: {cmd}") + processes.append(process) + else: + from ..elasticity import DSElasticAgent + from torch.distributed.elastic.rendezvous import RendezvousParameters + from torch.distributed.elastic.agent.server.api import WorkerSpec + import torch.distributed.elastic.rendezvous.registry as rdzv_registry + from torch.distributed.elastic.multiprocessing import Std + + if args.min_elastic_nodes == -1: + args.min_elastic_nodes = 1 + if args.max_elastic_nodes == -1: + args.max_elastic_nodes = args.nnodes + assert args.max_elastic_nodes > 0 and args.min_elastic_nodes > 0, "Max and Min nodes should be positive" + + current_env["NCCL_ASYNC_ERROR_HANDLING"] = str(1) + + # Get config and arguments + cmd = [] + if not args.no_python: + cmd = [sys.executable, "-u"] + if args.module: + cmd.append("-m") + else: + if args.module: + raise ValueError("Don't use both the '--no_python' flag" + " and the '--module' flag at the same time.") + cmd.append(args.training_script) + cmd += args.training_script_args + cmd_args = cmd[1:] + + rdzv_configs: Dict[str, str] = {'timeout': 100} + run_id = os.environ.get("ELASTIC_RUN_ID", ELASTIC_TRAINING_ID_DEFAULT) + + # Creating config for rendezvous class + rdzv_parameters = RendezvousParameters(backend='c10d', + endpoint=args.master_addr + ":" + str(args.master_port), + run_id=run_id, + min_nodes=args.min_elastic_nodes, + max_nodes=args.max_elastic_nodes, + **rdzv_configs) + + spec = WorkerSpec( + role='trainer', + local_world_size=num_local_procs, + entrypoint=cmd[0], + args=cmd[1:], + rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters), + max_restarts=100, + monitor_interval=5, + redirects=Std.from_str("0"), + tee=Std.from_str("0"), + master_addr=None, + 
master_port=None, + ) + agent = DSElasticAgent(spec, current_env) + agent.run() + + sig_names = {2: "SIGINT", 15: "SIGTERM"} + last_return_code = None + + def sigkill_handler(signum, frame): + for process in processes: + logger.info(f"Killing subprocess {process.pid}") + try: + terminate_process_tree(process.pid) + except Exception: + pass + if last_return_code is not None: + logger.error(f"{cmd} exits with return code = {last_return_code}") + sys.exit(last_return_code) + if signum in sig_names: + logger.info(f"Main process received {sig_names[signum]}, exiting") + if args.save_pid: + if os.path.isfile(pid_file): + os.remove(pid_file) + sys.exit(1) + + # pass SIGINT/SIGTERM to children if the parent is being terminated + signal.signal(signal.SIGINT, sigkill_handler) + signal.signal(signal.SIGTERM, sigkill_handler) + + alive_processes = set(processes) + while len(alive_processes): + finished_processes = [] + for process in alive_processes: + if process.poll() is None: + # the process is still running + continue + else: + if process.returncode != 0: + last_return_code = process.returncode # for sigkill_handler + sigkill_handler(signal.SIGTERM, None) # not coming back + else: + # exited cleanly + logger.info(f"Process {process.pid} exits successfully.") + finished_processes.append(process) + alive_processes = set(alive_processes) - set(finished_processes) + + time.sleep(1) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7104cab88afbeff9f9873c5cf4971a1f015e6d20 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5bf2e4721cd61ec3f548d652698f62b7a50772d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .cpu_adagrad import DeepSpeedCPUAdagrad diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d47e09c4f5c6c359fbbb86072ec6f17ad392e59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__pycache__/cpu_adagrad.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__pycache__/cpu_adagrad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57d23ba853c642397368574f31b5ec3b00b72d06 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__pycache__/cpu_adagrad.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/cpu_adagrad.py b/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/cpu_adagrad.py new file mode 100644 index 0000000000000000000000000000000000000000..c356a52777f25a9d0fd4b4a1dccd1d238497770b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/cpu_adagrad.py @@ -0,0 +1,109 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed.ops.op_builder import CPUAdagradBuilder +from deepspeed.utils.logging import should_log_le + + +class DeepSpeedCPUAdagrad(torch.optim.Optimizer): + optimizer_id = 0 + + def __init__(self, model_params, lr=1e-2, eps=1e-10, weight_decay=0, amsgrad=False, fp32_optimizer_states=True): + + default_args = dict(lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad) + super(DeepSpeedCPUAdagrad, self).__init__(model_params, default_args) + + self.opt_id = DeepSpeedCPUAdagrad.optimizer_id + DeepSpeedCPUAdagrad.optimizer_id = DeepSpeedCPUAdagrad.optimizer_id + 1 + self.fp32_optimizer_states = fp32_optimizer_states + self.ds_opt_adagrad = CPUAdagradBuilder().load() + + self.ds_opt_adagrad.create_adagrad(self.opt_id, lr, eps, weight_decay, should_log_le("info")) + + def __del__(self): + # need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize + # is used multiple times in the same process (notebook or pytest worker) + self.ds_opt_adagrad.destroy_adagrad(self.opt_id) + + def __setstate__(self, state): + super(DeepSpeedCPUAdagrad, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None, fp16_param_groups=None): + """Update the model parameters. + + .. note:: + This method will be called internally by ZeRO-Offload. DeepSpeed + users should still use ``engine.step()`` as shown in the + `Getting Started + `_ guide. + + Args: + closure (callable, optional): closure to compute the loss. + Defaults to ``None``. + fp16_param_groups: FP16 GPU parameters to update. Performing the + copy here reduces communication time. Defaults to ``None``. + + Returns: + loss: if ``closure`` is provided. Otherwise ``None``. 
+ """ + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + # intended device for step + device = torch.device('cpu') + + for group_id, group in enumerate(self.param_groups): + for param_id, p in enumerate(group['params']): + + if p.grad is None: + continue + + assert p.device == device, f"CPUAdagrad param is on {p.device} and must be 'cpu', make " \ + "sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config." + + state = self.state[p] + # State initialization + if len(state) == 0: + #print(f'group {group_id} param {param_id} = {p.numel()}') + state['step'] = 0 + + #use full precision by default unless self.fp32_optimizer_states is off + state_dtype = torch.float if self.fp32_optimizer_states else p.dtype + + #memory_format=torch.preserve_format) + # gradient variances + state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device='cpu') + #memory_format=torch.preserve_format) + + state['step'] += 1 + + if p.grad.is_sparse == True: + sparse_param = p.sparse_mask(p.grad) + sparse_exp_avg_sq = state['exp_avg_sq'].sparse_mask(p.grad) + self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'], + group['weight_decay'], sparse_param.values(), p.grad.values(), + sparse_exp_avg_sq.values()) + p[sparse_param.indices()] = sparse_param.values() + state['exp_avg_sq'][sparse_exp_avg_sq.indices()] = sparse_exp_avg_sq.values() + if fp16_param_groups is not None: + fp16_param_groups[group_id][param_id][sparse_param.indices()] = sparse_param.values() + else: + if fp16_param_groups is not None: + self.ds_opt_adagrad.adagrad_update_copy(self.opt_id, state['step'], group['lr'], group['eps'], + group['weight_decay'], p.data, p.grad.data, + state['exp_avg_sq'], + fp16_param_groups[group_id][param_id].data) + else: + self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'], + group['weight_decay'], p.data, p.grad.data, + state['exp_avg_sq']) + return loss diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..81bc07e827c88e3f7c134eab78c761813fdf7826 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .fused_lamb import FusedLamb diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e8d5753b99113f3a4549514db2949834e64b657 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/fused_lamb.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/fused_lamb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a059293df196b0aa6238a257d2c63d42a12bf6d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/fused_lamb.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/fused_lamb.py b/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/fused_lamb.py new file mode 100644 index 0000000000000000000000000000000000000000..6ccd9d4c6b066601bb2f6e5d8d8def6be09fd22b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/lamb/fused_lamb.py @@ -0,0 +1,174 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Copyright NVIDIA/apex +This file is adapted from NVIDIA/apex/optimizer/fused_adam and implements the LAMB optimizer +""" +import types +import torch +from deepspeed.ops.op_builder import FusedLambBuilder + + +class FusedLamb(torch.optim.Optimizer): + """Implements the LAMB algorithm. Currently GPU-only. + + LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes. + https://arxiv.org/abs/1904.00962 + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + bias_correction (bool, optional): bias correction (default: True) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + eps_inside_sqrt (boolean, optional): in the 'update parameters' step, + adds eps to the bias-corrected second moment estimate before + evaluating square root instead of adding it to the square root of + second moment estimate as in the original paper. (default: False) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + max_grad_norm (float, optional): value used to clip global grad norm + (default: 0.0) + max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0) + min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01) + amsgrad (boolean, optional): NOT SUPPORTED in FusedLamb! 
+ """ + + def __init__(self, + params, + lr=1e-3, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-8, + eps_inside_sqrt=False, + weight_decay=0., + max_grad_norm=0., + max_coeff=10.0, + min_coeff=0.01, + amsgrad=False): + self.fused_lamb_cuda = FusedLambBuilder().load() + + if amsgrad: + raise RuntimeError('FusedLamb does not support the AMSGrad variant.') + defaults = dict(lr=lr, + bias_correction=bias_correction, + betas=betas, + eps=eps, + weight_decay=weight_decay, + max_grad_norm=max_grad_norm, + max_coeff=max_coeff, + min_coeff=min_coeff) + super(FusedLamb, self).__init__(params, defaults) + self.eps_mode = 0 if eps_inside_sqrt else 1 + self.lamb_coeffs = [] + + def step(self, closure=None, grads=None, output_params=None, scale=1., grad_norms=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + grads (list of tensors, optional): weight gradient to use for the + optimizer update. If gradients have type torch.half, parameters + are expected to be in type torch.float. (default: None) + output params (list of tensors, optional): A reduced precision copy + of the updated weights written out in addition to the regular + updated weights. Have to be of same type as gradients. (default: None) + scale (float, optional): factor to divide gradient tensor values + by before applying to weights. (default: 1) + """ + loss = None + if closure is not None: + loss = closure() + + if grads is None: + grads_group = [None] * len(self.param_groups) + # backward compatibility + # assuming a list/generator of parameter means single group + elif isinstance(grads, types.GeneratorType): + grads_group = [grads] + elif type(grads[0]) != list: + grads_group = [grads] + else: + grads_group = grads + + if output_params is None: + output_params_group = [None] * len(self.param_groups) + elif isinstance(output_params, types.GeneratorType): + output_params_group = [output_params] + elif type(output_params[0]) != list: + output_params_group = [output_params] + else: + output_params_group = output_params + + if grad_norms is None: + grad_norms = [None] * len(self.param_groups) + + #remove the previous coeffs + del self.lamb_coeffs[:] + + for group, grads_this_group, output_params_this_group, grad_norm_group in zip( + self.param_groups, grads_group, output_params_group, grad_norms): + if grads_this_group is None: + grads_this_group = [None] * len(group['params']) + if output_params_this_group is None: + output_params_this_group = [None] * len(group['params']) + + if grad_norm_group is None: + grad_norm_group = [None] * len(group['params']) + elif not isinstance(grad_norm_group, list): + grad_norm_group = [grad_norm_group] + + bias_correction = 1 if group['bias_correction'] else 0 + + for p, grad, output_param, grad_norm in zip(group['params'], grads_this_group, output_params_this_group, + grad_norm_group): + + # compute combined scale factor for this group + combined_scale = scale + if group['max_grad_norm'] > 0: + # norm is in fact norm*scale + clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm'] + if clip > 1: + combined_scale = clip * scale + + #note: p.grad should not ever be set for correct operation of mixed precision optimizer that sometimes sends None gradients + if p.grad is None and grad is None: + continue + if grad is None: + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('FusedLamb does not support sparse gradients') + + state = self.state[p] + + # State initialization + if 
len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + max_coeff = group['max_coeff'] + min_coeff = group['min_coeff'] + + state['step'] += 1 + + out_p = torch.tensor([], dtype=torch.float) if output_param is None else output_param + lamb_coeff = self.fused_lamb_cuda.lamb(p.data, out_p, exp_avg, exp_avg_sq, grad, group['lr'], beta1, + beta2, max_coeff, min_coeff, group['eps'], combined_scale, + state['step'], self.eps_mode, bias_correction, + group['weight_decay']) + self.lamb_coeffs.append(lamb_coeff) + return loss + + def get_lamb_coeffs(self): + lamb_coeffs = [lamb_coeff.item() for lamb_coeff in self.lamb_coeffs] + return lamb_coeffs diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2f90e5ec2e808057a26cc36f2d7caedecb44c86e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .cpu_lion import DeepSpeedCPULion +from .fused_lion import FusedLion diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01f8302cb868e17a2e0fe8ff3d47ce681feca40d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/cpu_lion.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/cpu_lion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36d6366e89ed1bd51aac31c9109ef6d82c43c69d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/cpu_lion.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/fused_lion.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/fused_lion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ce58d48c35948ae568384c80bb2615a5c5e5557 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/fused_lion.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/multi_tensor_apply.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/multi_tensor_apply.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..267d309af664dbe04c8aefe23438f88ad9f34e50 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/multi_tensor_apply.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lion/cpu_lion.py b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/cpu_lion.py new file mode 100644 index 0000000000000000000000000000000000000000..a91a00643873d07d99490a73eeed57f2c8eb618a --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/cpu_lion.py @@ -0,0 +1,141 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from cpuinfo import get_cpu_info +from deepspeed.utils import logger +from deepspeed.utils.logging import should_log_le +from deepspeed.ops.op_builder import CPULionBuilder + + +class DeepSpeedCPULion(torch.optim.Optimizer): + optimizer_id = 0 + + def __init__(self, model_params, lr=1e-3, betas=(0.9, 0.999), weight_decay=0, fp32_optimizer_states=True): + """Fast vectorized implementation of Lion optimizer on CPU: + + See Symbolic Discovery of Optimization Algorithms (https://doi.org/10.48550/arXiv.2302.06675). + + .. note:: + We recommend using our `config + `_ + to allow :meth:`deepspeed.initialize` to build this optimizer + for you. + + + Arguments: + model_params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square. (default: (0.9, 0.999)) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + full_precision_optimizer_states: creates momentum and variance in full precision regardless of + the precision of the parameters (default: True) + """ + + default_args = dict(lr=lr, betas=betas, weight_decay=weight_decay) + super(DeepSpeedCPULion, self).__init__(model_params, default_args) + + cpu_info = get_cpu_info() + self.cpu_vendor = cpu_info["vendor_id_raw"].lower() if "vendor_id_raw" in cpu_info else "unknown" + if "amd" in self.cpu_vendor: + for group_id, group in enumerate(self.param_groups): + for param_id, p in enumerate(group['params']): + if p.dtype == torch.half: + logger.warning("FP16 params for CPULion may not work on AMD CPUs") + break + else: + continue + break + + self.opt_id = DeepSpeedCPULion.optimizer_id + DeepSpeedCPULion.optimizer_id = DeepSpeedCPULion.optimizer_id + 1 + self.fp32_optimizer_states = fp32_optimizer_states + self.ds_opt_lion = CPULionBuilder().load() + + self.ds_opt_lion.create_lion(self.opt_id, lr, betas[0], betas[1], weight_decay, should_log_le("info")) + + def __del__(self): + # need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize + # is used multiple times in the same process (notebook or pytest worker) + self.ds_opt_lion.destroy_lion(self.opt_id) + + def __setstate__(self, state): + super(DeepSpeedCPULion, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None, fp16_param_groups=None): + """Update the model parameters. + + .. note:: + This method will be called internally by ZeRO-Offload. DeepSpeed + users should still use ``engine.step()`` as shown in the + `Getting Started + `_ guide. + + Args: + closure (callable, optional): closure to compute the loss. + Defaults to ``None``. + fp16_param_groups: FP16 GPU parameters to update. Performing the + copy here reduces communication time. Defaults to ``None``. + + Returns: + loss: if ``closure`` is provided. Otherwise ``None``. 
+ """ + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + # intended device for step + device = torch.device('cpu') + + # converting the fp16 params to a group of parameter + if type(fp16_param_groups) is list: + if type(fp16_param_groups[0]) is not list: + fp16_param_groups = [fp16_param_groups] + elif fp16_param_groups is not None: + fp16_param_groups = [[fp16_param_groups]] + + for group_id, group in enumerate(self.param_groups): + for param_id, p in enumerate(group['params']): + + if p.grad is None: + continue + + assert p.device == device, f"CPULion param is on {p.device} and must be 'cpu', make " \ + "sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config." + + state = self.state[p] + # State initialization + if len(state) == 0: + #print(f'group {group_id} param {param_id} = {p.numel()}') + state['step'] = 0 + + #use full precision by default unless self.fp32_optimizer_states is off + state_dtype = torch.float if self.fp32_optimizer_states else p.dtype + + # gradient momentums + state['exp_avg'] = torch.zeros_like(p.data, dtype=state_dtype, device=device) + #memory_format=torch.preserve_format) + # gradient variances + state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device=device) + #memory_format=torch.preserve_format) + + state['step'] += 1 + beta1, beta2 = group['betas'] + + if fp16_param_groups is not None: + self.ds_opt_lion.lion_update_copy(self.opt_id, state['step'], group['lr'], beta1, beta2, + group['weight_decay'], p.data, p.grad.data, state['exp_avg'], + fp16_param_groups[group_id][param_id].data) + else: + self.ds_opt_lion.lion_update(self.opt_id, state['step'], group['lr'], beta1, beta2, + group['weight_decay'], p.data, p.grad.data, state['exp_avg']) + return loss diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lion/fused_lion.py b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/fused_lion.py new file mode 100644 index 0000000000000000000000000000000000000000..7332a7f96361a1a05d770d945a90efe6b24ef217 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/fused_lion.py @@ -0,0 +1,131 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +This file is modified from fused_adam.py +""" + +import torch +from .multi_tensor_apply import MultiTensorApply + +multi_tensor_applier = MultiTensorApply(2048 * 32) +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import FusedLionBuilder + + +class FusedLion(torch.optim.Optimizer): + """Implements Lion algorithm. + + Currently GPU-only. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square. (default: (0.9, 0.999)) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + set_grad_none (bool, optional): whether set grad to None when zero_grad() + method is called. (default: True) + + .. 
_Symbolic Discovery of Optimization Algorithms: + https://doi.org/10.48550/arXiv.2302.06675 + """ + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), weight_decay=0., set_grad_none=True): + + defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay) + super(FusedLion, self).__init__(params, defaults) + self.set_grad_none = set_grad_none + + fused_lion_cuda = FusedLionBuilder().load() + # Skip buffer + self._dummy_overflow_buf = get_accelerator().IntTensor([0]) + self.multi_tensor_lion = fused_lion_cuda.multi_tensor_lion + + def zero_grad(self): + if self.set_grad_none: + for group in self.param_groups: + for p in group['params']: + p.grad = None + else: + super(FusedLion, self).zero_grad() + + def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + + The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes. + """ + if any(p is not None for p in [grads, output_params, scale, grad_norms]): + raise RuntimeError('FusedLion has been updated.') + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + if len(group['params']) == 0: + continue + beta1, beta2 = group['betas'] + + # assume the same step across the group for now to simplify things + # a per-parameter step can easily be supported by making it a tensor, or by passing a list into the kernel + if 'step' not in group: + group['step'] = 0 + + # create lists for multi-tensor apply + g_16, p_16, m_16 = [], [], [] + g_bf, p_bf, m_bf = [], [], [] + g_32, p_32, m_32 = [], [], [] + + for p in group['params']: + if p.grad is None: + continue + if p.grad.data.is_sparse: + raise NotImplementedError('FusedLion does not support sparse gradients') + + state = self.state[p] + # State initialization + if len(state) == 0: + # DeepSpeed ZeRO 3 processes one subgroup at a time, so we need to keep tracking the step count for each tensor separately. + # This is not an issue for ZeRO 1 & 2, since they apply a single optimization step to the whole param group at the same time. + # In order to keep backward compatibility with existing checkpoints, we use group['step'] to initialize state['step'] if it exists.
+ state['step'] = group.get('step', 0) + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + + if p.dtype == torch.float16: + g_16.append(p.grad.data) + p_16.append(p.data) + m_16.append(state['exp_avg']) + elif p.dtype == torch.bfloat16: + g_bf.append(p.grad) + p_bf.append(p) + m_bf.append(state['exp_avg']) + elif p.dtype == torch.float32: + g_32.append(p.grad.data) + p_32.append(p.data) + m_32.append(state['exp_avg']) + else: + raise RuntimeError('FusedLion only supports fp16, bf16 and fp32.') + + if len(g_16) > 0: + state['step'] += 1 + multi_tensor_applier(self.multi_tensor_lion, self._dummy_overflow_buf, [g_16, p_16, m_16], group['lr'], + beta1, beta2, state['step'], group['weight_decay']) + + if len(g_bf) > 0: + state['step'] += 1 + multi_tensor_applier(self.multi_tensor_lion, self._dummy_overflow_buf, [g_bf, p_bf, m_bf], group['lr'], + beta1, beta2, state['step'], group['weight_decay']) + + if len(g_32) > 0: + state['step'] += 1 + multi_tensor_applier(self.multi_tensor_lion, self._dummy_overflow_buf, [g_32, p_32, m_32], group['lr'], + beta1, beta2, state['step'], group['weight_decay']) + + return loss diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/lion/multi_tensor_apply.py b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/multi_tensor_apply.py new file mode 100644 index 0000000000000000000000000000000000000000..0ba228505cef747eea4fec62f3e68707fa4daa0c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/lion/multi_tensor_apply.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Copyright NVIDIA/apex +This file is adapted from NVIDIA/apex, commit a109f85 +""" + + +class MultiTensorApply(object): + + def __init__(self, chunk_size): + self.chunk_size = chunk_size + + def __call__(self, op, noop_flag_buffer, tensor_lists, *args): + return op(self.chunk_size, noop_flag_buffer, tensor_lists, *args) diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6527ace087b5f4de8ad40065824fdd0f0e0fe873 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' + +from .cpu_adam import CPUAdamBuilder +from .fused_adam import FusedAdamBuilder +from .no_impl import NotImplementedBuilder diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1b52e65611d07b7ee5af995747ab1d818ab912b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/builder.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8b1e7ddaff7dd2079e839a5c01d40e7cd1f3dee Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/builder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/cpu_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/cpu_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e704c05093d0a3e8ec7fef95478936c4d91008d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/cpu_adam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/fused_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/fused_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c5448699693b0a4636284a118cc9d4c4e234637 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/fused_adam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/no_impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/no_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa28c4283d32c2282241a10107be755dc5a58ad1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/__pycache__/no_impl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/builder.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..3c86128fffd6eddc302bcec06b02474086afa44a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/builder.py @@ -0,0 +1,37 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os + +try: + # is op_builder from deepspeed or a 3p version? 
this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + from op_builder.builder import OpBuilder +except ImportError: + from deepspeed.ops.op_builder.builder import OpBuilder + + +class CPUOpBuilder(OpBuilder): + + def builder(self): + from torch.utils.cpp_extension import CppExtension as ExtensionBuilder + include_dirs = [os.path.abspath(x) for x in self.strip_empty_entries(self.include_paths())] + compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())} + + cpp_ext = ExtensionBuilder(name=self.absolute_name(), + sources=self.strip_empty_entries(self.sources()), + include_dirs=include_dirs, + libraries=self.strip_empty_entries(self.libraries_args()), + extra_compile_args=compile_args) + + return cpp_ext + + def cxx_args(self): + return ['-O3', '-g', '-Wno-reorder'] + + def libraries_args(self): + return [] diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/cpu_adam.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/cpu_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..2f3b7aefe7059f22e060e40918d68e26bd901231 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/cpu_adam.py @@ -0,0 +1,33 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CPUOpBuilder + + +class CPUAdamBuilder(CPUOpBuilder): + BUILD_VAR = "DS_BUILD_CPU_ADAM" + NAME = "cpu_adam" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adam.{self.NAME}_op' + + def sources(self): + return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp'] + + def cxx_args(self): + args = super().cxx_args() + args += ['-DENABLE_BFLOAT16'] + return args + + def libraries_args(self): + args = super().libraries_args() + return args + + def include_paths(self): + return ['csrc/includes'] diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/fused_adam.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/fused_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..d77228317ddb42930aaf6fbdee0d3b0e61b5cfd5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/fused_adam.py @@ -0,0 +1,29 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CPUOpBuilder + + +class FusedAdamBuilder(CPUOpBuilder): + BUILD_VAR = "DS_BUILD_FUSED_ADAM" + NAME = "fused_adam" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adam.{self.NAME}_op' + + def sources(self): + return ['csrc/cpu/adam/fused_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp'] + + def cxx_args(self): + args = super().cxx_args() + args += ['-DENABLE_BFLOAT16'] + return args + + def include_paths(self): + return ['csrc/includes'] diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/no_impl.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/no_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..140d65b48defbea439483b63fd545e52de92209e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/hpu/no_impl.py @@ -0,0 +1,24 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import CPUOpBuilder + + +class NotImplementedBuilder(CPUOpBuilder): + BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED" + NAME = "deepspeed_not_implemented" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + + def absolute_name(self): + return f'deepspeed.ops.comm.{self.NAME}_op' + + def load(self, verbose=True): + raise ValueError("This op has not been implemented on the HPU backend.") + + def sources(self): + return [] diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e512e25a2a29e1b463bdc373722a64c755b89098 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' + +from .fused_adam import FusedAdamBuilder +from .async_io import AsyncIOBuilder +from .no_impl import NotImplementedBuilder +from .cpu_adam import CPUAdamBuilder +from .cpu_adagrad import CPUAdagradBuilder +from .cpu_lion import CPULionBuilder +from .inference import InferenceBuilder diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc40697c48b62224b52de4b9a906d1ea36954609 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/async_io.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/async_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54b657c62abe1e5d109aa5dcf153732af07c34a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/async_io.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/builder.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9fc120eda8eb97333cffc1cf4c5d71da4a6b4c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/builder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adagrad.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adagrad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f798907fc5913603930636a2afb6c8c6b6a76d61 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adagrad.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52790d0c9aa946b636ab12c422410ec5523b785d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_adam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_lion.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_lion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17d1bdc17a811616f85f606ba160f3b6d0b39fa1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/cpu_lion.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/fused_adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/fused_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13d5a00f5c1c4d40c225260b8e554e8d8585896d Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/fused_adam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/inference.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0502d60ea591203d59fc43f90bc78965912457f8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/inference.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/no_impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/no_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1eee56359c0fda0a5d1af4338009889dfb446d59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/__pycache__/no_impl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/builder.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..0dea2e78915e2a939d5c1839c2e2d88e948e10f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/builder.py @@ -0,0 +1,86 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import re +import os +try: + import torch_npu +except ImportError as e: + pass + +try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + from op_builder.builder import OpBuilder +except ImportError: + from deepspeed.ops.op_builder.builder import OpBuilder + + +class NPUOpBuilder(OpBuilder): + _ascend_path = None + _torch_npu_path = None + _cann_version = None + + def __init__(self, name): + super().__init__(name) + self._ascend_path = self.installed_cann_path() + self._torch_npu_path = os.path.join(os.path.dirname(os.path.abspath(torch_npu.__file__))) + try: + self._cann_version = self.installed_cann_version(self.name) + except BaseException: + print(f"{self.name} ascend_cann is missing, npu ops cannot be compiled!") + + def cann_defs(self): + if self._cann_version: + return '-D__ENABLE_CANN__' + return '-D__DISABLE_CANN__' + + def installed_cann_path(self): + if "ASCEND_HOME_PATH" in os.environ and os.path.exists(os.environ["ASCEND_HOME_PATH"]): + return os.environ["ASCEND_HOME_PATH"] + return None + + def installed_cann_version(self, name=""): + ascend_path = self.installed_cann_path() + assert ascend_path is not None, "CANN_HOME does not exist, unable to compile NPU op(s)" + cann_version = "" + for dirpath, _, filenames in os.walk(os.path.realpath(ascend_path)): + if cann_version: + break + install_files = [file for file in filenames if re.match(r"ascend_.*_install\.info", file)] + if install_files: + filepath = os.path.join(dirpath, install_files[0]) + with open(filepath, "r") as f: + for line in f: + if line.find("version") != -1: + cann_version = line.strip().split("=")[-1] + break + return cann_version + + def include_paths(self): + paths = super().include_paths() + paths += [os.path.join(self._ascend_path, 'include'), os.path.join(self._torch_npu_path, 'include')] + return paths + + def
cxx_args(self): + args = super().cxx_args() + args += ['-O3', '-std=c++17', '-g', '-Wno-reorder', '-fopenmp'] + args += ['-fstack-protector-all', '-Wl,-z,relro,-z,now,-z,noexecstack', '-Wl,--disable-new-dtags,--rpath'] + args += [ + self.cann_defs(), + self.cpu_arch(), + self.simd_width(), '-L' + os.path.join(self._ascend_path, 'lib64'), + '-L' + os.path.join(self._torch_npu_path, 'lib') + ] + return args + + def extra_ldflags(self): + flags = super().extra_ldflags() + flags += [ + '-L' + os.path.join(self._ascend_path, 'lib64'), '-lascendcl', + '-L' + os.path.join(self._torch_npu_path, 'lib'), '-ltorch_npu' + ] + return flags diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_adagrad.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_adagrad.py new file mode 100644 index 0000000000000000000000000000000000000000..161bc82efe1ca01660fdeedd30079a8f10f1d269 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_adagrad.py @@ -0,0 +1,25 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import NPUOpBuilder + + +class CPUAdagradBuilder(NPUOpBuilder): + BUILD_VAR = "DS_BUILD_CPU_ADAGRAD" + NAME = "cpu_adagrad" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.adagrad.{self.NAME}_op' + + def sources(self): + return ['csrc/adagrad/cpu_adagrad.cpp'] + + def include_paths(self): + args = super().include_paths() + args += ['csrc/includes'] + return args diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_lion.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_lion.py new file mode 100644 index 0000000000000000000000000000000000000000..6917e0fd03d08dec42e71479110224d577b55b5b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/cpu_lion.py @@ -0,0 +1,25 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import NPUOpBuilder + + +class CPULionBuilder(NPUOpBuilder): + BUILD_VAR = "DS_BUILD_CPU_LION" + NAME = "cpu_lion" + + def __init__(self): + super().__init__(name=self.NAME) + + def absolute_name(self): + return f'deepspeed.ops.lion.{self.NAME}_op' + + def sources(self): + return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp'] + + def include_paths(self): + args = super().include_paths() + args += ['csrc/includes'] + return args diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/no_impl.py b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/no_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..5b1771fabc22f6ad13161231900a737bba733e68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/op_builder/npu/no_impl.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .builder import NPUOpBuilder + + +class NotImplementedBuilder(NPUOpBuilder): + BUILD_VAR = "DS_BUILD_NOT_IMPLEMENTED" + NAME = "deepspeed_not_implemented" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=name) + + def absolute_name(self): + return f'deepspeed.ops.comm.{self.NAME}_op' + + def load(self, verbose=True): + raise ValueError("This op has not been implemented on the NPU backend.") + + def sources(self): + return [] + + def cxx_args(self): + return [] + + def extra_ldflags(self): + return [] + + def include_paths(self): + return [] diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-dv-1.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-dv-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..e1f109fd6086eb97a3be2e7533dc658dac0970d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-dv-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e900b190795224ff48e46a1c02b10020d4c986ba142880c02c86f0b472ded3c9 +size 309 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-s-act-.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..d5feb2e1a57bf4ba4d811dbff391977f38122fed --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_2/api-v1-jdl-dn-anneal-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff6225cb98260ca4ebec015a1a2754f2a7b0dbfb4d0f17dcf6727542154e2a10 +size 346 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jd-40589.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jd-40589.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..c3454ff8e5a399b14e2033d6122315c4e4b2dbfc --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jd-40589.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59d1aa6b02d2358c16fa9e4fbeff523a3bd10ebd38c7c371911fa8335e7bdcbf +size 598 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-dv-3.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-dv-3.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..ed4efacefc3e856c3ad56407f2d195c65c61e4bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdl-dn-emotions-l-2-dv-3.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19e6b2a2a8fec5403c146642a4dc2e077d66a3a1ac87e8239bd1dd31c928ab9c +size 315 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdq-40589.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdq-40589.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..49d394f0c458c02f7d9781445ef870cf8f747e0e --- /dev/null +++
b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/api-v1-jdq-40589.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0f7973193eb35d19e99d1d8bca3c7f3a8b8d0410508af34ad571aee8ec5ab05 +size 913 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/data-v1-dl-4644182.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/data-v1-dl-4644182.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..3c0efffc333c6ef0622ed3d3e3c95d3e319fc05f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40589/data-v1-dl-4644182.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c4226550827ceff3509c67179c473e14385cee206536362e57c5e0dfc7751bf +size 4344 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdf-61.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdf-61.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..466d7fab3f54e053ae4abc1044c671ac525accc0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdf-61.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33cbd6ae945ba04969370ab35604e9363c87256393493382b5118a89d59386d6 +size 268 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-dv-1.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-dv-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..76bb2da49d2e31a888153004b5177dc2a0c2f46c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-dv-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bce20aae7fd903796d96d5b3a3677b7058fbc5f3fe0996ee9d491e4ee23d132 +size 293 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-s-act-.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..c628aa1d9076067123d34c4c392a3a215dae524b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdl-dn-iris-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9f4b9317997df63ed8d2bb073a3906344c0e0be017fd384eaec36ced8b94bae +size 330 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdq-61.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdq-61.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..285c038aae89afa2eb0c334cdf28a9d0f6e2cb32 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_61/api-v1-jdq-61.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:424cd47c12a51c7bb8d8169fac80fb5601f152bd78468b241d4b115bf7d22f20 +size 1121
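The optimizer classes added by the DeepSpeed files above (DeepSpeedCPUAdagrad, FusedLamb, DeepSpeedCPULion, FusedLion) are normally not constructed by hand: their docstrings recommend letting deepspeed.initialize build the optimizer from the run config and then driving training through engine.backward() and engine.step(). A minimal sketch of that flow is given below; the toy model, the config values, and the choice of Adagrad with CPU offload are illustrative assumptions, not anything specified in the diff.

import torch
import deepspeed

# Toy model and config values, purely illustrative.
model = torch.nn.Linear(8, 2)
ds_config = {
    "train_batch_size": 4,
    "optimizer": {"type": "Adagrad", "params": {"lr": 1e-2}},
    "zero_optimization": {"stage": 2, "offload_optimizer": {"device": "cpu"}},
}

# deepspeed.initialize picks the concrete optimizer implementation for this config
# (a CPU Adagrad variant is expected here because the optimizer state is offloaded to CPU).
engine, optimizer, _, _ = deepspeed.initialize(model=model,
                                               model_parameters=model.parameters(),
                                               config=ds_config)

# Training then goes through the engine, i.e. engine.backward(loss) followed by
# engine.step(), as the step() docstrings above advise instead of calling the
# optimizer's step() directly.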