applied-ai-018 committed
Commit 179036e · verified · 1 Parent(s): e19ef9b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. venv/lib/python3.10/site-packages/deepspeed/checkpoint/__init__.py +20 -0
  2. venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/__init__.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/constants.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/deepspeed_checkpoint.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_3d_utils.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_meg_2d.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_utils.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/universal_checkpoint.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/utils.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/deepspeed/checkpoint/constants.py +87 -0
  11. venv/lib/python3.10/site-packages/deepspeed/checkpoint/deepspeed_checkpoint.py +294 -0
  12. venv/lib/python3.10/site-packages/deepspeed/checkpoint/ds_to_universal.py +401 -0
  13. venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_3d_utils.py +111 -0
  14. venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_meg_2d.py +222 -0
  15. venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_utils.py +113 -0
  16. venv/lib/python3.10/site-packages/deepspeed/checkpoint/universal_checkpoint.py +146 -0
  17. venv/lib/python3.10/site-packages/deepspeed/checkpoint/utils.py +62 -0
  18. venv/lib/python3.10/site-packages/deepspeed/checkpoint/zero_checkpoint.py +142 -0
  19. venv/lib/python3.10/site-packages/deepspeed/compression/__init__.py +8 -0
  20. venv/lib/python3.10/site-packages/deepspeed/compression/basic_layer.py +840 -0
  21. venv/lib/python3.10/site-packages/deepspeed/compression/compress.py +239 -0
  22. venv/lib/python3.10/site-packages/deepspeed/compression/config.py +452 -0
  23. venv/lib/python3.10/site-packages/deepspeed/compression/constants.py +188 -0
  24. venv/lib/python3.10/site-packages/deepspeed/compression/helper.py +322 -0
  25. venv/lib/python3.10/site-packages/deepspeed/compression/scheduler.py +173 -0
  26. venv/lib/python3.10/site-packages/deepspeed/compression/utils.py +222 -0
  27. venv/lib/python3.10/site-packages/deepspeed/launcher/__init__.py +5 -0
  28. venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/__init__.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/constants.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/launch.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/launcher_helper.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/multinode_runner.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/runner.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/deepspeed/launcher/launch.py +359 -0
  35. venv/lib/python3.10/site-packages/deepspeed/ops/__pycache__/__init__.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__init__.py +6 -0
  37. venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__pycache__/__init__.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__pycache__/cpu_adagrad.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/cpu_adagrad.py +109 -0
  40. venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__init__.py +6 -0
  41. venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/fused_lamb.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/deepspeed/ops/lamb/fused_lamb.py +174 -0
  44. venv/lib/python3.10/site-packages/deepspeed/ops/lion/__init__.py +7 -0
  45. venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/__init__.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/cpu_lion.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/fused_lion.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/multi_tensor_apply.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/deepspeed/ops/lion/cpu_lion.py +141 -0
  50. venv/lib/python3.10/site-packages/deepspeed/ops/lion/fused_lion.py +131 -0
venv/lib/python3.10/site-packages/deepspeed/checkpoint/__init__.py ADDED
@@ -0,0 +1,20 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .reshape_meg_2d import reshape_meg_2d_parallel
+
+ from .deepspeed_checkpoint import DeepSpeedCheckpoint
+
+ from .utils import (get_layer_ckpt_name_for_rank, get_model_ckpt_name_for_rank, get_zero_ckpt_name_for_rank)
+
+ from .reshape_utils import (merge_state)
+
+ from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor)
+
+ from .zero_checkpoint import ZeROCheckpoint
+
+ from .universal_checkpoint import enable_universal_checkpoint, SubparamShape
+
+ from .constants import *
venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (762 Bytes)
venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/constants.cpython-310.pyc ADDED
Binary file (2.33 kB)
venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/deepspeed_checkpoint.cpython-310.pyc ADDED
Binary file (12.4 kB)
venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_3d_utils.cpython-310.pyc ADDED
Binary file (3.98 kB)
venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_meg_2d.cpython-310.pyc ADDED
Binary file (6.33 kB)
venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/reshape_utils.cpython-310.pyc ADDED
Binary file (4.08 kB)
venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/universal_checkpoint.cpython-310.pyc ADDED
Binary file (3.31 kB)
venv/lib/python3.10/site-packages/deepspeed/checkpoint/__pycache__/utils.cpython-310.pyc ADDED
Binary file (2.31 kB)
 
venv/lib/python3.10/site-packages/deepspeed/checkpoint/constants.py ADDED
@@ -0,0 +1,87 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ """
+ Various symbolic constants used for model checkpointing
+ """
+
+ #########################################
+ # Optimizer checkpoint keys
+ #########################################
+ OPTIMIZER_STATE_DICT = "optimizer_state_dict"
+ FP32_GROUPS = "fp32_groups"
+ FP32_FLAT_GROUPS = 'fp32_flat_groups'
+
+ BASE_OPTIMIZER_STATE = 'base_optimizer_state'
+ BASE_OPTIMIZER_STATE_STEP = 'base_optimizer_state_step'
+ SINGLE_PARTITION_OF_FP32_GROUPS = "single_partition_of_fp32_groups"
+ PARAM_GROUPS = 'param_groups'
+ GROUP_PADDINGS = 'group_paddings'
+ PARTITION_COUNT = 'partition_count'
+ ZERO_STAGE = 'zero_stage'
+ CLIP_GRAD = 'clip_grad'
+ FP32_WEIGHT_KEY = "fp32"
+ LOSS_SCALER = 'loss_scaler'
+
+ #########################################
+ # Module checkpoint keys
+ #########################################
+ PARAM = 'param'
+ PARAM_SHAPES = 'param_shapes'
+ BUFFER_NAMES = 'buffer_names'
+ FROZEN_PARAM_SHAPES = 'frozen_param_shapes'
+ FROZEN_PARAM_FRAGMENTS = 'frozen_param_fragments'
+
+ #########################################
+ # Checkpoint naming constants
+ #########################################
+ MODEL_FILE_PREFIX = 'mp_rank_'
+ ZERO_FILE_PREFIX = 'zero_pp_rank_'
+ OPTIM_FILE_SUFFIX = '_optim_states.pt'
+ MODEL_FILE_SUFFIX = '_model_states.pt'
+ LAYER_FILE_PREFIX = 'layer_'
+ BF16_ZERO_FILE_PREFIX = 'bf16_' + ZERO_FILE_PREFIX
+ FP16_ZERO_FILE_PREFIX = 'fp16_' + ZERO_FILE_PREFIX
+
+ #########################################
+ # Checkpoint utility keys
+ #########################################
+ DS_VERSION = 'ds_version'
+
+ #########################################
+ # Universal Checkpoint keys
+ #########################################
+ UNIVERSAL_CHECKPOINT_INFO = 'universal_checkpoint_info'
+ UNIVERSAL_CHECKPOINT_VERSION_KEY = 'universal_checkpoint_version'
+ # Reserve version 0.1 for the hardcoded logic used in BLOOM-176B training
+ UNIVERSAL_CHECKPOINT_VERSION_VALUE = 0.2
+
+ # Vocabulary padding
+ VOCAB_TENSOR = 'vocab_tensor'
+ PADDED_VOCAB_SIZE = 'padded_vocab_size'
+ ORIGINAL_VOCAB_SIZE = 'original_vocab_size'
+
+ # Parameter splitting/merging
+ PARAM_SLICE_MAPPINGS = 'param_slice_mappings'
+ CAT_DIM = "cat_dim"
+ # Following is a special case where a parameter effectively contains sub parameters.
+ # As an example, consider Megatron-DeepSpeed GPT SWIGLU implementation (mlp.h_to_4h).
+ # In this case, a single parameter is allocated contiguously, but used as separate parameters.
+ # When using universal checkpoint, we have to normalize the representation of the full parameter.
+ # We normalize it by concatenating all slices of the sub params and then concatenating the sub params.
+ # All concat operations are done on CAT_DIM (currently, no support for different concat dims sub params and TP slicing).
+ # Similarly, load_hp_checkpoint_state has to take the needed actions when loading from universal.
+ PARAM_N_SUB_PARAMS = "param_n_sub_params"
+
+ SUB_PARAM_SHAPE = "sub_param_shape"
+
+ # Regex list of parameters that require special handling
+ VOCABULARY_PARAMETER_PATTERNS = 'vocabulary_parameter_patterns'
+ PIPELINE_REPLICATED_PARAMETER_PATTERNS = 'pipeline_replicated_parameter_patterns'
+ PARAMETER_TO_AVERAGE_PATTERNS = 'parameter_to_average_patterns'
+ PARAMETER_WITH_ROW_PARALLELISM_PATTERNS = 'parameter_with_row_parallelism_patterns'
+ TP_REPLICATED_PARAMETER_PATTERNS = 'tp_replicated_parameter_patterns'
+ PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0 = 'parameter_with_2_sub_params_cat_dim_0'
+ PARAMETER_WITH_SUB_PARAMS = 'parameter_with_sub_params'
+ SUB_PARAMS_SHAPE = 'sub_params_shape'
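Editor's note: the sub-parameter comment above is easiest to see on a toy tensor. The sketch below is illustrative only (the shapes, tp_degree=2, and two sub-params are invented, not values taken from any checkpoint); it mirrors the PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0 handling in ds_to_universal.py: each TP slice is split into its sub-params, matching sub-params are concatenated across ranks along CAT_DIM, and the merged sub-params are then concatenated back into one full parameter.

import torch

# Hypothetical fused SWIGLU-style weight split across tp_degree=2 ranks.
# Each rank holds two sub-params stacked on dim 0 (CAT_DIM = 0).
tp_degree, n_sub_params, rows_per_sub, cols = 2, 2, 4, 3
slices = [torch.arange(n_sub_params * rows_per_sub * cols).reshape(-1, cols) + 100 * r
          for r in range(tp_degree)]

cat_dim = 0
# Split each TP slice into its sub-params, merge per sub-param across ranks,
# then concatenate the merged sub-params to normalize the full parameter.
chunked = [torch.chunk(s, n_sub_params, dim=cat_dim) for s in slices]
merged_first = torch.cat([c[0] for c in chunked], dim=cat_dim)
merged_second = torch.cat([c[1] for c in chunked], dim=cat_dim)
full_param = torch.cat([merged_first, merged_second], dim=cat_dim)

assert full_param.shape == (tp_degree * n_sub_params * rows_per_sub, cols)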
venv/lib/python3.10/site-packages/deepspeed/checkpoint/deepspeed_checkpoint.py ADDED
@@ -0,0 +1,294 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import os
+ from typing import Dict
+ import torch
+
+ from .reshape_3d_utils import model_3d_desc
+ from .reshape_utils import (basic_folder_validation, merge_state, partition_data, get_files, get_files_with_prefix)
+
+ from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX)
+
+ from .reshape_meg_2d import reshape_meg_2d_parallel, meg_2d_parallel_map
+ from .zero_checkpoint import ZeROCheckpoint
+ from .constants import *
+
+ EMBEDDING_LAYER_INDEX = 0
+ FINAL_LAYER_NORM_INDEX = -1
+ ARGS_KEY = 'args'
+ CHECKPOINT_INFO_KEY = 'checkpoint_info'
+ ITERATION_KEY = 'iteration'
+
+ SEQUENTIAL_LAYERS = [
+     'input_layernorm.weight', 'input_layernorm.bias', 'self_attention.dense.bias', 'post_attention_layernorm.weight',
+     'post_attention_layernorm.bias', 'mlp.dense_4h_to_h.bias', 'position_embeddings.weight'
+ ]
+
+ LAYER_CONCAT_DIM = {'self_attention.dense.weight': 1, 'mlp.dense_4h_to_h.weight': 1}
+
+
+ class DeepSpeedCheckpoint(object):
+
+     def __init__(self, dir, tp_degree=None, pp_degree=None, dp_degree=None):
+         self.dir = dir
+
+         pipeline_parallel = len(get_files_with_prefix(get_files(dir), LAYER_FILE_PREFIX)) > 0
+
+         self._validate_folder(dir, pipeline_parallel)
+
+         self.zero_checkpoint = ZeROCheckpoint(dir)
+
+         self.file_list = get_files(dir)
+         self.layer_files = get_files_with_prefix(self.file_list, LAYER_FILE_PREFIX)
+         self.mp_rank_files = get_files_with_prefix(self.file_list, MODEL_FILE_PREFIX)
+
+         self.layer_keys = self._get_layer_keys()
+         self.layer_count = len(self.layer_keys)
+
+         self.tp_degree = self.zero_checkpoint.get_src_tp_degree() if tp_degree is None else tp_degree
+         self.pp_degree = self.zero_checkpoint.get_src_pp_degree() if pp_degree is None else pp_degree
+         self.dp_degree = self.zero_checkpoint.get_src_dp_degree() if dp_degree is None else dp_degree
+
+         self.original_world_size = self.zero_checkpoint.get_src_tp_degree() * self.zero_checkpoint.get_src_pp_degree(
+         ) * self.zero_checkpoint.get_src_dp_degree()
+         self.world_size = self.tp_degree * self.pp_degree * self.dp_degree
+
+         self.old_2d_map = meg_2d_parallel_map(self.zero_checkpoint.get_src_pp_degree(),
+                                               self.zero_checkpoint.get_src_tp_degree())
+         self.old_2d_map.simple_init()
+         self.new_2d_map = reshape_meg_2d_parallel(old_pp_degree=self.zero_checkpoint.get_src_pp_degree(),
+                                                   old_tp_degree=self.zero_checkpoint.get_src_tp_degree(),
+                                                   new_pp_degree=self.pp_degree,
+                                                   new_tp_degree=self.tp_degree)
+
+         if self.is_change_pp_degree() or self.is_change_tp_degree() or self.is_change_dp_degree():
+             self.zero_checkpoint.reshape(model_3d_desc(self.pp_degree, self.tp_degree, self.dp_degree))
+
+         self.global_state = {}
+
+         self._sanity_check()
+         self.pp_to_transformer_map = self._build_pp_transformer_map()
+         self.transformer_file_map = self._build_transformer_file_map()
+         self.tp_to_embedding_map = self._build_tp_other_layer_map(EMBEDDING_LAYER_INDEX)
+         self.tp_to_final_norm_map = self._build_tp_other_layer_map(FINAL_LAYER_NORM_INDEX)
+         self._build_global_state()
+
+     def is_change_tp_degree(self):
+         return self.tp_degree != self.zero_checkpoint.get_src_tp_degree()
+
+     def is_change_pp_degree(self):
+         return self.pp_degree != self.zero_checkpoint.get_src_pp_degree()
+
+     def is_change_dp_degree(self):
+         return self.dp_degree != self.zero_checkpoint.get_src_dp_degree()
+
+     def show_2d_mapping(self):
+         print(f'reshaped 2d map ---- begin')
+
+         for i in range(self.pp_degree):
+             for j in range(self.tp_degree):
+                 file_list = self.get_2d_parallel_files(pp_index=i, tp_index=j)
+                 print(f'[{i}, {j}] = {file_list}')
+
+         print(f'reshaped 2d map ---- end')
+
+     def show_tp_embedding_map(self):
+         self._dump_mapping(self.tp_to_embedding_map, 'tp_to_embedding_layers')
+
+     def show_tp_final_norm_map(self):
+         self._dump_mapping(self.tp_to_final_norm_map, 'tp_to_final_norm_layers')
+
+     def show_pp_transformer_map(self):
+         self._dump_mapping(self.pp_to_transformer_map, 'pp_to_transformer_layers')
+
+     def show_transformer_file_map(self):
+         self._dump_mapping(self.transformer_file_map, 'rank_to_transformer_files')
+
+     def _build_global_state(self):
+         sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
+         self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0)
+         self.global_state[ARGS_KEY] = sd.get(ARGS_KEY, None)
+
+     def get_zero_checkpoint_state(self, pp_index, tp_index, dp_index) -> dict:
+         return self.zero_checkpoint.get_state_for_rank(pp_index=pp_index,
+                                                        tp_index=tp_index,
+                                                        dp_index=dp_index,
+                                                        keys_to_ignore=[PARAM_SHAPES])
+
+     def get_zero_files(self, pp_index, tp_index, dp_index) -> list:
+         return self.zero_checkpoint.get_files_for_rank(pp_index=pp_index, tp_index=tp_index, dp_index=dp_index)
+
+     def get_embedding_layer_id(self):
+         return self.layer_keys[EMBEDDING_LAYER_INDEX]
+
+     def get_final_norm_layer_id(self):
+         return self.layer_keys[FINAL_LAYER_NORM_INDEX]
+
+     def get_iteration(self):
+         if not ITERATION_KEY in self.global_state:
+             sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
+             self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0)
+
+         return self.global_state[ITERATION_KEY]
+
+     def get_embedding_state(self, tp_index: int) -> Dict:
+         assert tp_index in self.tp_to_embedding_map.keys()
+         sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in self.tp_to_embedding_map[tp_index]]
+         sd = self._merge_state_dicts(sd_list)
+         return sd
+
+     def get_embedding_files(self, tp_index: int) -> list:
+         assert tp_index in self.tp_to_embedding_map.keys()
+         return self.tp_to_embedding_map[tp_index]
+
+     def _get_checkpoint_value(self, key):
+         if not key in self.global_state:
+             sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
+             self.global_state[key] = sd.get(key, None)
+
+         return self.global_state[key]
+
+     def get_args(self):
+         return self._get_checkpoint_value(ARGS_KEY)
+
+     def get_checkpoint_info(self, info_key=CHECKPOINT_INFO_KEY):
+         return self._get_checkpoint_value(info_key)
+
+     def get_2d_parallel_state(self, tp_index: int, pp_index: int) -> dict:
+         assert tp_index < self.tp_degree
+         assert pp_index < self.pp_degree
+         fname_list = self.get_2d_parallel_files(tp_index=tp_index, pp_index=pp_index)
+         sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in fname_list]
+
+         merged_sd = None
+         for sd in sd_list:
+             if merged_sd is None:
+                 merged_sd = sd
+             else:
+                 merged_sd = merge_state(merged_sd, sd)
+
+         return merged_sd
+
+     def get_transformer_state(self, tp_index: int, pp_index: int) -> list:
+         assert tp_index < self.tp_degree
+         assert pp_index < self.pp_degree
+         t_list = []
+         for fname_list in self.transformer_file_map[(tp_index, pp_index)]:
+             sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in fname_list]
+             sd = self._merge_state_dicts(sd_list)
+             t_list.append(sd)
+         return t_list
+
+     def get_pp_transformer_map(self, pp_index: int) -> list:
+         assert pp_index < self.pp_degree
+         return self.pp_to_transformer_map[pp_index]
+
+     def get_final_norm_state(self, tp_index: int) -> Dict:
+         assert tp_index in self.tp_to_final_norm_map.keys()
+         sd = torch.load(self.tp_to_final_norm_map[tp_index][0], map_location=torch.device('cpu'))
+         return sd
+
+     def get_final_norm_files(self, tp_index: int) -> list:
+         assert tp_index in self.tp_to_final_norm_map.keys()
+         return self.tp_to_final_norm_map[tp_index]
+
+     def _build_tp_other_layer_map(self, layer_index: int):
+         data_map = {}
+         if len(self.layer_files) < 1:
+             return data_map
+         assert layer_index <= len(self.layer_files)
+         layer_files = get_files_with_prefix(self.layer_files, self.layer_keys[layer_index])
+         layer_file_partitions = partition_data(layer_files, self.tp_degree)
+         data_map = {i: flist for i, flist in enumerate(layer_file_partitions)}
+         return data_map
+
+     def get_2d_parallel_files(self, tp_index: int, pp_index: int) -> list:
+         assert tp_index < self.tp_degree
+         assert pp_index < self.pp_degree
+         file_indices = self.new_2d_map.get_data(pp_index=pp_index, tp_index=tp_index)
+         return [self.mp_rank_files[i] for i in file_indices]
+
+     def _build_pp_transformer_map(self):
+         data_map = {}
+         if self.pp_degree > 0:
+             transformer_layers = self.layer_keys[1:-1]
+             layers_per_pp = len(transformer_layers) // self.pp_degree
+             data_map = {
+                 i: transformer_layers[i * layers_per_pp:(i + 1) * layers_per_pp]
+                 for i in range(0, self.pp_degree)
+             }
+         return data_map
+
+     def _dump_mapping(self, data_map, map_tag=None):
+         if map_tag is not None:
+             print(f'Dump mapping: {map_tag}')
+         for k, v in data_map.items():
+             print(f'{k} = {v}')
+
+     def _build_transformer_file_map(self):
+         transformer_layer_keys = self.layer_keys[1:-1]
+         file_map = {}
+         # XXX: this is not guaranteed
+         layers_per_pp = 1
+         if self.pp_degree > 0:
+             layers_per_pp = len(transformer_layer_keys) // self.pp_degree
+         #print(f"{transformer_layer_keys} {layers_per_pp}")
+         for key_index, layer_key in enumerate(transformer_layer_keys):
+             pp_index = key_index // layers_per_pp
+             layer_files = get_files_with_prefix(self.layer_files, layer_key)
+             layer_file_partitions = partition_data(layer_files, self.tp_degree)
+             for tp_index in range(self.tp_degree):
+                 map_key = (tp_index, pp_index)
+                 if not map_key in file_map.keys():
+                     file_map[map_key] = []
+                 file_map[map_key].append(layer_file_partitions[tp_index])
+
+         return file_map
+
+     def _sanity_check(self):
+         assert len(self.mp_rank_files) % self.tp_degree == 0
+         assert self.zero_checkpoint.num_files % (self.pp_degree * self.tp_degree) == 0
+         assert self.zero_checkpoint.num_files % (self.tp_degree) == 0
+         # XXX: fix me - isn't always the case
+         # only true with --pp-partition-method 'type:transformer|embedding' \
+         # assert (len(self.layer_keys) - 2) % self.pp_degree == 0
+
+     def validate_files(self):
+         for file in self.file_list:
+             if not os.path.isfile(file):
+                 print(f'Error: {file} does not exist')
+
+     def _get_layer_keys(self):
+         key_set = set()
+         key_len = len(LAYER_FILE_PREFIX) + 2
+         for file_path in self.layer_files:
+             _, fname = os.path.split(file_path)
+             key_set.add(fname[:key_len])
+         return sorted(list(key_set))
+
+     def _merge_state_dicts(self, sd_list):
+         merged_sd = {}
+         for key in sd_list[0].keys():
+             if not key in SEQUENTIAL_LAYERS:
+                 cat_dim = LAYER_CONCAT_DIM.get(key, 0)
+                 merged_sd[key] = torch.cat([sd[key] for sd in sd_list], dim=cat_dim)
+             else:
+                 merged_sd[key] = sd_list[0][key]
+
+         return merged_sd
+
+     def _validate_folder(self, dir, pipeline_parallel):
+         basic_folder_validation(dir)
+
+         file_list = get_files(dir)
+         file_prefix_list = [MODEL_FILE_PREFIX]
+         if pipeline_parallel:
+             file_prefix_list.extend([LAYER_FILE_PREFIX, f'{LAYER_FILE_PREFIX}01'])
+         for file_prefix in file_prefix_list:
+             ckpt_files = get_files_with_prefix(file_list, file_prefix)
+             assert len(
+                 ckpt_files
+             ) > 0, f'{dir} seems a bogus DeepSpeed checkpoint folder: Cannot find {file_prefix}* files in there.'
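Editor's note: a minimal usage sketch of the class above, assuming the folder is a real DeepSpeed checkpoint containing the mp_rank_*/layer_*/zero_pp_rank_* files the validator looks for; the path and target degrees are placeholders, and degrees may only shrink relative to the source run.

from deepspeed.checkpoint import DeepSpeedCheckpoint

# Hypothetical checkpoint folder, reshaped down to tp=1, pp=1.
ds_ckpt = DeepSpeedCheckpoint('/path/to/global_step1000', tp_degree=1, pp_degree=1)

print(ds_ckpt.get_iteration())  # iteration read from the first mp_rank_* file
embedding_sd = ds_ckpt.get_embedding_state(tp_index=0)  # TP slices merged along their concat dims
layers = ds_ckpt.get_transformer_state(tp_index=0, pp_index=0)  # one merged dict per transformer layer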
venv/lib/python3.10/site-packages/deepspeed/checkpoint/ds_to_universal.py ADDED
@@ -0,0 +1,401 @@
+ #!/usr/bin/env python
+
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from functools import partial
+ from itertools import chain
+ import argparse
+ import glob
+ import itertools
+ from concurrent.futures import ProcessPoolExecutor
+ import os
+ import re
+ import shutil
+ import torch
+ import tqdm
+ #from pprint import pprint
+
+ from deepspeed.checkpoint import DeepSpeedCheckpoint
+ from deepspeed.checkpoint import (
+     OPTIMIZER_STATE_DICT,
+     BASE_OPTIMIZER_STATE,
+     SINGLE_PARTITION_OF_FP32_GROUPS,
+     PARAM_GROUPS,
+     PARAM_SLICE_MAPPINGS,
+     PARAM_SHAPES,
+     PARAM,
+     CAT_DIM,
+     PARAM_N_SUB_PARAMS,
+     SUB_PARAM_SHAPE,
+     VOCAB_TENSOR,
+     UNIVERSAL_CHECKPOINT_INFO,
+     VOCABULARY_PARAMETER_PATTERNS,
+     PIPELINE_REPLICATED_PARAMETER_PATTERNS,
+     TP_REPLICATED_PARAMETER_PATTERNS,
+     PARAMETER_TO_AVERAGE_PATTERNS,
+     PARAMETER_WITH_ROW_PARALLELISM_PATTERNS,
+     PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0,
+     PARAMETER_WITH_SUB_PARAMS,
+     SubparamShape,
+ )
+
+
+ def parse_arguments():
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--input_folder', type=str, required=True, help='Input DeepSpeed Checkpoint folder')
+     parser.add_argument('--output_folder', type=str, required=True, help='Output DeepSpeed checkpoint folder')
+     parser.add_argument('--num_extract_workers',
+                         default=4,
+                         type=int,
+                         help='How many parallel processes to extract zero shards')
+     parser.add_argument(
+         '--num_merge_workers',
+         default=2,
+         type=int,
+         help=
+         'How many parallel processes to merge tp slices (more memory intensive, use much fewer than --num_extract_workers)'
+     )
+     parser.add_argument('--keep_temp_folder',
+                         action='store_true',
+                         help='Preserve temporary folder of intermediate checkpoint slice files. Useful for debugging.')
+     parser.add_argument('--no_strict',
+                         dest='strict',
+                         action='store_false',
+                         help='Do not perform validity checks on converted checkpoint.')
+     args = parser.parse_args()
+     print(f'args = {args}')
+     return args
+
+
+ def _create_checkpoint_paths(base_folder, iteration, tp_degree, pp_degree):
+     path_list = []
+     iter_folder = f'iter_{iteration:07d}'
+     for i in range(0, tp_degree):
+         path_list.append([])
+         for j in range(0, pp_degree):
+             rank_folder = f'mp_rank_{i:02d}' if pp_degree == 1 else f'mp_rank_{i:02d}_{j:03d}'
+             ckpt_path = os.path.join(rank_folder, 'model_optim_rng.pt')
+             path_list[i].append(os.path.join(base_folder, iter_folder, ckpt_path))
+
+     return path_list
+
+
+ def _save_checkpoint(file_path, chkpt_sd):
+     dir, _ = os.path.split(file_path)
+     os.makedirs(dir, exist_ok=True)
+     torch.save(chkpt_sd, file_path)
+
+
+ def extract_zero_shards(dir, ds_checkpoint, indices_3D):
+     pp_index, tp_index, dp_index = indices_3D
+     sd = ds_checkpoint.get_zero_checkpoint_state(pp_index=pp_index, tp_index=tp_index, dp_index=dp_index)
+
+     # pprint(f"Processing {dp_index=} {pp_index=}, {tp_index=}")
+
+     optim_sd = sd[OPTIMIZER_STATE_DICT]
+     param_slice_mappings = optim_sd[PARAM_SLICE_MAPPINGS]
+     universal_checkpoint_info = ds_checkpoint.get_checkpoint_info(UNIVERSAL_CHECKPOINT_INFO)
+     pipeline_replicated_params = universal_checkpoint_info.get(PIPELINE_REPLICATED_PARAMETER_PATTERNS, [])
+     # print(f'{pipeline_replicated_params=}')
+
+     # dict
+     state_groups = optim_sd[BASE_OPTIMIZER_STATE]["state"]
+     # list
+     fp32_groups = optim_sd[SINGLE_PARTITION_OF_FP32_GROUPS]
+     param_groups_cnt = len(state_groups)
+
+     for param_group_id in range(param_groups_cnt):
+
+         flat_state = dict(
+             exp_avg=state_groups[param_group_id]["exp_avg"],
+             exp_avg_sq=state_groups[param_group_id]["exp_avg_sq"],
+             fp32=fp32_groups[param_group_id],
+         )
+
+         if "step" in state_groups[param_group_id]:
+             flat_state["step"] = state_groups[param_group_id]["step"]
+
+         for name, fragment_mapping in param_slice_mappings[param_group_id].items():
+             if pp_index > 0 and any(re.match(pattern, name) for pattern in pipeline_replicated_params):
+                 # Skip tied weights that are replicated in first and last pp stages
+                 continue
+
+             # pprint(f"dpt{dp_index}{pp_index}{tp_index} {param_group_id} {name} => {fragment_mapping.start}:{fragment_mapping.numel}")
+             for state_key in flat_state.keys():
+                 dump_param_fragment(dir, tp_index, dp_index, state_key, flat_state[state_key], name,
+                                     fragment_mapping.start, fragment_mapping.numel)
+
+
+ cnt = 0
+
+
+ def dp_index_to_str(dp_index):
+     return f"{dp_index:0>2d}"
+
+
+ def dump_param_fragment(dir, tp_index, dp_index, state_name, state_flat_tensor, param_name, offset, numel):
+
+     global cnt  # temp hack
+
+     param_base_path = os.path.join(dir, param_name, str(tp_index))
+     os.makedirs(param_base_path, exist_ok=True)
+
+     cnt += 1
+
+     path = os.path.join(param_base_path, f"{state_name}.{dp_index_to_str(dp_index)}")
+
+     #print(f"{param_name}: {offset}: {numel} => {path}")
+
+     # State might be a python int or a tensor
+     if state_name != "step" and torch.is_tensor(state_flat_tensor):
+         state_flat_tensor = state_flat_tensor.narrow(0, offset, numel).clone()
+     _save_checkpoint(path, state_flat_tensor)
+
+
+ def _merge_zero_shards(param_base_path, state, tp_degree, slice_shape):
+     slices = []
+     for tp_index in range(tp_degree):
+         prefix_path = os.path.join(param_base_path, str(tp_index), f"{state}")
+         paths = glob.glob(f"{prefix_path}.*")
+
+         if len(paths) == 0:
+             continue
+
+         pattern = re.compile(f"{prefix_path}\\.([0-9]+)")
+         dp_indices = set()
+         for p in paths:
+             m = pattern.match(p)
+             if m:
+                 dp_indices.add(int(m.group(1)))
+             else:
+                 raise ValueError(f"Cannot parse dp_rank from {p}")
+
+         paths = [f"{prefix_path}.{dp_index_to_str(dp_index)}" for dp_index in sorted(list(dp_indices))]
+         shards = [torch.load(p) for p in paths]
+
+         if state == "step":
+             assert all(v == shards[0] for v in shards), "All shards must have the same step value"
+             slice = shards[0]
+         else:
+             slice = torch.cat(shards, dim=0).reshape(slice_shape)
+
+         slices.append(slice)
+     return slices
+
+
+ def merge_tp_slices(ds_checkpoint, dir, slice_dir, tp_degree, name_and_shape):
+
+     name, shape = name_and_shape
+     slice_base_path = os.path.join(slice_dir, name)
+     param_base_path = os.path.join(dir, name)
+
+     universal_checkpoint_info = ds_checkpoint.get_checkpoint_info(UNIVERSAL_CHECKPOINT_INFO)
+     replicated_parameters = universal_checkpoint_info.get(TP_REPLICATED_PARAMETER_PATTERNS, [])
+     parameters_to_average = universal_checkpoint_info.get(PARAMETER_TO_AVERAGE_PATTERNS, [])
+     parameters_with_row_parallelism = universal_checkpoint_info.get(PARAMETER_WITH_ROW_PARALLELISM_PATTERNS, [])
+     vocabulary_parameters = universal_checkpoint_info.get(VOCABULARY_PARAMETER_PATTERNS, [])
+     parameters_with_2_sub_params_cat_dim_0 = universal_checkpoint_info.get(PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0, [])
+     parameter_with_sub_params = universal_checkpoint_info.get(PARAMETER_WITH_SUB_PARAMS, [])
+
+     unmatched_patterns = set(replicated_parameters + parameters_to_average + parameters_with_row_parallelism +
+                              vocabulary_parameters + parameters_with_2_sub_params_cat_dim_0)
+     unmatched_patterns.update(chain.from_iterable(SubparamShape(**s).patterns for s in parameter_with_sub_params))
+
+     def get_matched_pattern(patterns_, name_):
+         matched_ = [pattern_ for pattern_ in patterns_ if re.match(pattern_, name_)]
+         assert len(matched_) <= 1, f'Got more than one matching patterns={matched_} for {name_}'
+         if matched_:
+             pattern_ = matched_[0]
+             unmatched_patterns.discard(pattern_)
+             return pattern_
+         return None
+
+     def get_matched_sub_params_pattern(name_):
+         for subparam_shape_dict in parameter_with_sub_params:
+             subparam_shape = SubparamShape(**subparam_shape_dict)
+             for pattern_ in subparam_shape.patterns:
+                 if re.match(pattern_, name_):
+                     unmatched_patterns.discard(pattern_)
+                     return subparam_shape
+         return None
+
+     matched_sub_params_shape = get_matched_sub_params_pattern(name)
+
+     step_merged = _merge_zero_shards(slice_base_path, "step", tp_degree, shape)
+     if step_merged:
+         _save_checkpoint(os.path.join(param_base_path, f"step.pt"), step_merged[0])
+
+     for state in ("fp32", "exp_avg", "exp_avg_sq"):
+         slices = _merge_zero_shards(slice_base_path, state, tp_degree, shape)
+         final_path = os.path.join(param_base_path, f"{state}.pt")
+
+         #print(f"Expected shape: {shape}")
+         #print(f"Fragment sizes:", list(frag.shape for frag in slices))
+         ckpt_dict = {}
+         if get_matched_pattern(replicated_parameters, name):
+             if len(slices) > 1:
+                 assert all([slices[0].equal(other_slice) for other_slice in slices[1:]])
+             param = slices[0]
+             # print(f'replicate {name} using first slice')
+         elif get_matched_pattern(parameters_to_average, name):
+             param = sum(slices) / len(slices)
+             # print(f'merge {name} using average')
+         elif get_matched_pattern(parameters_with_2_sub_params_cat_dim_0, name):
+             cat_dim = 0
+             chunked_slices = [torch.chunk(s, 2, dim=cat_dim) for s in slices]
+             merged_chunks_0 = torch.cat([s[0] for s in chunked_slices], dim=cat_dim)
+             merged_chunks_1 = torch.cat([s[1] for s in chunked_slices], dim=cat_dim)
+             param = torch.cat([merged_chunks_0, merged_chunks_1], dim=cat_dim)
+             ckpt_dict[CAT_DIM] = cat_dim
+             ckpt_dict[PARAM_N_SUB_PARAMS] = 2
+         elif matched_sub_params_shape:
+             merged_chunks = []
+             partition_dim = matched_sub_params_shape.partition_dim
+
+             sub_dim_sizes = matched_sub_params_shape.shape[partition_dim]
+             if not isinstance(sub_dim_sizes, tuple):
+                 sub_dim_sizes = (sub_dim_sizes, )
+
+             partition_shape = [sum(d) if isinstance(d, tuple) else d for d in matched_sub_params_shape.shape]
+             partition_shape = [d // tp_degree if i == partition_dim else d for i, d in enumerate(partition_shape)]
+             slices = [s.view(partition_shape) for s in slices]
+
+             offset = 0
+             for sub_dim_size in sub_dim_sizes:
+                 part_sub_dim_size = sub_dim_size // tp_degree
+                 merged_chunks.append(
+                     torch.cat([s.narrow(partition_dim, offset, part_sub_dim_size) for s in slices], dim=partition_dim))
+                 offset += part_sub_dim_size
+             param = torch.cat(merged_chunks, dim=partition_dim)
+             ckpt_dict[SUB_PARAM_SHAPE] = matched_sub_params_shape
+         else:
+             cat_dim = 1 if get_matched_pattern(parameters_with_row_parallelism, name) else 0
+             # print(f"merge {name} with CAT DIM: {cat_dim}")
+             param = torch.cat(slices, dim=cat_dim)
+             ckpt_dict[CAT_DIM] = cat_dim
+
+         if get_matched_pattern(vocabulary_parameters, name):
+             #print(f"Before {param.shape=}")
+             # strip padding
+             original_vocab_size = universal_checkpoint_info['original_vocab_size']
+             param = param[:original_vocab_size, :]
+             ckpt_dict[VOCAB_TENSOR] = True
+             #print(f"After {param.shape=}")
+
+         #print(f"Final shape: {param.shape}")
+         ckpt_dict[PARAM] = param
+         _save_checkpoint(final_path, ckpt_dict)
+
+     return unmatched_patterns
+
+
+ def _do_parallel_work(do_work, work_chunks, num_workers):
+     results = []
+     if num_workers > 1:
+         with ProcessPoolExecutor(max_workers=num_workers) as executor:
+             future_list = [executor.submit(do_work, work) for work in work_chunks]
+             for f in tqdm.tqdm(future_list):
+                 results.append(f.result())
+     else:
+         # No parallel pass for unit testing
+         # We can't create child processes in tests
+         for work in tqdm.tqdm(work_chunks):
+             results.append(do_work(work))
+     return results
+
+
+ def _extract_zero_shard_files(args, ds_checkpoint, temp_dir):
+     _3d_range_list = list(
+         itertools.product(range(ds_checkpoint.pp_degree), range(ds_checkpoint.tp_degree),
+                           range(ds_checkpoint.dp_degree)))
+     #pprint(f'{_3d_range_list=}')
+
+     do_work = partial(extract_zero_shards, temp_dir, ds_checkpoint)
+     _do_parallel_work(do_work, _3d_range_list, args.num_extract_workers)
+
+
+ def _merge_tp_slice_files(args, ds_checkpoint, slice_shapes, temp_dir):
+     zero_output_folder = os.path.join(args.output_folder, "zero")
+     do_work = partial(merge_tp_slices, ds_checkpoint, zero_output_folder, temp_dir, ds_checkpoint.tp_degree)
+     unmatched_patterns_lists = _do_parallel_work(do_work, list(slice_shapes.items()), args.num_merge_workers)
+
+     # verify that all patterns were used
+     # if a pattern was not used by any of the workers, then it was not used at all -> assert/alert
+     sets = [set(lst) for lst in unmatched_patterns_lists]
+     unmatched_patterns = list(set.intersection(*sets))
+     if args.strict:
+         assert not unmatched_patterns, f'Unused patterns={unmatched_patterns} while merging tp slices'
+     elif unmatched_patterns:
+         print(f'Warning: Unused patterns={unmatched_patterns} while merging tp slices')
+
+
+ def _save_optimizer_state(args, ds_checkpoint):
+     sharded_states = [BASE_OPTIMIZER_STATE, PARAM_SLICE_MAPPINGS, SINGLE_PARTITION_OF_FP32_GROUPS]
+     sd = ds_checkpoint.get_zero_checkpoint_state(pp_index=0, tp_index=0, dp_index=0)
+
+     optim_sd = sd[OPTIMIZER_STATE_DICT]
+     output_sd = {k: v for k, v in optim_sd.items() if k not in sharded_states}
+     output_sd[PARAM_GROUPS] = optim_sd[BASE_OPTIMIZER_STATE][PARAM_GROUPS]
+     zero_output_folder = os.path.join(args.output_folder, "zero")
+     output_file_path = os.path.join(zero_output_folder, f"optimizer_state.pt")
+     _save_checkpoint(output_file_path, output_sd)
+
+
+ def _check_for_required_state(ds_checkpoint):
+     universal_checkpoint_info = ds_checkpoint.get_checkpoint_info(UNIVERSAL_CHECKPOINT_INFO)
+     assert universal_checkpoint_info is not None, f'Required {UNIVERSAL_CHECKPOINT_INFO} state is missing in checkpoint. Verify that client creates this state.'
+
+
+ def main(args):
+     print(f'Convert DeepSpeed Checkpoint to Universal Checkpoint')
+
+     print(f'Converting DeepSpeed checkpoint in {args.input_folder} to Universal checkpoint in {args.output_folder}')
+
+     ds_checkpoint = DeepSpeedCheckpoint(args.input_folder)
+     _check_for_required_state(ds_checkpoint)
+
+     iteration = ds_checkpoint.get_iteration()
+     #_create_latest_file(args.output_folder, iteration)
+     checkpoint_paths = _create_checkpoint_paths(args.output_folder, iteration, ds_checkpoint.tp_degree,
+                                                 ds_checkpoint.pp_degree)
+
+     slice_shapes = []
+     for mp_rank_file in ds_checkpoint.mp_rank_files:
+         mp_sd = torch.load(mp_rank_file, map_location=torch.device('cpu'))
+         slice_shapes += mp_sd[PARAM_SHAPES]
+
+     # fix back to normal flat dict, merge duplicates for tp>1
+     slice_shapes = dict((k, v) for d in slice_shapes for k, v in d.items())
+     temp_dir = os.path.join(args.output_folder, 'tmp')
+
+     print('*** 1. Extracting ZeRO fragments')
+     _extract_zero_shard_files(args, ds_checkpoint, temp_dir)
+
+     print('*** 2. Merging slices .....')
+     _merge_tp_slice_files(args, ds_checkpoint, slice_shapes, temp_dir)
+
+     print('*** 3. Saving common optimizer states')
+     _save_optimizer_state(args, ds_checkpoint)
+
+     if not args.keep_temp_folder:
+         shutil.rmtree(temp_dir, ignore_errors=True)
+
+     # Copy mp* files into output folder
+     for f in glob.glob(os.path.join(args.input_folder, 'mp*')):
+         shutil.copy2(f, args.output_folder)
+
+     # Update latest to output folder
+     checkpoint_root_folder, step_folder = os.path.split(args.output_folder)
+     latest_file = os.path.join(checkpoint_root_folder, 'latest_universal')
+     with open(latest_file, "w") as f:
+         f.write(step_folder)
+
+     print('*** Done!')
+
+
+ if __name__ == "__main__":
+     args = parse_arguments()
+     main(args)
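Editor's note: the converter above is a standalone CLI driven entirely by the argparse flags defined in parse_arguments(); a typical invocation might look like the following, where the folder names are placeholders and the module path assumes the packaged location shown in this commit.

python -m deepspeed.checkpoint.ds_to_universal \
    --input_folder checkpoints/global_step1000 \
    --output_folder checkpoints/global_step1000_universal \
    --num_extract_workers 4 --num_merge_workers 2

Per the code above, the output folder then holds a zero/ tree with one fp32.pt / exp_avg.pt / exp_avg_sq.pt file per merged parameter, zero/optimizer_state.pt with the unsharded optimizer state, copies of the input mp_rank_* files, and a latest_universal tag file written one level above the output folder.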
venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_3d_utils.py ADDED
@@ -0,0 +1,111 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .reshape_utils import (get_files, get_files_with_prefix, partition_data, get_zero_files)
+
+ from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX)
+
+ from .reshape_meg_2d import (reshape_meg_2d_parallel, meg_2d_parallel_map)
+
+ PP_DIM = 'PP'
+ TP_DIM = 'TP'
+ DP_DIM = 'DP'
+
+
+ class model_3d_desc(object):
+
+     def __init__(self, pp_degree=1, tp_degree=1, dp_degree=1):
+         self.pp_degree = pp_degree
+         self.tp_degree = tp_degree
+         self.dp_degree = dp_degree
+
+     def reshape(self, target_3d_desc, verbose=False):
+         valid_reshape, reshape_errors = self.can_reshape(target_3d_desc)
+         assert valid_reshape, ','.join(reshape_errors)
+         tgt_2d_map = reshape_meg_2d_parallel(old_pp_degree=self.pp_degree,
+                                              old_tp_degree=self.tp_degree,
+                                              new_pp_degree=target_3d_desc.pp_degree,
+                                              new_tp_degree=target_3d_desc.tp_degree,
+                                              verbose=verbose)
+
+         flat_3d_map = flatten_dp_dimension(meg_2d_map=tgt_2d_map,
+                                            src_2d_size=self.pp_degree * self.tp_degree,
+                                            dp_degree=self.dp_degree)
+
+         return unflatten_dp_dimension(meg_2d_map=flat_3d_map, dp_degree=target_3d_desc.dp_degree)
+
+     def get_desc(self):
+         return f'{PP_DIM},{TP_DIM},{DP_DIM} = ({self.pp_degree}, {self.tp_degree}, {self.dp_degree})'
+
+     def world_size(self):
+         return self.pp_degree * self.tp_degree * self.dp_degree
+
+     def is_valid(self, pp_index, tp_index, dp_index):
+         err_msg = []
+         valid = True
+         for index, degree, dim_name in [(pp_index, self.pp_degree, PP_DIM), (tp_index, self.tp_degree, TP_DIM),
+                                         (dp_index, self.dp_degree, DP_DIM)]:
+             if index >= degree:
+                 valid = False
+                 err_msg.append(f'{dim_name} indexing error: index {index} >= degree {degree}')
+
+         return valid, err_msg
+
+     def can_reshape(self, target_3d_desc):
+         err_msg = []
+         if target_3d_desc.pp_degree > self.pp_degree:
+             err_msg.append(
+                 f'Expansion reshape not supported - {PP_DIM}: {self.pp_degree} ---> {target_3d_desc.pp_degree}')
+
+         if target_3d_desc.tp_degree > self.tp_degree:
+             err_msg.append(
+                 f'Expansion reshape not supported - {TP_DIM}: {self.tp_degree} ---> {target_3d_desc.tp_degree}')
+
+         if target_3d_desc.dp_degree > self.dp_degree:
+             err_msg.append(
+                 f'Expansion reshape not supported - {DP_DIM}: {self.dp_degree} ---> {target_3d_desc.dp_degree}')
+
+         return len(err_msg) == 0, err_msg
+
+
+ def get_model_3d_descriptor(dir):
+     file_list = get_files(dir)
+     zero_file_list = get_zero_files(dir)
+     num_pp0_files = len(get_files_with_prefix(file_list, f'{LAYER_FILE_PREFIX}01'))
+     if num_pp0_files > 0:
+         tp_degree = num_pp0_files
+         pp_degree = len(get_files_with_prefix(file_list, MODEL_FILE_PREFIX)) // tp_degree
+         dp_degree = max(1, len(zero_file_list) // (pp_degree * tp_degree))
+     else:
+         tp_degree = len(get_files_with_prefix(file_list, MODEL_FILE_PREFIX))
+         dp_degree = max(1, len(zero_file_list) // tp_degree)
+         pp_degree = 1
+
+     return model_3d_desc(pp_degree, tp_degree, dp_degree)
+
+
+ def flatten_dp_dimension(meg_2d_map, src_2d_size, dp_degree):
+     new_meg_2d_map = meg_2d_parallel_map(meg_2d_map.pp_degree, meg_2d_map.tp_degree)
+     for pp_index in range(meg_2d_map.pp_degree):
+         for tp_index in range(meg_2d_map.tp_degree):
+             dp0_indices = meg_2d_map.get_data(pp_index, tp_index)
+             for idx in dp0_indices:
+                 dpX_indices = [idx + (i * src_2d_size) for i in range(dp_degree)]
+                 new_meg_2d_map.add_data(pp_index, tp_index, dpX_indices)
+     return new_meg_2d_map
+
+
+ def unflatten_dp_dimension(meg_2d_map, dp_degree):
+     pp_degree = meg_2d_map.pp_degree
+     tp_degree = meg_2d_map.tp_degree
+     meg_2d_map_list = [meg_2d_parallel_map(pp_degree=pp_degree, tp_degree=tp_degree) for _ in range(dp_degree)]
+     for pp_index in range(pp_degree):
+         for tp_index in range(tp_degree):
+             flat_dp_indices = meg_2d_map.get_data(pp_index, tp_index)
+             partitioned_dp_indices = partition_data(flat_dp_indices, dp_degree)
+             for dp_indices, _2d_map in zip(partitioned_dp_indices, meg_2d_map_list):
+                 _2d_map.add_data(pp_index, tp_index, dp_indices)
+
+     return meg_2d_map_list
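Editor's note: a rough sketch of how the descriptor above is used; the degrees are invented for illustration and only contraction (equal or smaller degrees) is permitted by can_reshape().

from deepspeed.checkpoint.reshape_3d_utils import model_3d_desc

src = model_3d_desc(pp_degree=4, tp_degree=2, dp_degree=2)   # assumed source topology
tgt = model_3d_desc(pp_degree=2, tp_degree=2, dp_degree=1)   # contraction only

ok, errors = src.can_reshape(tgt)
print(src.get_desc(), '->', tgt.get_desc(), 'valid =', ok, errors)

# reshape() returns one meg_2d_parallel_map per target dp rank; each (pp, tp)
# cell lists the source ranks whose files must be merged for that cell.
maps_per_dp_rank = src.reshape(tgt)
maps_per_dp_rank[0].print_data('dp rank 0 mapping:')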
venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_meg_2d.py ADDED
@@ -0,0 +1,222 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .reshape_utils import partition_data
+
+
+ class meg_2d_parallel_map(object):
+
+     def __init__(self, pp_degree, tp_degree):
+         self.pp_degree = pp_degree
+         self.tp_degree = tp_degree
+         self.map = {}
+
+     def simple_init(self):
+         self.map = {
+             self._make_key(i // self.tp_degree, i % self.tp_degree): [i]
+             for i in range(self.pp_degree * self.tp_degree)
+         }
+
+     def add_data(self, pp_index, tp_index, data):
+         self._validate_indices(pp_index, tp_index)
+         assert type(data) is list
+
+         key = self._make_key(pp_index, tp_index)
+         if not key in self.map.keys():
+             self.map[key] = []
+         self.map[key] += data
+
+     def get_data(self, pp_index=None, tp_index=None):
+         self._validate_indices(pp_index, tp_index)
+         pp_indices = list(range(self.pp_degree)) if pp_index is None else [pp_index]
+         tp_indices = list(range(self.tp_degree)) if tp_index is None else [tp_index]
+
+         result = []
+         for i in pp_indices:
+             for j in tp_indices:
+                 result += self.map[self._make_key(i, j)]
+
+         return result
+
+     def print_data(self, tag):
+         print(f'{tag}')
+         for key, value in self.map.items():
+             print(f'{key} = {value}')
+
+     def _validate_indices(self, pp_index, tp_index):
+         assert pp_index is None or pp_index < self.pp_degree
+         assert tp_index is None or tp_index < self.tp_degree
+
+     def _make_key(self, i, j):
+         return f'{i},{j}'
+
+
+ def _reshape_tp_dimension(old_2d_map, new_tp_degree):
+     old_pp_degree = old_2d_map.pp_degree
+     new_2d_map = meg_2d_parallel_map(old_pp_degree, new_tp_degree)
+     for i in range(old_pp_degree):
+         ranks_for_pp_index = old_2d_map.get_data(pp_index=i, tp_index=None)
+         split_ranks = partition_data(ranks_for_pp_index, new_tp_degree)
+         for j in range(new_tp_degree):
+             new_2d_map.add_data(i, j, split_ranks[j])
+
+     return new_2d_map
+
+
+ def _reshape_pp_dimension(old_2d_map, new_pp_degree):
+     old_tp_degree = old_2d_map.tp_degree
+     new_2d_map = meg_2d_parallel_map(new_pp_degree, old_tp_degree)
+     for i in range(old_tp_degree):
+         ranks_for_tp_index = old_2d_map.get_data(pp_index=None, tp_index=i)
+         split_ranks = partition_data(ranks_for_tp_index, new_pp_degree)
+         for j in range(new_pp_degree):
+             new_2d_map.add_data(j, i, split_ranks[j])
+
+     return new_2d_map
+
+
+ def reshape_meg_2d_parallel(old_pp_degree, old_tp_degree, new_pp_degree, new_tp_degree, verbose=False):
+     assert new_pp_degree <= old_pp_degree
+     assert new_tp_degree <= old_tp_degree
+
+     old_2d_map = meg_2d_parallel_map(old_pp_degree, old_tp_degree)
+     old_2d_map.simple_init()
+     if verbose:
+         old_2d_map.print_data(f'original_2d_map:')
+
+     if old_tp_degree != new_tp_degree:
+         new_tp_map = _reshape_tp_dimension(old_2d_map, new_tp_degree)
+     else:
+         new_tp_map = old_2d_map
+     if verbose:
+         new_tp_map.print_data(f'after_tp_reshape:')
+
+     if old_pp_degree != new_pp_degree:
+         final_map = _reshape_pp_dimension(new_tp_map, new_pp_degree)
+     else:
+         final_map = new_tp_map
+
+     if verbose:
+         final_map.print_data(f'final_2d_map:')
+
+     return final_map
+
+
+ def get_mpu_ranks(tp_size=1, pp_size=1, dp_size=1, virtual_pp_size=None):
+     """
+     Initialize model data parallel groups.
+
+     Arguments:
+         tp_size: number of GPUs used to parallelize model tensor.
+         pp_size: number of GPUs used to parallelize model pipeline.
+         dp_size: number of GPUs used to parallelize model data.
+
+     Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we
+     use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
+     the model pipeline. The present function will
+     create 8 tensor model-parallel groups, 4 pipeline model-parallel groups
+     and 8 data-parallel groups as:
+         8 data_parallel groups:
+             [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15]
+         8 tensor model-parallel groups:
+             [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15]
+         4 pipeline model-parallel groups:
+             [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15]
+     Note that for efficiency, the caller should make sure adjacent ranks
+     are on the same DGX box. For example if we are using 2 DGX-1 boxes
+     with a total of 16 GPUs, rank 0 to 7 belong to the first box and
+     ranks 8 to 15 belong to the second box.
+     """
+
+     world_size = tp_size * pp_size * dp_size
+
+     print(f"\n\n*** tp={tp_size}, pp={pp_size}, dp={dp_size}, world={world_size}")
+
+     tensor_model_parallel_size = min(tp_size, world_size)
+     pipeline_model_parallel_size = min(pp_size, world_size)
+     data_parallel_size = world_size // (tensor_model_parallel_size * pipeline_model_parallel_size)
+
+     num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
+     num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size
+     num_data_parallel_groups = world_size // data_parallel_size
+
+     # Build the data-parallel groups.
+     all_dp_group_ranks = []
+     for i in range(pipeline_model_parallel_size):
+         start_rank = i * num_pipeline_model_parallel_groups
+         end_rank = (i + 1) * num_pipeline_model_parallel_groups
+         for j in range(tensor_model_parallel_size):
+             ranks = range(start_rank + j, end_rank, tensor_model_parallel_size)
+             all_dp_group_ranks.append(list(ranks))
+
+     print("DP", all_dp_group_ranks)
+
+     # Build the model-parallel groups.
+     all_pp_group_ranks = []
+     for i in range(data_parallel_size):
+         ranks = [data_parallel_group_ranks[i] for data_parallel_group_ranks in all_dp_group_ranks]
+         all_pp_group_ranks.append(list(ranks))
+
+     print(f"PP", all_pp_group_ranks)
+
+     # Build the tensor model-parallel groups.
+     all_tp_group_ranks = []
+     for i in range(num_tensor_model_parallel_groups):
+         ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
+         all_tp_group_ranks.append(list(ranks))
+
+     print(f"TP", all_tp_group_ranks)
+
+     return all_tp_group_ranks, all_pp_group_ranks, all_dp_group_ranks
+
+     # # Build the pipeline model-parallel groups and embedding groups
+     # # (first and last rank in each pipeline model-parallel group).
+     # for i in range(num_pipeline_model_parallel_groups):
+     #     ranks = range(i, world_size,
+     #                   num_pipeline_model_parallel_groups)
+     #     print(f"EMB{i}", list(ranks))
+
+
+ def reshape(src, tgt):
+     """
+     reshape([tp_size_src, pp_size_src, dp_size_src],
+             [tp_size_tgt, pp_size_tgt, dp_size_tgt])
+     """
+
+     print(f"\n\n*** Reshaping: {src} => {tgt}")
+
+     tp_size_src, pp_size_src, dp_size_src = src
+     tp_size_tgt, pp_size_tgt, dp_size_tgt = tgt
+
+     tp_ranks1, pp_ranks1, dp_ranks1 = get_mpu_ranks(tp_size=tp_size_src, pp_size=pp_size_src, dp_size=dp_size_src)
+     tp_ranks2, pp_ranks2, dp_ranks2 = get_mpu_ranks(tp_size=tp_size_tgt, pp_size=pp_size_src, dp_size=dp_size_src)
+     tp_ranks3, pp_ranks3, dp_ranks3 = get_mpu_ranks(tp_size=tp_size_tgt, pp_size=pp_size_tgt, dp_size=dp_size_src)
+
+     # handle tp contraction first
+     print("\n*** TP contraction:")
+
+     for i, r in enumerate(tp_ranks1):
+         print(f'{tp_ranks1[i]} => {tp_ranks2[i]}')
+
+     # handle pp contraction next
+     print("\n*** PP contraction:")
+
+     for i, r in enumerate(pp_ranks1):
+         print(f'{pp_ranks2[i]} => {pp_ranks3[i]}')
+
+
+ # easy
+ #reshape([2,2,1],[1,1,1])
+
+ # probably need more logic to suggest how to pack
+ #reshape([4,4,1],[2,2,1])
+
+ #reshape([2,4,2], [8,32,1])
+
+ # get_mpu_ranks(2,2,2)
+ # get_mpu_ranks(4,2,1)
+ # get_mpu_ranks(2,4,1)
+ # get_mpu_ranks(1,1,8)
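Editor's note: a tiny worked example of the contraction logic above (the degrees are arbitrary). Shrinking from pp=2, tp=4 to pp=1, tp=2 groups the eight source ranks so that each new (pp, tp) cell lists the source ranks whose slices must be merged; the expected grouping in the comment was derived by hand from simple_init() followed by partition_data().

from deepspeed.checkpoint.reshape_meg_2d import reshape_meg_2d_parallel

new_map = reshape_meg_2d_parallel(old_pp_degree=2, old_tp_degree=4,
                                  new_pp_degree=1, new_tp_degree=2)
new_map.print_data('pp=2,tp=4 -> pp=1,tp=2:')
# Expected grouping:
# 0,0 = [0, 1, 4, 5]
# 0,1 = [2, 3, 6, 7]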
venv/lib/python3.10/site-packages/deepspeed/checkpoint/reshape_utils.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import re
8
+ import torch
9
+ from collections import OrderedDict
10
+ from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX, MODEL_FILE_PREFIX)
11
+
12
+
13
+ def basic_folder_validation(dir):
14
+ assert os.path.exists(dir), f'{dir} path does not exist'
15
+ assert os.path.isdir(dir), f'{dir} is not a folder'
16
+
17
+
18
+ def get_files_with_prefix(all_files, prefix):
19
+ file_list = []
20
+ for file_path in all_files:
21
+ _, fname = os.path.split(file_path)
22
+ if fname.startswith(prefix):
23
+ file_list.append(file_path)
24
+
25
+ return sorted(file_list)
26
+
27
+
28
+ def validate_files(file_list):
29
+ for file in file_list:
30
+ if not os.path.isfile(file):
31
+ print(f'Error: {file} is not existent')
32
+
33
+
34
+ def get_files(dir):
35
+ file_list = []
36
+ for root, _, files in os.walk(dir):
37
+ for file in files:
38
+ file_list.append(os.path.join(root, file))
39
+ return file_list
40
+
41
+
42
+ def sort_zero_files(files, prefix):
43
+ pattern = f"{prefix}([0-9]+)_{MODEL_FILE_PREFIX}([0-9]+)"
44
+ rank_pairs = []
45
+ for f in files:
46
+ m = re.search(pattern, f)
47
+ if m:
48
+ dp_rank = int(m.group(1))
49
+ mp_rank = int(m.group(2))
50
+ rank_pairs.append((dp_rank, mp_rank, f))
51
+ else:
52
+ raise ValueError(f"Cannot parse dp_rank and mp_rank from {f}")
53
+
54
+ sorted_files = sorted(rank_pairs, key=lambda x: (x[0], x[1]))
55
+ return [f for _, _, f in sorted_files]
56
+
57
+
58
+ def get_zero_files(dir):
59
+ file_list = get_files(dir)
60
+ for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]:
61
+ zero_files = get_files_with_prefix(file_list, prefix)
62
+ if len(zero_files) > 0:
63
+ return sort_zero_files(zero_files, prefix)
64
+
65
+ return []
66
+
67
+
68
+ def partition_data(data_list, num_partitions):
69
+ num_elems = len(data_list)
70
+ assert num_elems % num_partitions == 0
71
+ partition_size = num_elems // num_partitions
72
+ partitions_list = [data_list[i:i + partition_size] for i in range(0, num_elems, partition_size)]
73
+ return partitions_list
74
+
75
+
76
+ def _key_list_to_string(key_list):
77
+ return '.'.join(key_list)
78
+
79
+
80
+ def merge_state_dict(dict_a, dict_b, key_list):
81
+ merged_dict = type(dict_a)({})
82
+
83
+ for key, value in dict_b.items():
84
+ if key in dict_a.keys():
85
+ merged_dict[key] = merge_state(dict_a[key], dict_b[key], [str(key)])
86
+ else:
87
+ merged_dict[key] = value
88
+
89
+ return merged_dict
90
+
91
+
92
+ def merge_state_list(list_a, list_b, key_list):
93
+ if len(list_a) != len(list_b):
94
+ print(f'{_key_list_to_string(key_list)}')
95
+ raise ValueError(f'Cannot merge lists of different lengths, a = {len(list_a)} b = {len(list_b)}')
96
+
97
+ return [merge_state(a, b, key_list) for a, b in zip(list_a, list_b)]
98
+
99
+
100
+ def merge_state(state_a, state_b, key_list=[]):
101
+ if type(state_a) != type(state_b):
102
+ key_list_string = _key_list_to_string(key_list)
103
+ print(f'key_list = {key_list_string}')
104
+ raise ValueError(f'Cannot merge two states of types {type(state_a)} and type {type(state_b)}')
105
+
106
+ if type(state_a) in (dict, OrderedDict):
107
+ return merge_state_dict(state_a, state_b, key_list)
108
+ elif type(state_a) in (list, tuple):
109
+ return type(state_a)(merge_state_list(state_a, state_b, key_list))
110
+ elif torch.is_tensor(state_a):
111
+ return torch.cat([state_a, state_b], 0)
112
+ else:
113
+ return state_a
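
For orientation, a minimal sketch of how two of these helpers behave (assumes `deepspeed` is installed so the module is importable; the sample tensors and values are made up):

import torch
from deepspeed.checkpoint.reshape_utils import merge_state, partition_data

a = {'fp32': torch.zeros(2), 'step': 10}
b = {'fp32': torch.ones(2), 'step': 10}
merged = merge_state(a, b)
print(merged['fp32'])   # length-4 tensor: the two fragments concatenated on dim 0
print(merged['step'])   # 10: non-tensor leaves are taken from the first state

print(partition_data(list(range(8)), num_partitions=4))  # [[0, 1], [2, 3], [4, 5], [6, 7]]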
venv/lib/python3.10/site-packages/deepspeed/checkpoint/universal_checkpoint.py ADDED
@@ -0,0 +1,146 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import re
8
+ import torch
9
+ import types
10
+ from typing import List, Tuple, Union
11
+ from dataclasses import dataclass
12
+ from .constants import (FP32_WEIGHT_KEY, PARAM, VOCAB_TENSOR, CAT_DIM, PARAM_N_SUB_PARAMS, SUB_PARAM_SHAPE)
13
+
14
+
15
+ @dataclass
16
+ class SubparamShape:
17
+ patterns: List[str]
18
+ shape: Tuple[Union[Tuple[int], int]]
19
+ partition_dim: int
20
+
21
+
22
+ def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size):
23
+ hp_mapping = self._hp_mapping
24
+ hp_mapping.optim_fragment = {}
25
+
26
+ hp_keys = []
27
+ for file in os.listdir(folder):
28
+ # We expect files named something like "exp_avg.pt", "exp_avg_sq.pt", "fp32.pt"
29
+ pattern = r'(.+).pt'
30
+ match = re.search(pattern, file)
31
+ if match:
32
+ hp_keys.append(match.group(1))
33
+
34
+ step = None
35
+ for key in hp_keys:
36
+ ckpt_file = os.path.join(folder, f"{key}.pt")
37
+ ckpt_dict = torch.load(ckpt_file)
38
+
39
+ if key == "step":
40
+ step = ckpt_dict
41
+ continue
42
+
43
+ full_hp_param = ckpt_dict[PARAM]
44
+
45
+ # need to deal with slices that were averaged.
46
+ # the opposite of averaging here becomes an exact copy of the first slice
47
+ # I thought of 2 ways:
48
+ # implementation a. find a way for a client to pass a dict with patterns
49
+ # if any(re.search(pattern, folder) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS):
50
+ # tp_rank = 0
51
+ # tp_world_size = 1
52
+ # the other approach is to assume that the saved data is correct and if full_hp_param.shape ==
53
+ # self.shape that means we automatically copy?
54
+ # implementation b.
55
+ # this version requires no additional data passed from the client
56
+ # if the shapes already match it must be slices that were averaged - so we just hack around those
57
+ if full_hp_param.shape == self.shape:
58
+ tp_rank = 0
59
+ tp_world_size = 1
60
+
61
+ # special case for word_embeddings weights which get padded differently depending on TP degree.
62
+ # the converter to universal currently strips the original padding completely so the saved
63
+ # weight is padding-free and we just need to add new padding depending on the target TP
64
+ # degree
65
+ is_vocab_tensor = ckpt_dict.get(VOCAB_TENSOR, False)
66
+ if is_vocab_tensor:
67
+ # In the absence of data passed from the user wrt new padded vocab specific to tp degree
68
+ # we can again derive that data by reverse engineering the target shapes like so:
69
+ padded_target_vocab_size = self.shape[0] * tp_world_size
70
+ assert padded_target_vocab_size >= full_hp_param.shape[0], \
71
+ f'Vocab tensor padded size {padded_target_vocab_size} < loaded universal size {full_hp_param.shape[0]}'
72
+ if padded_target_vocab_size > full_hp_param.shape[0]:
73
+ padding_size = padded_target_vocab_size - full_hp_param.shape[0]
74
+ full_hp_param = torch.nn.functional.pad(full_hp_param, (0, 0, 0, padding_size), "constant", 0)
75
+
76
+ full_param_numel = full_hp_param.numel()
77
+ tp_slice_numel = self.numel()
78
+ # if key == FP32_WEIGHT_KEY and 'word_embeddings.weight' in folder:
79
+ # print_rank_0(f'{full_hp_param[:10]=}', force=True)
80
+
81
+
82
+ assert full_param_numel == tp_world_size * tp_slice_numel, \
83
+ f'Loading {ckpt_file} full param numel {full_param_numel} != tensor slice numel {tp_slice_numel} * tp_world_size {tp_world_size}'
84
+
85
+ # print(f"{full_hp_param.shape=} {full_param_numel=} {folder=}")
86
+ # print(f"{dst_tensor.shape=} {dst_tensor.numel()=}{folder=}")
87
+
88
+ sub_param_shape = ckpt_dict.get(SUB_PARAM_SHAPE, None)
89
+ # since many-to-1 TP merging sometimes cats on dim=0 and other times on dim=1, we have to do exactly the same in reverse
90
+ # special case is when a single parameter is effectively a container for multiple sub parameters
91
+ # (more details at PARAM_N_SUB_PARAMS definition)
92
+ chunk_dim = ckpt_dict.get(CAT_DIM, 0)
93
+ n_sub_params = ckpt_dict.get(PARAM_N_SUB_PARAMS, 1)
94
+ if sub_param_shape:
95
+ partition_dim = sub_param_shape.partition_dim
96
+ sub_dim_sizes = sub_param_shape.shape[partition_dim]
97
+ if not isinstance(sub_dim_sizes, tuple):
98
+ sub_dim_sizes = (sub_dim_sizes, )
99
+
100
+ partition_shape = [sum(d) if isinstance(d, tuple) else d for d in sub_param_shape.shape]
101
+ full_hp_param = full_hp_param.view(partition_shape)
102
+
103
+ offset = 0
104
+ merged_chunks = []
105
+ for sub_dim_size in sub_dim_sizes:
106
+ sub_params_tp_slice = full_hp_param.narrow(partition_dim,
107
+ offset, sub_dim_size).chunk(tp_world_size,
108
+ dim=partition_dim)[tp_rank]
109
+ merged_chunks.append(sub_params_tp_slice)
110
+ offset += sub_dim_size
111
+ tp_hp_slice = torch.cat(merged_chunks, dim=partition_dim)
112
+
113
+ elif n_sub_params > 1:
114
+ sub_params = full_hp_param.chunk(n_sub_params, dim=chunk_dim)
115
+ sub_params_tp_slice = [p.chunk(tp_world_size, dim=chunk_dim)[tp_rank] for p in sub_params]
116
+ tp_hp_slice = torch.cat(sub_params_tp_slice, dim=chunk_dim)
117
+ else:
118
+ # this performs the opposite of cat when merging TP slices
119
+ tp_hp_slice = full_hp_param.chunk(tp_world_size, chunk_dim)[tp_rank]
120
+
121
+ tp_hp_slice = tp_hp_slice.flatten()
122
+
123
+ lp_frag_address = hp_mapping.lp_fragment_address
124
+ tp_hp_fragment = tp_hp_slice.narrow(0, lp_frag_address.start, lp_frag_address.numel)
125
+
126
+ # print(f"{key} SHAPE: {tp_hp_slice.shape=}")
127
+ # print(f"{key} SHAPE: {dst_tensor.shape=}")
128
+ # print(f"{key} SHAPE: {tp_hp_fragment.shape=}")
129
+
130
+ if key == FP32_WEIGHT_KEY:
131
+ dst_tensor = hp_mapping.get_hp_fragment()
132
+ assert dst_tensor.numel() == lp_frag_address.numel, \
133
+ f'Load checkpoint {key} dst numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}'
134
+ dst_tensor.data.copy_(tp_hp_fragment.data)
135
+ else:
136
+ assert tp_hp_fragment.numel() == lp_frag_address.numel, \
137
+ f'Load checkpoint {key} dst numel {tp_hp_fragment.numel()} != src numel {lp_frag_address.numel}'
138
+
139
+ hp_mapping.optim_fragment[key] = tp_hp_fragment.clone().detach()
140
+
141
+ return step
142
+
143
+
144
+ def enable_universal_checkpoint(param_list):
145
+ for param in param_list:
146
+ param.load_hp_checkpoint_state = types.MethodType(load_hp_checkpoint_state, param)
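
A minimal sketch of what `enable_universal_checkpoint` does to a parameter list (assumes `deepspeed` is installed; the parameter here is a stand-in):

import torch
from deepspeed.checkpoint.universal_checkpoint import enable_universal_checkpoint

params = [torch.nn.Parameter(torch.zeros(4))]
enable_universal_checkpoint(params)

# Each parameter now carries a bound load_hp_checkpoint_state method; actually calling it
# also requires the _hp_mapping set up by the ZeRO optimizer, so it is not invoked here.
print(hasattr(params[0], 'load_hp_checkpoint_state'))  # True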
venv/lib/python3.10/site-packages/deepspeed/checkpoint/utils.py ADDED
@@ -0,0 +1,62 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import os
7
+ import torch
8
+ from .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX)
9
+
10
+
11
+ def get_model_ckpt_name_for_rank(base_folder, mp_rank_str):
12
+ ckpt_name = os.path.join(
13
+ base_folder,
14
+ MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX,
15
+ )
16
+ return ckpt_name
17
+
18
+
19
+ def get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank):
20
+ zero_prefix = f'{ZERO_FILE_PREFIX}{dp_rank}'
21
+ mp_rank_string = f'_{MODEL_FILE_PREFIX}{mp_rank:02d}'
22
+ zero_ckpt_name = os.path.join(
23
+ base_folder,
24
+ zero_prefix + mp_rank_string + OPTIM_FILE_SUFFIX,
25
+ )
26
+ return zero_ckpt_name
27
+
28
+
29
+ def get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank):
30
+ ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'
31
+ ckpt_path = os.path.join(base_folder, ckpt_file)
32
+ return ckpt_path
33
+
34
+
35
+ # We pass cloned tensors to torch.save() to avoid checkpoint bloat that occurs when torch.save()
36
+ # saves the underlying storage rather than the slice of the storage corresponding to individual tensors.
37
+ # This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.
38
+ # Tensor cloning helps to avoid this problem because the storage of cloned tensors is closer to the true size.
39
+ # It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.
40
+ # See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing
41
+ def clone_tensors_for_torch_save(item, device=torch.device('cpu')):
42
+ """
43
+ Returns a copy of ``item`` with all enclosed tensors replaced by clones on a specified device.
44
+ Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.
45
+
46
+ Parameters:
47
+ - ``item``: tensor to clone or (possibly nested) container of tensors to clone.
48
+ - ``device``: target device (defaults to 'cpu')
49
+
50
+ Returns:
51
+ - copy of ``item`` with cloned tensors on target device
52
+ """
53
+ if torch.is_tensor(item):
54
+ return item.detach().clone().to(device)
55
+ elif isinstance(item, list):
56
+ return [clone_tensors_for_torch_save(v, device) for v in item]
57
+ elif isinstance(item, tuple):
58
+ return tuple([clone_tensors_for_torch_save(v, device) for v in item])
59
+ elif isinstance(item, dict):
60
+ return type(item)({k: clone_tensors_for_torch_save(v, device) for k, v in item.items()})
61
+ else:
62
+ return item
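
A short usage sketch of `clone_tensors_for_torch_save` (the buffer sizes and output path are made up for illustration):

import torch
from deepspeed.checkpoint.utils import clone_tensors_for_torch_save

big_buffer = torch.arange(1000.0)
view = big_buffer[:10]  # shares storage with the 1000-element buffer
safe = clone_tensors_for_torch_save({'w': view})
torch.save(safe, 'slim_ckpt.pt')  # hypothetical path; saves only the 10-element clone, not the whole buffer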
venv/lib/python3.10/site-packages/deepspeed/checkpoint/zero_checkpoint.py ADDED
@@ -0,0 +1,142 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from .constants import (BASE_OPTIMIZER_STATE, GROUP_PADDINGS, OPTIMIZER_STATE_DICT, PARTITION_COUNT)
9
+
10
+ from .reshape_utils import (basic_folder_validation, get_zero_files, merge_state)
11
+
12
+ from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor)
13
+
14
+ GROUP_STATE_KEY = 'state'
15
+
16
+
17
+ class ZeROCheckpoint(object):
18
+
19
+ def __init__(self, dir):
20
+ basic_folder_validation(dir)
21
+ self.dir = dir
22
+ self.file_list = get_zero_files(dir)
23
+ self.num_files = len(self.file_list)
24
+ assert self.num_files > 0, f'No ZeRO files found in {dir}'
25
+
26
+ self.src_3d = get_model_3d_descriptor(dir)
27
+ self.target_3d = model_3d_desc(pp_degree=self.src_3d.pp_degree,
28
+ tp_degree=self.src_3d.tp_degree,
29
+ dp_degree=self.src_3d.dp_degree)
30
+ self._3d_file_map = self.src_3d.reshape(self.target_3d)
31
+
32
+ def get_src_world_size(self):
33
+ return self.src_3d.world_size()
34
+
35
+ def get_src_tp_degree(self):
36
+ return self.src_3d.tp_degree
37
+
38
+ def get_src_pp_degree(self):
39
+ return self.src_3d.pp_degree
40
+
41
+ def get_src_dp_degree(self):
42
+ return self.src_3d.dp_degree
43
+
44
+ def get_file_indices_for_rank(self, pp_index, tp_index, dp_index):
45
+ assert dp_index < len(self._3d_file_map), f'DP index {dp_index} >= DP degree {len(self._3d_file_map)}'
46
+ dp_2d_map = self._3d_file_map[dp_index]
47
+ return dp_2d_map.get_data(pp_index, tp_index)
48
+
49
+ def get_files_for_rank(self, pp_index, tp_index, dp_index):
50
+ file_idx_list = self.get_file_indices_for_rank(pp_index, tp_index, dp_index)
51
+ return [self.file_list[idx] for idx in file_idx_list]
52
+
53
+ def get_state_for_rank(self, pp_index, tp_index, dp_index, keys_to_ignore=[], strip_tensor_paddings=True):
54
+ state_file_list = self.get_files_for_rank(pp_index, tp_index, dp_index)
55
+ merged_sd = None
56
+ for state_file in state_file_list:
57
+ sd = torch.load(state_file, map_location=torch.device('cpu'))
58
+ for key in keys_to_ignore:
59
+ sd.pop(key, None)
60
+
61
+ if strip_tensor_paddings:
62
+ self._strip_tensor_paddings(sd)
63
+
64
+ if merged_sd is None:
65
+ merged_sd = sd
66
+ else:
67
+ merged_sd = merge_state(merged_sd, sd)
68
+
69
+ self._update_partition_count(merged_sd)
70
+ if strip_tensor_paddings:
71
+ self._clear_group_paddings(merged_sd)
72
+
73
+ return merged_sd
74
+
75
+ def print_3d_index_map(self, tag=None):
76
+ if tag:
77
+ print(f'3D index map: {tag}')
78
+ for dp_index, _2d_map in enumerate(self._3d_file_map):
79
+ _2d_map.print_data(f'dp = {dp_index}')
80
+
81
+ def print_3d_file_map(self, tag=None):
82
+ if tag:
83
+ print(f'3D file map: {tag}')
84
+ for dp_index, _2d_map in enumerate(self._3d_file_map):
85
+ for pp_index in _2d_map.pp_degree:
86
+ for tp_index in _2d_map.tp_degree:
87
+ file_index_list = _2d_map.get_data(pp_index, tp_index)
88
+ file_list = [self.file_list[idx] for idx in file_index_list]
89
+ print(f'{pp_index}, {tp_index}, {dp_index} => {file_list}')
90
+
91
+ def reshape(self, target_3d_desc: model_3d_desc):
92
+ self.target_3d = target_3d_desc
93
+ self._3d_file_map = self.src_3d.reshape(self.target_3d)
94
+
95
+ def _strip_tensor_paddings(self, sd):
96
+ param_group_states = self._get_param_group_states(sd)
97
+ if param_group_states is None:
98
+ return
99
+
100
+ group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS)
101
+ if group_paddings is None:
102
+ return
103
+
104
+ for key, group_state in param_group_states.items():
105
+ if group_paddings[key] == 0:
106
+ continue
107
+ for state_name, state_value in group_state.items():
108
+ if state_name != "step" and torch.is_tensor(state_value):
109
+ raw_length = state_value.numel() - group_paddings[key]
110
+ group_state[state_name] = torch.narrow(state_value, 0, 0, raw_length).clone()
111
+ else:
112
+ group_state[state_name] = state_value
113
+
114
+ def _clear_group_paddings(self, sd):
115
+ group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS)
116
+ if group_paddings:
117
+ num_groups = len(group_paddings)
118
+ sd[OPTIMIZER_STATE_DICT][GROUP_PADDINGS] = [0] * num_groups
119
+
120
+ def _get_optimizer_state(self, sd, state_key):
121
+ optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None)
122
+ if optimizer_state is None:
123
+ return None
124
+
125
+ return optimizer_state.get(state_key, None)
126
+
127
+ def _get_param_group_states(self, sd):
128
+ optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None)
129
+ if optimizer_state is None:
130
+ return None
131
+
132
+ base_optimizer_state = optimizer_state.get(BASE_OPTIMIZER_STATE, None)
133
+ if base_optimizer_state is None:
134
+ return None
135
+
136
+ return base_optimizer_state.get(GROUP_STATE_KEY, None)
137
+
138
+ def _update_partition_count(self, sd):
139
+ partition_counts = self._get_optimizer_state(sd, PARTITION_COUNT)
140
+ if partition_counts:
141
+ num_groups = len(partition_counts)
142
+ sd[OPTIMIZER_STATE_DICT][PARTITION_COUNT] = [self.target_3d.dp_degree] * num_groups
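
A minimal sketch of the `ZeROCheckpoint` API shown above, assuming an existing ZeRO checkpoint folder (the path below is hypothetical):

from deepspeed.checkpoint.zero_checkpoint import ZeROCheckpoint

ckpt = ZeROCheckpoint('/path/to/checkpoints/global_step1000')  # hypothetical checkpoint folder
print(ckpt.get_src_tp_degree(), ckpt.get_src_pp_degree(), ckpt.get_src_dp_degree())

# Merge the ZeRO shards that belong to one (pp, tp, dp) coordinate into a single state dict.
sd = ckpt.get_state_for_rank(pp_index=0, tp_index=0, dp_index=0)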
venv/lib/python3.10/site-packages/deepspeed/compression/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .compress import init_compression, redundancy_clean
7
+ from .scheduler import compression_scheduler
8
+ from .helper import convert_conv1d_to_linear
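
A sketch of the intended call sequence for the exported compression entry points (defined in compress.py below), assuming a valid DeepSpeed config file with compression settings exists at the hypothetical path:

import torch
from deepspeed.compression import init_compression, redundancy_clean

model = torch.nn.Sequential(torch.nn.Linear(16, 16))
ds_config = 'ds_config.json'  # hypothetical config path with compression settings

model = init_compression(model, ds_config)   # swap in the compression-aware layers defined below
# ... fine-tune the model ...
model = redundancy_clean(model, ds_config)   # fix masks/quantization back into the plain weights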
venv/lib/python3.10/site-packages/deepspeed/compression/basic_layer.py ADDED
@@ -0,0 +1,840 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ import math
8
+ from torch import nn
9
+ from torch.nn import init
10
+ import deepspeed.comm as dist
11
+ from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer
12
+ from deepspeed.utils import logger
13
+
14
+ g_mpu = None
15
+
16
+
17
+ class QuantAct(nn.Module):
18
+ """
19
+ Class to quantize given activations. Note that when using this function, the input activation quantization range will be fixed for all
20
+ tokens/images for inference. This generally will affect some accuracy but achieve better latency performance.
21
+ Parameters:
22
+ ----------
23
+ act_range_momentum : float, default 0.95
24
+ Momentum for updating the activation quantization range.
25
+ quant_mode : str, default 'symmetric'
26
+ """
27
+
28
+ def __init__(self, act_range_momentum=0.95, quant_mode='symmetric'):
29
+ super(QuantAct, self).__init__()
30
+
31
+ self.act_range_momentum = act_range_momentum
32
+ self.quant_mode = quant_mode
33
+ if quant_mode == 'symmetric':
34
+ self.act_function = SymQuantizer.apply
35
+ else:
36
+ self.act_function = AsymQuantizer.apply
37
+
38
+ self.register_buffer('x_min_max', torch.zeros(2))
39
+
40
+ def forward(self, x, num_bits, *args):
41
+ """
42
+ x: the activation that we need to quantize
43
+ num_bits: the number of bits we need to quantize the activation to
44
+ *args: some extra arguments that are useless but needed for align with the interface of other quantization functions
45
+ """
46
+
47
+ if self.training:
48
+ x_min = x.data.min()
49
+ x_max = x.data.max()
50
+
51
+ # Initialization
52
+ if self.x_min_max[0] == self.x_min_max[1]:
53
+ self.x_min_max[0] = x_min
54
+ self.x_min_max[1] = x_max
55
+
56
+ # if do not need momentum, please set self.act_range_momentum = 0
57
+ self.x_min_max[0] = self.x_min_max[0] * self.act_range_momentum + x_min * (1 - self.act_range_momentum)
58
+ self.x_min_max[1] = self.x_min_max[1] * self.act_range_momentum + x_max * (1 - self.act_range_momentum)
59
+
60
+ x_q = self.act_function(x, num_bits, self.x_min_max[0], self.x_min_max[1])
61
+
62
+ return x_q
63
+
64
+
65
+ class Embedding_Compress(nn.Embedding):
66
+
67
+ def __init__(self, *kargs):
68
+ super(Embedding_Compress, self).__init__(*kargs)
69
+ self.weight.start_bits = None
70
+ self.weight.target_bits = None
71
+ self.weight.q_period = None
72
+ self.weight_quantization_enabled_in_forward = False
73
+ self.weight_quantization_enabled = False
74
+
75
+ def extra_repr(self):
76
+ return 'num_embeddings={}, embedding_dim={}, weight_quantization={}'.format(
77
+ self.num_embeddings, self.embedding_dim, self.weight.target_bits)
78
+
79
+ def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
80
+ weight_quantization_enabled_in_forward, quantization_type, num_groups):
81
+ self.weight.start_bits = start_bits
82
+ self.weight.target_bits = target_bits
83
+ self.weight.q_period = quantization_period
84
+ self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
85
+ if self.weight_quantization_enabled_in_forward:
86
+ logger.warning(
87
+ "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
88
+ )
89
+ if self.weight.target_bits >= 3:
90
+ if quantization_type == 'symmetric':
91
+ self.weight_quantizer = SymQuantizer.apply
92
+ else:
93
+ self.weight_quantizer = AsymQuantizer.apply
94
+ elif self.weight.target_bits == 2:
95
+ assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization'
96
+ self.weight_quantizer = TernaryQuantizer.apply
97
+ elif self.weight.target_bits == 1:
98
+ assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization'
99
+ self.weight_quantizer = BinaryQuantizer.apply
100
+ # for embedding, we always use token-wise quantization
101
+ self.weight_quantize_num_groups = self.weight.size(0)
102
+
103
+ def fix_weight_quantization(self):
104
+ self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
105
+ self.weight_quantize_num_groups).data
106
+ self.weight_quantization_enabled_in_forward = False
107
+ return None
108
+
109
+ def forward(self, input):
110
+ if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
111
+ weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
112
+ self.weight_quantize_num_groups)
113
+ else:
114
+ weight = self.weight
115
+
116
+ out = nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type,
117
+ self.scale_grad_by_freq, self.sparse)
118
+ return out
119
+
120
+
121
+ class LinearLayer_Compress(nn.Linear):
122
+ """
123
+ Linear layer with compression.
124
+ """
125
+
126
+ def __init__(self, *kargs, bias=True):
127
+ super(LinearLayer_Compress, self).__init__(*kargs, bias=bias)
128
+ self.sparse_pruning_method = None
129
+ self.row_pruning_method = None
130
+ self.head_pruning_method = None
131
+ self.activation_quantization_method = None
132
+ self.weight.start_bits = None
133
+ self.weight.target_bits = None
134
+ self.weight.q_period = None
135
+ self.weight_quantization_enabled_in_forward = False
136
+ self.weight_quantization_enabled = False
137
+ self.sparse_pruning_enabled = False
138
+ self.row_pruning_enabled = False
139
+ self.head_pruning_enabled = False
140
+ self.activation_quantization_enabled = False
141
+
142
+ def extra_repr(self):
143
+ return 'in_features={}, out_features={}, bias={}, sparse pruning={}, row pruning={}, head pruning={}, activation quantization={}, weight_quantization={}'.format(
144
+ self.in_features, self.out_features, self.bias is not None, self.sparse_pruning_method is not None, \
145
+ self.row_pruning_method is not None, self.head_pruning_method is not None, self.activation_quantization_method is not None, self.weight.target_bits)
146
+
147
+ def enable_sparse_pruning(self, ratio, method):
148
+ # Here, we support two cases: L1 norm based pruning and topk based pruning
149
+ self.sparse_pruning_ratio = ratio
150
+ self.sparse_pruning_method = method
151
+ if method == 'l1':
152
+ weight_norm = torch.abs(self.weight.data)
153
+ mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False)
154
+ mask = mask.view(self.weight.size())
155
+ mask = mask.to(self.weight.device)
156
+ elif method == 'topk':
157
+ self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
158
+ self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device)
159
+ init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5))
160
+ mask = None
161
+ else:
162
+ raise NotImplementedError
163
+
164
+ self.register_buffer('sparse_pruning_mask', mask)
165
+
166
+ def enable_row_pruning(self, ratio, method):
167
+ # Here, we support two cases: L1 norm based pruning and topk based pruning
168
+ self.row_pruning_ratio = ratio
169
+ self.row_pruning_method = method
170
+
171
+ if method == 'l1':
172
+ # compute the l1 norm of each column
173
+ weight_norm = torch.linalg.norm(self.weight.data, ord=1, dim=1)
174
+ mask = TopKBinarizer.apply(weight_norm, self.row_pruning_ratio, False)
175
+ mask = mask.view(-1, 1)
176
+ mask = mask.to(self.weight.device)
177
+ elif method == 'topk':
178
+ self.row_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1))
179
+ self.row_mask_scores.data = self.row_mask_scores.data.to(self.weight.device)
180
+ init.kaiming_uniform_(self.row_mask_scores, a=math.sqrt(5))
181
+ mask = None
182
+ else:
183
+ raise NotImplementedError
184
+
185
+ self.register_buffer('row_pruning_mask', mask)
186
+
187
+ def enable_head_pruning(self, ratio, method, num_heads):
188
+ # Here, we support only topk based pruning
189
+ self.num_heads = num_heads
190
+ self.head_pruning_ratio = ratio
191
+ self.head_pruning_method = method
192
+
193
+ if method not in ['topk']:
194
+ raise NotImplementedError
195
+ else:
196
+ self.head_pruning_ratio = ratio
197
+ self.head_pruning_scores = nn.Parameter(torch.Tensor(1,
198
+ self.num_heads)) # we apply the pruning to O matrix
199
+ self.head_pruning_scores.data = self.head_pruning_scores.data.to(self.weight.device)
200
+ init.kaiming_uniform_(self.head_pruning_scores, a=math.sqrt(5))
201
+
202
+ def fix_sparse_pruning_helper(self):
203
+ mask = self.get_mask(pruning_type='sparse')
204
+ self.weight.data = self.weight.data * mask
205
+ del self.sparse_pruning_mask
206
+ if self.sparse_pruning_method == 'topk':
207
+ del self.sparse_mask_scores
208
+ self.sparse_pruning_method = None
209
+ self.sparse_pruning_enabled = False
210
+ return None
211
+
212
+ def fix_row_col_pruning_helper(self, mask=None, dim_reduction=False):
213
+ # This function is used for row/col pruning
214
+ # particularly, if we have two back-to-back layers, F1 and F2; when
215
+ # we remove rows from F1, we also need to remove columns from F2
216
+ # However, if we only have one layer, F1, then we only need to mask pruned
217
+ # rows as 0 in F1
218
+ if mask is None:
219
+ mask = self.get_mask(pruning_type='row').bool()
220
+ if dim_reduction:
221
+ start_bits = self.weight.start_bits
222
+ target_bits = self.weight.target_bits
223
+ q_period = self.weight.q_period
224
+ self.weight = nn.Parameter(self.weight.data[mask.view(-1), :])
225
+ self.weight.start_bits = start_bits
226
+ self.weight.target_bits = target_bits
227
+ self.weight.q_period = q_period
228
+ if self.bias is not None:
229
+ self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
230
+ self.out_features = self.weight.size(0)
231
+ else:
232
+ self.weight.data = self.weight.data * mask.view(-1, 1)
233
+ if self.bias is not None:
234
+ self.bias.data = self.bias.data * mask.view(-1)
235
+
236
+ del self.row_pruning_mask
237
+ if self.row_pruning_method == 'topk':
238
+ del self.row_mask_scores
239
+ self.row_pruning_method = None
240
+ else:
241
+ # this is generally for column pruning
242
+ start_bits = self.weight.start_bits
243
+ target_bits = self.weight.target_bits
244
+ q_period = self.weight.q_period
245
+ self.weight = nn.Parameter(self.weight.data[:, mask.view(-1)])
246
+ self.weight.start_bits = start_bits
247
+ self.weight.target_bits = target_bits
248
+ self.weight.q_period = q_period
249
+ self.in_features = self.weight.size(1)
250
+ mask = None
251
+ self.row_pruning_enabled = False
252
+ return mask
253
+
254
+ def fix_head_pruning_helper(self, mask=None, num_heads=None, dim_reduction=False):
255
+ # similar as row/col pruning, head pruning also needs to prune QKV which is associated with O matrix
256
+ num_heads = num_heads if num_heads else self.num_heads
257
+ if mask is None:
258
+ if self.head_pruning_method == 'topk':
259
+ mask = self.get_mask(pruning_type='head').bool()
260
+ if dim_reduction:
261
+ shape = self.weight.size(0)
262
+ start_bits = self.weight.start_bits
263
+ target_bits = self.weight.target_bits
264
+ q_period = self.weight.q_period
265
+ self.weight = nn.Parameter(self.weight.data.t().reshape(num_heads,
266
+ -1)[mask.view(-1), :].reshape(-1,
267
+ shape).t())
268
+ self.weight.start_bits = start_bits
269
+ self.weight.target_bits = target_bits
270
+ self.weight.q_period = q_period
271
+ else:
272
+
273
+ shape = self.weight.size()
274
+ self.weight.data = (self.weight.data.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape(
275
+ shape[1], shape[0]).t()
276
+
277
+ if self.head_pruning_method == 'topk':
278
+ del self.head_pruning_scores
279
+ self.head_pruning_method = None
280
+ else:
281
+ raise NotImplementedError
282
+ else:
283
+ start_bits = self.weight.start_bits
284
+ target_bits = self.weight.target_bits
285
+ q_period = self.weight.q_period
286
+ shape = self.weight.size(1)
287
+ self.weight = nn.Parameter(self.weight.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1, shape))
288
+ self.weight.start_bits = start_bits
289
+ self.weight.target_bits = target_bits
290
+ self.weight.q_period = q_period
291
+ if self.bias is not None:
292
+ self.bias = nn.Parameter(self.bias.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1))
293
+ self.head_pruning_enabled = False
294
+ return mask
295
+
296
+ def get_mask(self, pruning_type='row'):
297
+ if pruning_type == 'sparse':
298
+ if self.sparse_pruning_method == 'l1':
299
+ return self.sparse_pruning_mask.to(self.weight.device)
300
+ elif self.sparse_pruning_method == 'topk':
301
+ return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False)
302
+ else:
303
+ raise NotImplementedError
304
+ if pruning_type == 'row':
305
+ if self.row_pruning_method == 'l1':
306
+ return self.row_pruning_mask.to(self.weight.device)
307
+ elif self.row_pruning_method == 'topk':
308
+ return TopKBinarizer.apply(self.row_mask_scores, self.row_pruning_ratio, False)
309
+ else:
310
+ raise NotImplementedError
311
+ elif pruning_type == 'head':
312
+ if self.head_pruning_method == 'topk':
313
+ return TopKBinarizer.apply(self.head_pruning_scores, self.head_pruning_ratio, False)
314
+ else:
315
+ raise NotImplementedError
316
+ else:
317
+ raise NotImplementedError
318
+
319
+ def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
320
+ weight_quantization_enabled_in_forward, quantization_type, num_groups):
321
+ self.weight.start_bits = start_bits
322
+ self.weight.target_bits = target_bits
323
+ self.weight.q_period = quantization_period
324
+ self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
325
+ if self.weight_quantization_enabled_in_forward:
326
+ logger.warning(
327
+ "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
328
+ )
329
+ if self.weight.target_bits >= 3:
330
+ if quantization_type == 'symmetric':
331
+ self.weight_quantizer = SymQuantizer.apply
332
+ else:
333
+ self.weight_quantizer = AsymQuantizer.apply
334
+ elif self.weight.target_bits == 2:
335
+ assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization'
336
+ self.weight_quantizer = TernaryQuantizer.apply
337
+ elif self.weight.target_bits == 1:
338
+ assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization'
339
+ self.weight_quantizer = BinaryQuantizer.apply
340
+ self.weight_quantize_num_groups = num_groups
341
+
342
+ def fix_weight_quantization(self):
343
+ self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
344
+ self.weight_quantize_num_groups).data
345
+ self.weight_quantization_enabled_in_forward = False
346
+ return None
347
+
348
+ def enable_activation_quantization(self, bits, quantization_type, range_calibration):
349
+ assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now'
350
+ self.activation_quantization_bits = bits
351
+ self.activation_quantization_method = f"{quantization_type}_{range_calibration}"
352
+ if range_calibration == 'static':
353
+ self.activation_quantizer = QuantAct(quant_mode=quantization_type)
354
+ else:
355
+ if quantization_type == 'symmetric':
356
+ self.activation_quantizer = SymQuantizer.apply
357
+ else:
358
+ self.activation_quantizer = AsymQuantizer.apply
359
+
360
+ def head_pruning_reshape(self, w, mask):
361
+ shape = w.shape
362
+ return (w.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape(shape[1], shape[0]).t()
363
+
364
+ def forward(self, input, skip_bias_add=False):
365
+
366
+ if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
367
+ weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
368
+ self.weight_quantize_num_groups)
369
+ bias = self.bias
370
+ else:
371
+ weight = self.weight
372
+ bias = self.bias
373
+
374
+ if self.sparse_pruning_enabled and self.sparse_pruning_method:
375
+ mask = self.get_mask(pruning_type='sparse')
376
+ weight = weight * mask.view(self.weight.size())
377
+
378
+ if self.row_pruning_enabled and self.row_pruning_method:
379
+ mask = self.get_mask(pruning_type='row')
380
+ weight = weight * mask.view(-1, 1)
381
+ if bias is not None:
382
+ bias = bias * mask.view(-1)
383
+
384
+ if self.head_pruning_enabled and self.head_pruning_method:
385
+ mask = self.get_mask(pruning_type='head')
386
+ weight = self.head_pruning_reshape(weight, mask)
387
+
388
+ if self.activation_quantization_enabled:
389
+ if 'dynamic' in self.activation_quantization_method:
390
+ num_groups = input.numel() // input.size(-1)
391
+ else:
392
+ num_groups = 1
393
+ input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups)
394
+
395
+ if skip_bias_add:
396
+ # used for mpu linear layers
397
+ output = nn.functional.linear(input, weight, None)
398
+ return output, bias
399
+ else:
400
+ output = nn.functional.linear(input, weight, bias)
401
+ return output
402
+
403
+
404
+ class Conv2dLayer_Compress(nn.Conv2d):
405
+ """
406
+ Conv2D layer with compression.
407
+ """
408
+
409
+ def __init__(self, *kargs):
410
+ super(Conv2dLayer_Compress, self).__init__(*kargs)
411
+ self.sparse_pruning_method = None
412
+ self.channel_pruning_method = None
413
+ self.activation_quantization_method = None
414
+ self.weight.start_bits = None
415
+ self.weight.target_bits = None
416
+ self.weight.q_period = None
417
+ self.weight_quantization_enabled_in_forward = False
418
+ self.sparse_pruning_enabled = False
419
+ self.channel_pruning_enabled = False
420
+ self.activation_quantization_enabled = False
421
+
422
+ def __repr__(self):
423
+ s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
424
+ ', stride={stride}')
425
+ if self.padding != (0, ) * len(self.padding):
426
+ s += ', padding={padding}'
427
+ if self.dilation != (1, ) * len(self.dilation):
428
+ s += ', dilation={dilation}'
429
+ if self.output_padding != (0, ) * len(self.output_padding):
430
+ s += ', output_padding={output_padding}'
431
+ if self.groups != 1:
432
+ s += ', groups={groups}'
433
+ if self.bias is None:
434
+ s += ', bias=False'
435
+ if self.padding_mode != 'zeros':
436
+ s += ', padding_mode={padding_mode}'
437
+ output = s.format(**self.__dict__)
438
+
439
+ return output + ' sparse pruning={}, channel pruning={}, activation quantization={}, weight_quantization={}'.format(
440
+ self.sparse_pruning_method is not None, self.channel_pruning_method is not None,
441
+ self.activation_quantization_method is not None, self.weight.target_bits)
442
+
443
+ def enable_sparse_pruning(self, ratio, method):
444
+ self.sparse_pruning_ratio = ratio
445
+ self.sparse_pruning_method = method
446
+ if method == 'l1':
447
+ weight_norm = torch.abs(self.weight.data)
448
+ mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False)
449
+ mask = mask.view(self.weight.size())
450
+ mask = mask.to(self.weight.device)
451
+ elif method == 'topk':
452
+ self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
453
+ self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device)
454
+ init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5))
455
+ mask = None
456
+ else:
457
+ raise NotImplementedError
458
+
459
+ self.register_buffer('sparse_pruning_mask', mask)
460
+
461
+ def enable_channel_pruning(self, ratio, method):
462
+ # Here, we support two cases: L1 norm based pruning and topk based pruning
463
+ self.channel_pruning_ratio = ratio
464
+ self.channel_pruning_method = method
465
+
466
+ if method == 'l1':
467
+ # compute the l1 norm of each conv2d kernel (the last three dimension)
468
+ weight_norm = torch.linalg.norm(self.weight.data, ord=1, dim=[1, 2, 3])
469
+ mask = TopKBinarizer.apply(weight_norm, self.channel_pruning_ratio, False)
470
+ mask = mask.view(-1, 1, 1, 1)
471
+ mask = mask.to(self.weight.device)
472
+ elif method == 'topk':
473
+ self.channel_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1, 1, 1))
474
+ self.channel_mask_scores.data = self.channel_mask_scores.data.to(self.weight.device)
475
+ init.kaiming_uniform_(self.channel_mask_scores, a=math.sqrt(5))
476
+ mask = None
477
+ else:
478
+ raise NotImplementedError
479
+
480
+ self.register_buffer('channel_pruning_mask', mask)
481
+
482
+ def fix_sparse_pruning_helper(self):
483
+ mask = self.get_mask(pruning_type='sparse')
484
+ self.weight.data = self.weight.data * mask
485
+ del self.sparse_pruning_mask
486
+ if self.sparse_pruning_method == 'topk':
487
+ del self.sparse_mask_scores
488
+ self.sparse_pruning_method = None
489
+ self.sparse_pruning_enabled = False
490
+ return None
491
+
492
+ def fix_channel_pruning_helper(self, mask=None, dim_reduction=False):
493
+ if mask is None:
494
+ if self.channel_pruning_method in ['l1', 'topk']:
495
+ mask = self.get_mask(pruning_type='channel').bool()
496
+ if dim_reduction:
497
+ start_bits = self.weight.start_bits
498
+ target_bits = self.weight.target_bits
499
+ q_period = self.weight.q_period
500
+ self.weight = nn.Parameter(self.weight.data[mask.view(-1), ...])
501
+ self.weight.start_bits = start_bits
502
+ self.weight.target_bits = target_bits
503
+ self.weight.q_period = q_period
504
+ if self.bias is not None:
505
+ self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
506
+ else:
507
+ self.weight.data = self.weight.data * mask.view(-1, 1, 1, 1)
508
+ if self.bias is not None:
509
+ self.bias.data = self.bias.data * mask.view(-1)
510
+ del self.channel_pruning_mask
511
+ if self.channel_pruning_method == 'topk':
512
+ del self.channel_mask_scores
513
+ self.channel_pruning_method = None
514
+ else:
515
+ raise NotImplementedError
516
+ else:
517
+ start_bits = self.weight.start_bits
518
+ target_bits = self.weight.target_bits
519
+ q_period = self.weight.q_period
520
+ self.weight = nn.Parameter(self.weight.data[:, mask.view(-1), ...])
521
+ self.weight.start_bits = start_bits
522
+ self.weight.target_bits = target_bits
523
+ self.weight.q_period = q_period
524
+ mask = None
525
+ self.channel_pruning_enabled = False
526
+ return mask
527
+
528
+ def get_mask(self, pruning_type='sparse'):
529
+ if pruning_type == 'sparse':
530
+ if self.sparse_pruning_method == 'l1':
531
+ return self.sparse_pruning_mask.to(self.weight.device)
532
+ elif self.sparse_pruning_method == 'topk':
533
+ return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False)
534
+ else:
535
+ raise NotImplementedError
536
+ elif pruning_type == 'channel':
537
+ if self.channel_pruning_method == 'l1':
538
+ return self.channel_pruning_mask.to(self.weight.device)
539
+ elif self.channel_pruning_method == 'topk':
540
+ return TopKBinarizer.apply(self.channel_mask_scores, self.channel_pruning_ratio, False)
541
+ else:
542
+ raise NotImplementedError
543
+ else:
544
+ raise NotImplementedError
545
+
546
+ def fix_weight_quantization(self):
547
+ self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
548
+ self.weight_quantize_num_groups).data
549
+ self.weight_quantization_enabled_in_forward = False
550
+ return None
551
+
552
+ def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
553
+ weight_quantization_enabled_in_forward, quantization_type, num_groups):
554
+ self.weight.start_bits = start_bits
555
+ self.weight.target_bits = target_bits
556
+ self.weight.q_period = quantization_period
557
+ self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
558
+ if self.weight_quantization_enabled_in_forward:
559
+ assert self.weight.target_bits >= 4, 'Only >=4 bits weight quantization are supported during forward pass for now'
560
+ logger.warning(
561
+ "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
562
+ )
563
+ if quantization_type == 'symmetric':
564
+ self.weight_quantizer = SymQuantizer.apply
565
+ else:
566
+ self.weight_quantizer = AsymQuantizer.apply
567
+ self.weight_quantize_num_groups = num_groups
568
+
569
+ def enable_activation_quantization(self, bits, quantization_type, range_calibration):
570
+ assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now'
571
+ self.activation_quantization_bits = bits
572
+ self.activation_quantization_method = f"{quantization_type}_{range_calibration}"
573
+ if range_calibration == 'static':
574
+ self.activation_quantizer = QuantAct(quant_mode=quantization_type)
575
+ else:
576
+ if quantization_type == 'symmetric':
577
+ self.activation_quantizer = SymQuantizer.apply
578
+ else:
579
+ self.activation_quantizer = AsymQuantizer.apply
580
+
581
+ def forward(self, input):
582
+
583
+ if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
584
+ weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
585
+ self.weight_quantize_num_groups)
586
+ bias = self.bias
587
+ else:
588
+ weight = self.weight
589
+ bias = self.bias
590
+
591
+ if self.sparse_pruning_enabled and self.sparse_pruning_method:
592
+ mask = self.get_mask(pruning_type='sparse')
593
+ weight = weight * mask.view(self.weight.size())
594
+
595
+ if self.channel_pruning_enabled:
596
+ mask = self.get_mask(pruning_type='channel')
597
+ weight = weight * mask.view(-1, 1, 1, 1)
598
+ if bias is not None:
599
+ bias = bias * mask.view(-1)
600
+
601
+ if self.activation_quantization_enabled:
602
+ if 'dynamic' in self.activation_quantization_method:
603
+ num_groups = input.numel() // input[0].numel()
604
+ else:
605
+ num_groups = 1
606
+ input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups)
607
+
608
+ return nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
609
+
610
+
611
+ class BNLayer_Compress(nn.BatchNorm2d):
612
+
613
+ def fix_channel_pruning_helper(self, mask, dim_reduction=True):
614
+ self.weight = nn.Parameter(self.weight.data[mask.view(-1)])
615
+ self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
616
+ self.running_mean = self.running_mean[mask.view(-1)]
617
+ self.running_var = self.running_var[mask.view(-1)]
618
+
619
+
620
+ def _reduce(input_):
621
+ """All-reduce the input tensor across model parallel group."""
622
+ group = g_mpu.get_model_parallel_group()
623
+
624
+ # Bypass the function if we are using only 1 GPU.
625
+ if dist.get_world_size(group=group) == 1:
626
+ return input_
627
+
628
+ # All-reduce.
629
+ dist.all_reduce(input_, group=group)
630
+
631
+ return input_
632
+
633
+
634
+ def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False):
635
+ """Split a tensor along its last dimension.
636
+ Arguments:
637
+ tensor: input tensor.
638
+ num_partitions: number of partitions to split the tensor
639
+ contiguous_split_chunks: If True, make each chunk contiguous
640
+ in memory.
641
+ """
642
+ # Get the size and dimension.
643
+ last_dim = tensor.dim() - 1
644
+ assert tensor.size()[last_dim] % num_partitions == 0
645
+ last_dim_size = tensor.size()[last_dim] // num_partitions
646
+ # Split.
647
+ tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
648
+ # Note: torch.split does not create contiguous tensors by default.
649
+ if contiguous_split_chunks:
650
+ return tuple(chunk.contiguous() for chunk in tensor_list)
651
+
652
+ return tensor_list
653
+
654
+
655
+ def _split(input_):
656
+ """Split the tensor along its last dimension and keep the
657
+ corresponding slice."""
658
+ group = g_mpu.get_model_parallel_group()
659
+
660
+ # Bypass the function if we are using only 1 GPU.
661
+ if dist.get_world_size(group=group) == 1:
662
+ return input_
663
+
664
+ # Split along last dimension.
665
+ world_size = dist.get_world_size(group=group)
666
+ input_list = split_tensor_along_last_dim(input_, world_size)
667
+
668
+ # Note: torch.split does not create contiguous tensors by default.
669
+ rank = dist.get_rank(group=group)
670
+ output = input_list[rank].contiguous()
671
+
672
+ return output
673
+
674
+
675
+ def _gather(input_):
676
+ """Gather tensors and concatenate along the last dimension."""
677
+ group = g_mpu.get_model_parallel_group()
678
+
679
+ # Bypass the function if we are using only 1 GPU.
680
+ if dist.get_world_size(group=group) == 1:
681
+ return input_
682
+
683
+ # Size and dimension.
684
+ last_dim = input_.dim() - 1
685
+ rank = dist.get_rank(group=group)
686
+ world_size = dist.get_world_size(group=group)
687
+
688
+ tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
689
+ tensor_list[rank] = input_
690
+ dist.all_gather(tensor_list, input_, group=group)
691
+
692
+ # Note: torch.cat already creates a contiguous tensor.
693
+ output = torch.cat(tensor_list, dim=last_dim).contiguous()
694
+
695
+ return output
696
+
697
+
698
+ class _CopyToModelParallelRegion(torch.autograd.Function):
699
+ """Pass the input to the model parallel region."""
700
+
701
+ @staticmethod
702
+ def forward(ctx, input_):
703
+ return input_
704
+
705
+ @staticmethod
706
+ def backward(ctx, grad_output):
707
+ return _reduce(grad_output)
708
+
709
+
710
+ class _ReduceFromModelParallelRegion(torch.autograd.Function):
711
+ """All-reduce the input from the model parallel region."""
712
+
713
+ @staticmethod
714
+ def forward(ctx, input_):
715
+ return _reduce(input_)
716
+
717
+ @staticmethod
718
+ def backward(ctx, grad_output):
719
+ return grad_output
720
+
721
+
722
+ class _ScatterToModelParallelRegion(torch.autograd.Function):
723
+ """Split the input and keep only the corresponding chuck to the rank."""
724
+
725
+ @staticmethod
726
+ def forward(ctx, input_):
727
+ return _split(input_)
728
+
729
+ @staticmethod
730
+ def backward(ctx, grad_output):
731
+ return _gather(grad_output)
732
+
733
+
734
+ class _GatherFromModelParallelRegion(torch.autograd.Function):
735
+ """Gather the input from model parallel region and concatenate."""
736
+
737
+ @staticmethod
738
+ def forward(ctx, input_):
739
+ return _gather(input_)
740
+
741
+ @staticmethod
742
+ def backward(ctx, grad_output):
743
+ return _split(grad_output)
744
+
745
+
746
+ # -----------------
747
+ # Helper functions.
748
+ # -----------------
749
+
750
+
751
+ def copy_to_model_parallel_region(input_):
752
+ return _CopyToModelParallelRegion.apply(input_)
753
+
754
+
755
+ def reduce_from_model_parallel_region(input_):
756
+ return _ReduceFromModelParallelRegion.apply(input_)
757
+
758
+
759
+ def scatter_to_model_parallel_region(input_):
760
+ return _ScatterToModelParallelRegion.apply(input_)
761
+
762
+
763
+ def gather_from_model_parallel_region(input_):
764
+ return _GatherFromModelParallelRegion.apply(input_)
765
+
766
+
767
+ class ColumnParallelLinear_Compress(LinearLayer_Compress):
768
+
769
+ def __init__(self, mpu, input_size, output_size, bias=True, gather_output=True, skip_bias_add=False):
770
+ # Keep input parameters
771
+ global g_mpu
772
+ g_mpu = mpu
773
+ self.input_size = input_size
774
+ self.output_size = output_size
775
+ self.gather_output = gather_output
776
+ self.skip_bias_add = skip_bias_add
777
+
778
+ # Divide the weight matrix along the last dimension.
779
+ world_size = mpu.get_model_parallel_world_size()
780
+ assert output_size % world_size == 0
781
+ self.output_size_per_partition = output_size // world_size
782
+
783
+ super(ColumnParallelLinear_Compress, self).__init__(self.input_size, self.output_size_per_partition, bias=bias)
784
+
785
+ def forward(self, input_):
786
+ # Set up backprop all-reduce.
787
+ input_parallel = copy_to_model_parallel_region(input_)
788
+ # Matrix multiply.
789
+ if self.skip_bias_add:
790
+ output_parallel, bias = super().forward(input_parallel, True)
791
+ else:
792
+ output_parallel = super().forward(input_parallel)
793
+ bias = None
794
+ if self.gather_output:
795
+ # All-gather across the partitions.
796
+ output = gather_from_model_parallel_region(output_parallel)
797
+ else:
798
+ output = output_parallel
799
+ return output, bias
800
+
801
+
802
+ class RowParallelLinear_Compress(LinearLayer_Compress):
803
+
804
+ def __init__(self, mpu, input_size, output_size, bias=True, input_is_parallel=False, skip_bias_add=False):
805
+ # Keep input parameters
806
+ global g_mpu
807
+ g_mpu = mpu
808
+ self.input_size = input_size
809
+ self.output_size = output_size
810
+ self.input_is_parallel = input_is_parallel
811
+ self.skip_bias_add = skip_bias_add
812
+
813
+ # Divide the weight matrix along the last dimension.
814
+ world_size = mpu.get_model_parallel_world_size()
815
+ assert input_size % world_size == 0
816
+ self.input_size_per_partition = input_size // world_size
817
+
818
+ super(RowParallelLinear_Compress, self).__init__(self.input_size_per_partition, self.output_size, bias=bias)
819
+
820
+ def forward(self, input_):
821
+ # Set up backprop all-reduce.
822
+ if self.input_is_parallel:
823
+ input_parallel = input_
824
+ else:
825
+ input_parallel = scatter_to_model_parallel_region(input_)
826
+ # Matrix multiply.
827
+ output_parallel, bias = super().forward(input_parallel, True)
828
+
829
+ # All-reduce across all the partitions.
830
+ output_ = reduce_from_model_parallel_region(output_parallel)
831
+ if not self.skip_bias_add:
832
+ if bias is not None:
833
+ output = output_ + bias
834
+ else:
835
+ output = output_
836
+ output_bias = None
837
+ else:
838
+ output = output_
839
+ output_bias = bias
840
+ return output, output_bias
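
A minimal sketch of using one of the compression-aware layers above directly (the sizes are made up; in real use the compression scheduler toggles the enable flags rather than setting them by hand):

import torch
from deepspeed.compression.basic_layer import LinearLayer_Compress

layer = LinearLayer_Compress(8, 4, bias=True)
layer.enable_row_pruning(ratio=0.5, method='l1')
layer.row_pruning_enabled = True   # normally flipped by the compression scheduler

out = layer(torch.randn(2, 8))     # forward applies the L1-based row mask to weight and bias
print(out.shape)                   # torch.Size([2, 4])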
venv/lib/python3.10/site-packages/deepspeed/compression/compress.py ADDED
@@ -0,0 +1,239 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import re
7
+ from .helper import compression_preparation, fix_compression, recursive_getattr, is_module_compressible
8
+ from .config import get_compression_config
9
+ from ..runtime.config_utils import dict_raise_error_on_duplicate_keys
10
+ from .constants import *
11
+ import os
12
+ import json
13
+
14
+ try:
15
+ import neural_compressor as nc
16
+ except ImportError as e:
17
+ nc = None
18
+
19
+
20
+ def check_deepspeed_config(config):
21
+ if isinstance(config, dict):
22
+ return config
23
+ elif os.path.exists(config):
24
+ return json.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
25
+ else:
26
+ raise ValueError(
27
+ f"Expected a string path to an existing deepspeed config, or a dictionary. Received: {config}")
28
+
29
+
30
+ def get_module_name(group_name, model, key_word, exist_module_name, mpu=None, verbose=True):
31
+ '''
32
+ get the associated module name from the model based on the key_word provided by users
33
+ '''
34
+ return_module_name = []
35
+ for name, module in model.named_modules():
36
+
37
+ module_check = is_module_compressible(module, mpu)
38
+
39
+ if re.search(key_word, name) is not None and module_check:
40
+ if name in exist_module_name and verbose:
41
+ # logger.warning
42
+ raise ValueError(
43
+ f"{name} is already added to compression, please check your config file for {group_name}.")
44
+ if name not in exist_module_name:
45
+ exist_module_name.add(name)
46
+ return_module_name.append(name)
47
+ return return_module_name, exist_module_name
48
+
49
+
50
+ def get_compress_methods(model, compress_methods, mpu=None):
51
+ # extract the compression module for each method in compress_methods
52
+ layer_added_compress_methods = []
53
+ for method, method_content in compress_methods.items():
54
+ if LAYER_REDUCTION in method:
55
+ continue
56
+ # for loop different methods, i.e., weight quantization, activation quantization etc
57
+ exist_module_name = set()
58
+ shared_parameters = method_content[SHARED_PARAMETERS] # get all the shared parameters
59
+ for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items():
60
+ # for loop different groups, i.e., weight quantization group 1, weight quantization group 2 etc
61
+ module_name_list = []
62
+ related_module_name_list = []
63
+ if method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]:
64
+ # this is used for head/row/channel pruning, if users provide the related module scope, we can shrink the layer dim for them
65
+ # otherwise we just mask those as zeros
66
+ for key_word, related_key_words in zip(method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE],
67
+ method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]):
68
+ module_name, exist_module_name = get_module_name(group_name,
69
+ model,
70
+ key_word,
71
+ exist_module_name,
72
+ mpu=mpu)
73
+ module_name_list.append(module_name)
74
+ tmp_related_module_name_list = []
75
+ for rkw in related_key_words:
76
+ # related key word can be a list, for instance the QKV for O matrix in Attention
77
+ module_name, _ = get_module_name(group_name, model, rkw, set(), mpu=mpu)
78
+ tmp_related_module_name_list.append(module_name)
79
+ related_module_name_list.append(tmp_related_module_name_list)
80
+ else:
81
+ for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]:
82
+ module_name, exist_module_name = get_module_name(group_name,
83
+ model,
84
+ key_word,
85
+ exist_module_name,
86
+ mpu=mpu)
87
+ module_name_list.append(module_name)
88
+
89
+ if module_name_list:
90
+ # combine shared parameters with each group
91
+ combined_method_parameters = {
92
+ **(method_parameters.copy().pop(DIFFERENT_GROUPS_PARAMETERS)),
93
+ **shared_parameters
94
+ }
95
+ compression_item = [module_name_list, related_module_name_list, {method: combined_method_parameters}]
96
+ layer_added_compress_methods.append(compression_item)
97
+ return layer_added_compress_methods
98
+
99
+
100
+ def init_compression(model, deepspeed_config, teacher_model=None, mpu=None):
101
+ """
102
+ Compress a model: replace linear/conv2d layer with deepspeed compression-aware modules
103
+ Args:
104
+ model (`torch.nn.Module`)
105
+ The model to compress.
106
+ deepspeed_config (`DeepSpeedConfig`)
107
+ The path of the DeepSpeed config JSON file, or the config dict itself
108
+ mpu
109
+ The mpu module for Row/Column parallelism
110
+ """
111
+ compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
112
+ if hasattr(model, 'module'):
113
+ c_model = model.module
114
+ else:
115
+ c_model = model
116
+
117
+ # For layer reduction
118
+ if compress_methods[LAYER_REDUCTION][LAYER_REDUCTION_ENABLED]:
119
+ assert teacher_model is not None, "Teacher model is required for layer reduction"
120
+ student_initialization(c_model, teacher_model, deepspeed_config)
121
+
122
+ layer_added_compress_methods = get_compress_methods(c_model, compress_methods, mpu=mpu)
123
+ compression_preparation(c_model, layer_added_compress_methods, mpu)
124
+
125
+ # For sparse pruning snip_momentum method
126
+ shared_parameters = compress_methods[SPARSE_PRUNING][SHARED_PARAMETERS]
127
+ if shared_parameters[SPARSE_PRUNING_ENABLED] and \
128
+ shared_parameters[SPARSE_PRUNING_METHOD] == SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
129
+
130
+ assert nc is not None, "please ensure the neural_compressor python package is installed via pip or conda if you want to use snip_momentum sparse pruning"
131
+
132
+ from .helper import generate_pruners, register_on_step_begin
133
+ from nc import WeightPruningConfig
134
+
135
+ config = WeightPruningConfig(target_sparsity=1 - shared_parameters[SPARSE_PRUNING_DENSE_RATIO],
136
+ pattern=shared_parameters[SPARSE_PRUNING_BLOCK_PATTERN],
137
+ pruning_frequency=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE],
138
+ start_step=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET],
139
+ end_step=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET_END],
140
+ excluded_op_names=shared_parameters[SPARSE_PRUNING_EXCLUDED_MODULES])
141
+ pruners = generate_pruners(config, c_model)
142
+ c_model.pruners = pruners
143
+ register_on_step_begin(c_model)
144
+
145
+ return model
146
+
147
+
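A minimal usage sketch (assuming DeepSpeed is installed; the model, group name, module scope and bit-widths are illustrative, not taken from a shipped config): one weight-quantization group is declared and init_compression swaps the matching Linear layers for their compression-aware wrappers.

import torch
from deepspeed.compression.compress import init_compression

model = torch.nn.Sequential()
model.add_module("fc1", torch.nn.Linear(16, 16))   # matches the "fc" module scope below
model.add_module("out", torch.nn.Linear(16, 4))

ds_config = {                                       # hypothetical config for this sketch
    "compression_training": {
        "weight_quantization": {
            "shared_parameters": {
                "enabled": True,
                "quantize_weight_in_forward": True,
                "quantization_type": "symmetric",
                "schedule_offset": 0
            },
            "different_groups": {
                "wq_group_1": {
                    "params": {"start_bits": 8, "target_bits": 8},
                    "modules": ["fc"]
                }
            }
        }
    }
}

model = init_compression(model, ds_config)          # "fc1" is now a compression-aware linear layer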
148
+ def redundancy_clean(model, deepspeed_config, mpu=None):
149
+ """
150
+ Remove the redundancy of a model
151
+ Args:
152
+ model (`torch.nn.Module`)
153
+ The model to compress.
154
+ deepspeed_config (`DeepSpeedConfig`)
155
+ The path of the DeepSpeed config JSON file, or the config dict itself
156
+ mpu
157
+ The mpu module for Row/Column parallelism
158
+ """
159
+ compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
160
+ if hasattr(model, 'module'):
161
+ c_model = model.module
162
+ else:
163
+ c_model = model
164
+
165
+ layer_added_compress_methods_tmp = get_compress_methods(c_model, compress_methods, mpu=mpu)
166
+ # sort methods
167
+ order_list = [
168
+ WEIGHT_QUANTIZATION, SPARSE_PRUNING, ROW_PRUNING, HEAD_PRUNING, CHANNEL_PRUNING, ACTIVATION_QUANTIZATION
169
+ ]
170
+ layer_added_compress_methods = sorted(layer_added_compress_methods_tmp,
171
+ key=lambda x: order_list.index(list(x[2].keys())[0]))
172
+
173
+ for module_name_lists, related_module_name_lists, compression_technique in layer_added_compress_methods:
174
+ stored_mask = []
175
+ need_mask = True if related_module_name_lists else False
176
+ for i, mnl in enumerate(module_name_lists):
177
+ for module_name in mnl:
178
+ mask = fix_compression(c_model, module_name, compression_technique, dim_reduction=need_mask)
179
+ if need_mask:
180
+ stored_mask.append(mask)
181
+ if need_mask:
182
+ for rmnl in related_module_name_lists[i]:
183
+ for j, module_name in enumerate(rmnl):
184
+ mask = fix_compression(c_model,
185
+ module_name,
186
+ compression_technique,
187
+ mask=stored_mask[j],
188
+ dim_reduction=True)
189
+ return model
190
+
191
+
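Continuing the hypothetical sketch from init_compression above: once training is done, the same config is passed back in so masks and quantization can be fixed into the weights before export.

# Hypothetical continuation; `model` and `ds_config` are from the init_compression sketch above.
model = redundancy_clean(model, ds_config)
torch.save(model.state_dict(), "compressed_model.pt")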
192
+ def student_initialization(student_model, teacher_model, deepspeed_config):
193
+ '''
194
+ Given a student model and a teacher model, copy the selected teacher layers and modules into the student
195
+ Args:
196
+ student_model (`torch.nn.Module`)
197
+ The model we will update weight
198
+ teacher_model (`torch.nn.Module`)
199
+ The model guide the student to learn
200
+ deepspeed_config (`DeepSpeedConfig`)
201
+ The path of the DeepSpeed config JSON file, or the config dict itself
202
+ '''
203
+ config = get_compression_config(check_deepspeed_config(deepspeed_config))
204
+ compress_methods = config[LAYER_REDUCTION]
205
+
206
+ module_name_prefix = compress_methods[MODULE_NAME_PREFIX]
207
+ teacher_layer = compress_methods[TEACHER_LAYER]
208
+ student_layer = [i for i in range(len(teacher_layer))]
209
+ other_module_name = compress_methods[OTHER_MODULE_NAME]
210
+ '''
211
+ name_prefix (`str`)
212
+ The prefix name before the layer #.
213
+ Example 1: bert.encoder.layer, for BERT_base model's prefix name
214
+ Example 2: transformer.h, for GPT-2 hugging face prefix name
215
+ teacher_layer (`list of integers`)
216
+ The layer of teacher will be used for student's reinitialization
217
+ Example 1: [1,3,5,7,9], means we want to match the 2nd/4th/6th/8th/10th layers of the teacher to the first 5 layers of the student
218
+ student_layer (`list` or None)
219
+ The layer of student need to be re-initialized
220
+ Example 1: None, means we want to reinitialize all the layers
221
+ Example 2: [0,1,2,3,4], means we want to reinitialize the first 5 layers
222
+ other_module_name (`list of string`)
223
+ The modules will be used for student's reinitialization
224
+ Example 1: ['bert.pooler', 'bert.embeddings', 'classifier'], means we want to apply the weight in teacher's embedding/pooler/classifier module to the student
225
+ Example 2: ['transformer.w', 'transformer.ln_f', 'lm_head'], means we want to apply the weights of the teacher's embedding, final layer-norm, and LM-head modules to the student
226
+ Note that teacher_layer should match student_layer in length
227
+ '''
228
+ assert len(student_layer) == len(teacher_layer)
229
+ for s_name, t_name in zip(student_layer, teacher_layer):
230
+ s_module = recursive_getattr(student_model, module_name_prefix + '.' + str(s_name))
231
+ t_module = recursive_getattr(teacher_model, module_name_prefix + '.' + str(t_name))
232
+ for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
233
+ s_param.data.copy_(t_param.data)
234
+ for name in other_module_name:
235
+ s_module = recursive_getattr(student_model, name)
236
+ t_module = recursive_getattr(teacher_model, name)
237
+ print(name)
238
+ for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
239
+ s_param.data.copy_(t_param.data)
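For reference, a layer_reduction section consistent with the docstring above might look like the dict below (a hypothetical sketch; the BERT-style names come from the docstring's examples). It sits under "compression_training" in the DeepSpeed config and is consumed by get_layer_reduction in config.py.

layer_reduction_example = {
    "compression_training": {
        "layer_reduction": {
            "enabled": True,
            "keep_number_layer": 5,
            "module_name_prefix": "bert.encoder.layer",
            "teacher_layer": [1, 3, 5, 7, 9],   # teacher layers copied into student layers 0..4
            "other_module_name": ["bert.pooler", "bert.embeddings", "classifier"]
        }
    }
}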
venv/lib/python3.10/site-packages/deepspeed/compression/config.py ADDED
@@ -0,0 +1,452 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .constants import *
7
+ import copy
8
+ from ..runtime.config_utils import get_scalar_param, get_list_param
9
+
10
+
11
+ def get_compression_config(param_dict):
12
+ #
13
+ output = {}
14
+
15
+ if COMPRESSION_TRAINING not in param_dict.keys():
16
+ param_dict[COMPRESSION_TRAINING] = {}
17
+ sub_param_dict = param_dict[COMPRESSION_TRAINING]
18
+ output[WEIGHT_QUANTIZATION] = get_weight_quantization(sub_param_dict)
19
+ output[ACTIVATION_QUANTIZATION] = get_activation_quantization(sub_param_dict)
20
+ output[SPARSE_PRUNING] = get_sparse_pruning(sub_param_dict)
21
+ output[ROW_PRUNING] = get_row_pruning(sub_param_dict)
22
+ output[HEAD_PRUNING] = get_head_pruning(sub_param_dict)
23
+ output[CHANNEL_PRUNING] = get_channel_pruning(sub_param_dict)
24
+
25
+ output[LAYER_REDUCTION] = get_layer_reduction(sub_param_dict)
26
+
27
+ return output
28
+
29
+
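A quick hedged illustration of the parser's behaviour: with an empty dict every technique comes back disabled and filled with its defaults, which is why init_compression can run even when only one technique is configured.

from deepspeed.compression.config import get_compression_config

cfg = get_compression_config({})   # no "compression_training" section at all
assert cfg["weight_quantization"]["shared_parameters"]["enabled"] is False
assert cfg["sparse_pruning"]["shared_parameters"]["method"] == "l1"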
30
+ def get_layer_reduction(param_dict):
31
+ output = {}
32
+ output[LAYER_REDUCTION_ENABLED] = LAYER_REDUCTION_ENABLED_DEFAULT
33
+ if get_layer_reduction_enabled(param_dict):
34
+ output[LAYER_REDUCTION_ENABLED] = get_layer_reduction_enabled(param_dict)
35
+ for key, val in get_layer_reduction_params(param_dict).items():
36
+ output[key] = val
37
+ return output
38
+
39
+
40
+ def get_layer_reduction_enabled(param_dict):
41
+ if LAYER_REDUCTION in param_dict.keys():
42
+ return get_scalar_param(param_dict[LAYER_REDUCTION], LAYER_REDUCTION_ENABLED, LAYER_REDUCTION_ENABLED_DEFAULT)
43
+ else:
44
+ return False
45
+
46
+
47
+ def get_layer_reduction_params(param_dict):
48
+ if LAYER_REDUCTION in param_dict.keys():
49
+ layer_reduction_params = copy.copy(param_dict[LAYER_REDUCTION])
50
+ layer_reduction_params.pop(LAYER_REDUCTION_ENABLED)
51
+ return layer_reduction_params
52
+ else:
53
+ return False
54
+
55
+
56
+ def get_quantize_enabled(param_dict):
57
+ if COMPRESSION_TRAINING not in param_dict.keys():
58
+ return False
59
+
60
+ sub_param_dict = param_dict[COMPRESSION_TRAINING]
61
+ output = get_weight_quantization_shared_parameters(sub_param_dict)
62
+ return output[WEIGHT_QUANTIZE_ENABLED]
63
+
64
+
65
+ def get_weight_quantization(param_dict):
66
+ output = {}
67
+ if WEIGHT_QUANTIZATION not in param_dict.keys():
68
+ param_dict[WEIGHT_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
69
+ sub_param_dict = param_dict[WEIGHT_QUANTIZATION]
70
+ # shared parameters
71
+ output[SHARED_PARAMETERS] = get_weight_quantization_shared_parameters(sub_param_dict)
72
+ # each sub-groups
73
+ if output[SHARED_PARAMETERS][WEIGHT_QUANTIZE_ENABLED]:
74
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
75
+ ), f"Weight Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
76
+ output[DIFFERENT_GROUPS] = get_weight_quantization_different_groups(sub_param_dict)
77
+ return output
78
+
79
+
80
+ def get_weight_quantization_shared_parameters(param_dict):
81
+ output = {}
82
+ if SHARED_PARAMETERS in param_dict.keys():
83
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
84
+ output[WEIGHT_QUANTIZE_ENABLED] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ENABLED,
85
+ WEIGHT_QUANTIZE_ENABLED_DEFAULT)
86
+ output[WEIGHT_QUANTIZE_KERNEL] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_KERNEL,
87
+ WEIGHT_QUANTIZE_KERNEL_DEFAULT)
88
+ output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_SCHEDULE_OFFSET,
89
+ WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
90
+ output[WEIGHT_QUANTIZE_GROUPS] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_GROUPS,
91
+ WEIGHT_QUANTIZE_GROUPS_DEFAULT)
92
+ output[WEIGHT_QUANTIZE_VERBOSE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_VERBOSE,
93
+ WEIGHT_QUANTIZE_VERBOSE_DEFAULT)
94
+ output[WEIGHT_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_TYPE,
95
+ WEIGHT_QUANTIZE_TYPE_DEFAULT)
96
+ output[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] = get_scalar_param(sub_param_dict,
97
+ WEIGHT_QUANTIZE_IN_FORWARD_ENABLED,
98
+ WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT)
99
+ assert output[WEIGHT_QUANTIZE_TYPE] in [
100
+ WEIGHT_QUANTIZE_SYMMETRIC, WEIGHT_QUANTIZE_ASYMMETRIC
101
+ ], f"Invalid weight quantize type. Supported types: [{WEIGHT_QUANTIZE_SYMMETRIC}, {WEIGHT_QUANTIZE_ASYMMETRIC}]"
102
+ output[WEIGHT_QUANTIZE_ROUNDING] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ROUNDING,
103
+ WEIGHT_QUANTIZE_ROUNDING_DEFAULT)
104
+ assert output[WEIGHT_QUANTIZE_ROUNDING] in [
105
+ WEIGHT_QUANTIZE_NEAREST_ROUNDING, WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING
106
+ ], f"Invalid weight quantize rounding. Supported types: [{WEIGHT_QUANTIZE_NEAREST_ROUNDING}, {WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING}]"
107
+ if WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE in sub_param_dict.keys():
108
+ output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = get_scalar_param(
109
+ sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED,
110
+ WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT)
111
+ output[WEIGHT_QUANTIZE_CHANGE_RATIO] = get_scalar_param(
112
+ sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_CHANGE_RATIO,
113
+ WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT)
114
+ else:
115
+ output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
116
+ output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
117
+ else:
118
+ output[WEIGHT_QUANTIZE_ENABLED] = WEIGHT_QUANTIZE_ENABLED_DEFAULT
119
+ output[WEIGHT_QUANTIZE_KERNEL] = WEIGHT_QUANTIZE_KERNEL_DEFAULT
120
+ output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
121
+ output[WEIGHT_QUANTIZE_GROUPS] = WEIGHT_QUANTIZE_GROUPS_DEFAULT
122
+ output[WEIGHT_QUANTIZE_VERBOSE] = WEIGHT_QUANTIZE_VERBOSE_DEFAULT
123
+ output[WEIGHT_QUANTIZE_TYPE] = WEIGHT_QUANTIZE_TYPE_DEFAULT
124
+ output[WEIGHT_QUANTIZE_ROUNDING] = WEIGHT_QUANTIZE_ROUNDING_DEFAULT
125
+ output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
126
+ output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
127
+ return output
128
+
129
+
130
+ def get_weight_quantization_different_groups(param_dict):
131
+ output = {}
132
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
133
+
134
+ def get_params(name, group_dict):
135
+ assert WEIGHT_QUANTIZE_START_BITS in group_dict.keys(
136
+ ), f"{WEIGHT_QUANTIZE_START_BITS} must be specified for weight quantization group {name}"
137
+ assert WEIGHT_QUANTIZE_TARGET_BITS in group_dict.keys(
138
+ ), f"{WEIGHT_QUANTIZE_TARGET_BITS} must be specified for weight quantization group {name}"
139
+ group_dict[WEIGHT_QUANTIZATION_PERIOD] = get_scalar_param(group_dict, WEIGHT_QUANTIZATION_PERIOD,
140
+ WEIGHT_QUANTIZATION_PERIOD_DEFAULT)
141
+ return group_dict
142
+
143
+ for k, v in sub_param_dict.items():
144
+ output[k] = {}
145
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
146
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
147
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
148
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
149
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
150
+
151
+ return output
152
+
153
+
154
+ def get_activation_quantization(param_dict):
155
+ output = {}
156
+ if ACTIVATION_QUANTIZATION not in param_dict.keys():
157
+ param_dict[ACTIVATION_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
158
+ sub_param_dict = param_dict[ACTIVATION_QUANTIZATION]
159
+ # shared parameters
160
+ output[SHARED_PARAMETERS] = get_activation_quantization_shared_parameters(sub_param_dict)
161
+ # each sub-groups
162
+ if output[SHARED_PARAMETERS][ACTIVATION_QUANTIZATION_ENABLED]:
163
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
164
+ ), f"Activation Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
165
+ output[DIFFERENT_GROUPS] = get_activation_quantization_different_groups(sub_param_dict)
166
+ return output
167
+
168
+
169
+ def get_activation_quantization_shared_parameters(param_dict):
170
+ output = {}
171
+ if SHARED_PARAMETERS in param_dict.keys():
172
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
173
+ output[ACTIVATION_QUANTIZATION_ENABLED] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZATION_ENABLED,
174
+ ACTIVATION_QUANTIZATION_ENABLED_DEFAULT)
175
+ output[ACTIVATION_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_TYPE,
176
+ ACTIVATION_QUANTIZE_TYPE_DEFAULT)
177
+ assert output[ACTIVATION_QUANTIZE_TYPE] in [
178
+ ACTIVATION_QUANTIZE_SYMMETRIC, ACTIVATION_QUANTIZE_ASYMMETRIC
179
+ ], f"Invalid activation quantize type. Supported types: [{ACTIVATION_QUANTIZE_SYMMETRIC}, {ACTIVATION_QUANTIZE_ASYMMETRIC}]"
180
+ output[ACTIVATION_QUANTIZE_RANGE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_RANGE,
181
+ ACTIVATION_QUANTIZE_RANGE_DEFAULT)
182
+ assert output[ACTIVATION_QUANTIZE_RANGE] in [
183
+ ACTIVATION_QUANTIZE_RANGE_DYNAMIC, ACTIVATION_QUANTIZE_RANGE_STATIC
184
+ ], f"Invalid activation quantize range calibration. Supported types: [{ACTIVATION_QUANTIZE_RANGE_DYNAMIC}, {ACTIVATION_QUANTIZE_RANGE_STATIC}]"
185
+ output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict,
186
+ ACTIVATION_QUANTIZE_SCHEDULE_OFFSET,
187
+ ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
188
+ else:
189
+ output[ACTIVATION_QUANTIZATION_ENABLED] = ACTIVATION_QUANTIZATION_ENABLED_DEFAULT
190
+ output[ACTIVATION_QUANTIZE_TYPE] = ACTIVATION_QUANTIZE_TYPE_DEFAULT
191
+ output[ACTIVATION_QUANTIZE_RANGE] = ACTIVATION_QUANTIZE_RANGE_DEFAULT
192
+ output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
193
+ return output
194
+
195
+
196
+ def get_activation_quantization_different_groups(param_dict):
197
+ output = {}
198
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
199
+
200
+ def get_params(name, group_dict):
201
+ assert ACTIVATION_QUANTIZE_BITS in group_dict.keys(
202
+ ), f"{ACTIVATION_QUANTIZE_BITS} must be specified for activation quantization group {name}"
203
+ return group_dict
204
+
205
+ for k, v in sub_param_dict.items():
206
+ output[k] = {}
207
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
208
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
209
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
210
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
211
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
212
+
213
+ return output
214
+
215
+
216
+ def get_sparse_pruning(param_dict):
217
+ output = {}
218
+ if SPARSE_PRUNING not in param_dict.keys():
219
+ param_dict[SPARSE_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
220
+ sub_param_dict = param_dict[SPARSE_PRUNING]
221
+ # shared parameters
222
+ output[SHARED_PARAMETERS] = get_sparse_pruning_shared_parameters(sub_param_dict)
223
+ # each sub-groups
224
+ if output[SHARED_PARAMETERS][SPARSE_PRUNING_ENABLED] and output[SHARED_PARAMETERS][
225
+ SPARSE_PRUNING_METHOD] != SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
226
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
227
+ ), f"Sparse Pruning is enabled and not snip_momentum method, {DIFFERENT_GROUPS} must be specified"
228
+ output[DIFFERENT_GROUPS] = get_sparse_pruning_different_groups(sub_param_dict)
229
+ return output
230
+
231
+
232
+ def get_sparse_pruning_shared_parameters(param_dict):
233
+ output = {}
234
+
235
+ if SHARED_PARAMETERS in param_dict.keys():
236
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
237
+ output[SPARSE_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_ENABLED,
238
+ SPARSE_PRUNING_ENABLED_DEFAULT)
239
+ output[SPARSE_PRUNING_METHOD] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_METHOD,
240
+ SPARSE_PRUNING_METHOD_DEFAULT)
241
+ assert output[SPARSE_PRUNING_METHOD] in [
242
+ SPARSE_PRUNING_METHOD_L1, SPARSE_PRUNING_METHOD_TOPK, SPARSE_PRUNING_METHOD_SNIP_MOMENTUM
243
+ ], f"Invalid sparse pruning method. Supported types: [{SPARSE_PRUNING_METHOD_L1}, {SPARSE_PRUNING_METHOD_TOPK}, {SPARSE_PRUNING_METHOD_SNIP_MOMENTUM}]"
244
+ output[SPARSE_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_SCHEDULE_OFFSET,
245
+ SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT)
246
+ if output[SPARSE_PRUNING_METHOD] == SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
247
+ output[SPARSE_PRUNING_BLOCK_PATTERN] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_BLOCK_PATTERN,
248
+ SPARSE_PRUNING_BLOCK_PATTERN_DEFAULT)
249
+ output[SPARSE_PRUNING_DENSE_RATIO] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_DENSE_RATIO,
250
+ SPARSE_PRUNING_DENSE_RATIO_DEFAULT)
251
+ assert output[SPARSE_PRUNING_DENSE_RATIO] > 0 and output[
252
+ SPARSE_PRUNING_DENSE_RATIO] < 1, f"Invalid dense_ratio value. Must be between 0 and 1"
253
+ output[SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE] = get_scalar_param(
254
+ sub_param_dict, SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE, SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE_DEFAULT)
255
+ output[SPARSE_PRUNING_EXCLUDED_MODULES] = get_list_param(sub_param_dict, SPARSE_PRUNING_EXCLUDED_MODULES,
256
+ SPARSE_PRUNING_EXCLUDED_MODULES_DEFAULT)
257
+ output[SPARSE_PRUNING_SCHEDULE_OFFSET_END] = get_scalar_param(sub_param_dict,
258
+ SPARSE_PRUNING_SCHEDULE_OFFSET_END,
259
+ output[SPARSE_PRUNING_SCHEDULE_OFFSET])
260
+ assert output[SPARSE_PRUNING_SCHEDULE_OFFSET] <= output[
261
+ SPARSE_PRUNING_SCHEDULE_OFFSET_END], f"Invalid schedule_offset and schedule_offset_end values"
262
+ else:
263
+ output[SPARSE_PRUNING_ENABLED] = SPARSE_PRUNING_ENABLED_DEFAULT
264
+ output[SPARSE_PRUNING_METHOD] = SPARSE_PRUNING_METHOD_DEFAULT
265
+ output[SPARSE_PRUNING_SCHEDULE_OFFSET] = SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT
266
+ return output
267
+
268
+
269
+ def get_sparse_pruning_different_groups(param_dict):
270
+ output = {}
271
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
272
+
273
+ def get_params(name, group_dict):
274
+ assert SPARSE_PRUNING_DENSE_RATIO in group_dict.keys(
275
+ ), f"{SPARSE_PRUNING_DENSE_RATIO} must be specified for sparse pruning group {name}"
276
+ return group_dict
277
+
278
+ for k, v in sub_param_dict.items():
279
+ output[k] = {}
280
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
281
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
282
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
283
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
284
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
285
+
286
+ return output
287
+
288
+
289
+ def get_row_pruning(param_dict):
290
+ output = {}
291
+ if ROW_PRUNING not in param_dict.keys():
292
+ param_dict[ROW_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
293
+ sub_param_dict = param_dict[ROW_PRUNING]
294
+ # shared parameters
295
+ output[SHARED_PARAMETERS] = get_row_pruning_shared_parameters(sub_param_dict)
296
+ # each sub-groups
297
+ if output[SHARED_PARAMETERS][ROW_PRUNING_ENABLED]:
298
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
299
+ ), f"Row Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
300
+ output[DIFFERENT_GROUPS] = get_row_pruning_different_groups(sub_param_dict)
301
+ return output
302
+
303
+
304
+ def get_row_pruning_shared_parameters(param_dict):
305
+ output = {}
306
+ if SHARED_PARAMETERS in param_dict.keys():
307
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
308
+ output[ROW_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, ROW_PRUNING_ENABLED,
309
+ ROW_PRUNING_ENABLED_DEFAULT)
310
+ output[ROW_PRUNING_METHOD] = get_scalar_param(sub_param_dict, ROW_PRUNING_METHOD, ROW_PRUNING_METHOD_DEFAULT)
311
+ assert output[ROW_PRUNING_METHOD] in [
312
+ ROW_PRUNING_METHOD_L1, ROW_PRUNING_METHOD_TOPK
313
+ ], f"Invalid row pruning method. Supported types: [{ROW_PRUNING_METHOD_L1}, {ROW_PRUNING_METHOD_TOPK}]"
314
+ output[ROW_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, ROW_PRUNING_SCHEDULE_OFFSET,
315
+ ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT)
316
+ else:
317
+ output[ROW_PRUNING_ENABLED] = ROW_PRUNING_ENABLED_DEFAULT
318
+ output[ROW_PRUNING_METHOD] = ROW_PRUNING_METHOD_DEFAULT
319
+ output[ROW_PRUNING_SCHEDULE_OFFSET] = ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT
320
+ return output
321
+
322
+
323
+ def get_row_pruning_different_groups(param_dict):
324
+ output = {}
325
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
326
+
327
+ def get_params(name, group_dict):
328
+ assert ROW_PRUNING_DENSE_RATIO in group_dict.keys(
329
+ ), f"{ROW_PRUNING_DENSE_RATIO} must be specified for row pruning group {name}"
330
+ return group_dict
331
+
332
+ for k, v in sub_param_dict.items():
333
+ output[k] = {}
334
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
335
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
336
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
337
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
338
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
339
+ return output
340
+
341
+
342
+ def get_head_pruning(param_dict):
343
+ output = {}
344
+ if HEAD_PRUNING not in param_dict.keys():
345
+ param_dict[HEAD_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
346
+ sub_param_dict = param_dict[HEAD_PRUNING]
347
+ # shared parameters
348
+ output[SHARED_PARAMETERS] = get_head_pruning_shared_parameters(sub_param_dict)
349
+ # each sub-groups
350
+ if output[SHARED_PARAMETERS][HEAD_PRUNING_ENABLED]:
351
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
352
+ ), f"Head Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
353
+ output[DIFFERENT_GROUPS] = get_head_pruning_different_groups(sub_param_dict)
354
+ return output
355
+
356
+
357
+ def get_head_pruning_shared_parameters(param_dict):
358
+ output = {}
359
+ if SHARED_PARAMETERS in param_dict.keys():
360
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
361
+ output[HEAD_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, HEAD_PRUNING_ENABLED,
362
+ HEAD_PRUNING_ENABLED_DEFAULT)
363
+ output[HEAD_PRUNING_METHOD] = get_scalar_param(sub_param_dict, HEAD_PRUNING_METHOD,
364
+ HEAD_PRUNING_METHOD_DEFAULT)
365
+ assert output[HEAD_PRUNING_METHOD] in [
366
+ HEAD_PRUNING_METHOD_L1, HEAD_PRUNING_METHOD_TOPK
367
+ ], f"Invalid head pruning method. Supported types: [{HEAD_PRUNING_METHOD_L1}, {HEAD_PRUNING_METHOD_TOPK}]"
368
+ output[HEAD_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, HEAD_PRUNING_SCHEDULE_OFFSET,
369
+ HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT)
370
+ if output[HEAD_PRUNING_ENABLED]:
371
+ assert HEAD_PRUNING_NUM_HEADS in sub_param_dict.keys(
372
+ ), f"{HEAD_PRUNING_NUM_HEADS} must be specified for head pruning"
373
+ output[HEAD_PRUNING_NUM_HEADS] = sub_param_dict[HEAD_PRUNING_NUM_HEADS]
374
+ else:
375
+ output[HEAD_PRUNING_ENABLED] = HEAD_PRUNING_ENABLED_DEFAULT
376
+ output[HEAD_PRUNING_METHOD] = HEAD_PRUNING_METHOD_DEFAULT
377
+ output[HEAD_PRUNING_SCHEDULE_OFFSET] = HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT
378
+ return output
379
+
380
+
381
+ def get_head_pruning_different_groups(param_dict):
382
+ output = {}
383
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
384
+
385
+ def get_params(name, group_dict):
386
+ assert HEAD_PRUNING_DENSE_RATIO in group_dict.keys(
387
+ ), f"dense_ratio must be specified for head pruning group {name}"
388
+ return group_dict
389
+
390
+ for k, v in sub_param_dict.items():
391
+ output[k] = {}
392
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
393
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
394
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
395
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
396
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
397
+ return output
398
+
399
+
400
+ def get_channel_pruning(param_dict):
401
+ output = {}
402
+ if CHANNEL_PRUNING not in param_dict.keys():
403
+ param_dict[CHANNEL_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
404
+ sub_param_dict = param_dict[CHANNEL_PRUNING]
405
+ # shared parameters
406
+ output[SHARED_PARAMETERS] = get_channel_pruning_shared_parameters(sub_param_dict)
407
+ # each sub-groups
408
+ if output[SHARED_PARAMETERS][CHANNEL_PRUNING_ENABLED]:
409
+ assert DIFFERENT_GROUPS in sub_param_dict.keys(
410
+ ), f"Channel Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
411
+ output[DIFFERENT_GROUPS] = get_channel_pruning_different_groups(sub_param_dict)
412
+ return output
413
+
414
+
415
+ def get_channel_pruning_shared_parameters(param_dict):
416
+ output = {}
417
+ if SHARED_PARAMETERS in param_dict.keys():
418
+ sub_param_dict = param_dict[SHARED_PARAMETERS]
419
+ output[CHANNEL_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_ENABLED,
420
+ CHANNEL_PRUNING_ENABLED_DEFAULT)
421
+ output[CHANNEL_PRUNING_METHOD] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_METHOD,
422
+ CHANNEL_PRUNING_METHOD_DEFAULT)
423
+ assert output[CHANNEL_PRUNING_METHOD] in [
424
+ CHANNEL_PRUNING_METHOD_L1, CHANNEL_PRUNING_METHOD_TOPK
425
+ ], f"Invalid channel pruning method. Supported types: [{CHANNEL_PRUNING_METHOD_L1}, {CHANNEL_PRUNING_METHOD_TOPK}]"
426
+ output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_SCHEDULE_OFFSET,
427
+ CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT)
428
+ else:
429
+ output[CHANNEL_PRUNING_ENABLED] = CHANNEL_PRUNING_ENABLED_DEFAULT
430
+ output[CHANNEL_PRUNING_METHOD] = CHANNEL_PRUNING_METHOD_DEFAULT
431
+ output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT
432
+ return output
433
+
434
+
435
+ def get_channel_pruning_different_groups(param_dict):
436
+ output = {}
437
+ sub_param_dict = param_dict[DIFFERENT_GROUPS]
438
+
439
+ def get_params(name, group_dict):
440
+ assert CHANNEL_PRUNING_DENSE_RATIO in group_dict.keys(
441
+ ), f"{CHANNEL_PRUNING_DENSE_RATIO} must be specified for channel pruning group {name}"
442
+ return group_dict
443
+
444
+ for k, v in sub_param_dict.items():
445
+ output[k] = {}
446
+ output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
447
+ output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
448
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
449
+ output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
450
+ sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
451
+
452
+ return output
venv/lib/python3.10/site-packages/deepspeed/compression/constants.py ADDED
@@ -0,0 +1,188 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ #########################################
7
+ # Compression Methods
8
+ # It has several sub-components
9
+ # #########################################
10
+ COMPRESSION_TRAINING = "compression_training"
11
+ SHARED_PARAMETERS = "shared_parameters"
12
+ DIFFERENT_GROUPS = "different_groups"
13
+ TECHNIQUE_ENABLED = "enabled"
14
+ TECHNIQUE_SCHEDULE_OFFSET = "schedule_offset"
15
+ TECHNIQUE_SCHEDULE_OFFSET_END = "schedule_offset_end"
16
+ DIFFERENT_GROUPS_PARAMETERS = "params"
17
+ DIFFERENT_GROUPS_MODULE_SCOPE = "modules"
18
+ DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT = "*"
19
+ DIFFERENT_GROUPS_RELATED_MODULE_SCOPE = "related_modules"
20
+ DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT = None
21
+ # COMPRESSION_TRAINING_ENABLED = "enabled"
22
+ # COMPRESSION_TRAINING_ENABLED_DEFAULT = False
23
+
24
+ ####
25
+ # Layer Reduction
26
+ ####
27
+ LAYER_REDUCTION = "layer_reduction"
28
+ LAYER_REDUCTION_ENABLED = "enabled"
29
+ LAYER_REDUCTION_ENABLED_DEFAULT = False
30
+ KEEP_NUMBER_LAYER = "keep_number_layer"
31
+ MODULE_NAME_PREFIX = "module_name_prefix"
32
+ TEACHER_LAYER = "teacher_layer"
33
+ OTHER_MODULE_NAME = "other_module_name"
34
+
35
+ ####
36
+ # Weight Quantization
37
+ ####
38
+ WEIGHT_QUANTIZATION = "weight_quantization"
39
+
40
+ WEIGHT_QUANTIZATION_PERIOD = "quantization_period"
41
+ WEIGHT_QUANTIZATION_PERIOD_DEFAULT = 1
42
+
43
+ WEIGHT_QUANTIZE_IN_FORWARD_ENABLED = "quantize_weight_in_forward"
44
+ WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT = False
45
+
46
+ WEIGHT_QUANTIZE_ENABLED = TECHNIQUE_ENABLED
47
+ WEIGHT_QUANTIZE_ENABLED_DEFAULT = False
48
+
49
+ WEIGHT_QUANTIZE_KERNEL = "quantizer_kernel"
50
+ WEIGHT_QUANTIZE_KERNEL_DEFAULT = False
51
+
52
+ WEIGHT_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
53
+ WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 0
54
+
55
+ WEIGHT_QUANTIZE_GROUPS = "quantize_groups"
56
+ WEIGHT_QUANTIZE_GROUPS_DEFAULT = 1
57
+
58
+ WEIGHT_QUANTIZE_VERBOSE = "quantize_verbose"
59
+ WEIGHT_QUANTIZE_VERBOSE_DEFAULT = False
60
+
61
+ WEIGHT_QUANTIZE_TYPE = "quantization_type"
62
+ WEIGHT_QUANTIZE_TYPE_DEFAULT = "symmetric"
63
+ WEIGHT_QUANTIZE_SYMMETRIC = "symmetric"
64
+ WEIGHT_QUANTIZE_ASYMMETRIC = "asymmetric"
65
+
66
+ WEIGHT_QUANTIZE_ROUNDING = "rounding"
67
+ WEIGHT_QUANTIZE_ROUNDING_DEFAULT = "nearest"
68
+ WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING = "stochastic"
69
+ WEIGHT_QUANTIZE_NEAREST_ROUNDING = "nearest"
70
+ # may be deleted for a cleaner version
71
+ WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE = "fp16_mixed_quantize"
72
+
73
+ WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED = "enabled"
74
+ WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT = False
75
+
76
+ WEIGHT_QUANTIZE_CHANGE_RATIO = "quantize_change_ratio"
77
+ WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT = 0.001
78
+
79
+ WEIGHT_QUANTIZE_START_BITS = "start_bits"
80
+ WEIGHT_QUANTIZE_TARGET_BITS = "target_bits"
81
+ ###
82
+ # Activation Quantization
83
+ ###
84
+ ACTIVATION_QUANTIZATION = "activation_quantization"
85
+
86
+ ACTIVATION_QUANTIZATION_ENABLED = TECHNIQUE_ENABLED
87
+ ACTIVATION_QUANTIZATION_ENABLED_DEFAULT = False
88
+
89
+ ACTIVATION_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
90
+ ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 1000
91
+
92
+ ACTIVATION_QUANTIZE_TYPE = "quantization_type"
93
+ ACTIVATION_QUANTIZE_TYPE_DEFAULT = "symmetric"
94
+ ACTIVATION_QUANTIZE_SYMMETRIC = "symmetric"
95
+ ACTIVATION_QUANTIZE_ASYMMETRIC = "asymmetric"
96
+
97
+ ACTIVATION_QUANTIZE_RANGE = 'range_calibration'
98
+ ACTIVATION_QUANTIZE_RANGE_DEFAULT = 'dynamic'
99
+ ACTIVATION_QUANTIZE_RANGE_STATIC = 'static'
100
+ ACTIVATION_QUANTIZE_RANGE_DYNAMIC = 'dynamic'
101
+
102
+ ACTIVATION_QUANTIZE_BITS = "bits"
103
+ ###
104
+ # Sparse Pruning
105
+ ###
106
+ SPARSE_PRUNING = "sparse_pruning"
107
+
108
+ SPARSE_PRUNING_ENABLED = TECHNIQUE_ENABLED
109
+ SPARSE_PRUNING_ENABLED_DEFAULT = False
110
+
111
+ SPARSE_PRUNING_METHOD = "method"
112
+ SPARSE_PRUNING_METHOD_DEFAULT = "l1"
113
+ SPARSE_PRUNING_METHOD_L1 = "l1"
114
+ SPARSE_PRUNING_METHOD_TOPK = "topk"
115
+ SPARSE_PRUNING_METHOD_SNIP_MOMENTUM = "snip_momentum"
116
+
117
+ SPARSE_PRUNING_BLOCK_PATTERN = "block_pattern"
118
+ SPARSE_PRUNING_BLOCK_PATTERN_DEFAULT = "4x1"
119
+
120
+ SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE = "schedule_offset_stride"
121
+ SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE_DEFAULT = 1
122
+
123
+ SPARSE_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
124
+ SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
125
+
126
+ SPARSE_PRUNING_SCHEDULE_OFFSET_END = TECHNIQUE_SCHEDULE_OFFSET_END
127
+ SPARSE_PRUNING_SCHEDULE_OFFSET_END_DEFAULT = SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT
128
+
129
+ SPARSE_PRUNING_DENSE_RATIO = "dense_ratio"
130
+ SPARSE_PRUNING_DENSE_RATIO_DEFAULT = 0.1
131
+
132
+ SPARSE_PRUNING_EXCLUDED_MODULES = "excluded_modules"
133
+ SPARSE_PRUNING_EXCLUDED_MODULES_DEFAULT = []
134
+ ###
135
+ # Row Pruning
136
+ ###
137
+ ROW_PRUNING = "row_pruning"
138
+
139
+ ROW_PRUNING_ENABLED = TECHNIQUE_ENABLED
140
+ ROW_PRUNING_ENABLED_DEFAULT = False
141
+
142
+ ROW_PRUNING_METHOD = "method"
143
+ ROW_PRUNING_METHOD_DEFAULT = "l1"
144
+ ROW_PRUNING_METHOD_L1 = "l1"
145
+ ROW_PRUNING_METHOD_TOPK = "topk"
146
+
147
+ ROW_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
148
+ ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
149
+
150
+ ROW_PRUNING_DENSE_RATIO = "dense_ratio"
151
+
152
+ ###
153
+ # Head Pruning
154
+ ###
155
+ HEAD_PRUNING = "head_pruning"
156
+
157
+ HEAD_PRUNING_ENABLED = TECHNIQUE_ENABLED
158
+ HEAD_PRUNING_ENABLED_DEFAULT = False
159
+
160
+ HEAD_PRUNING_METHOD = "method"
161
+ HEAD_PRUNING_METHOD_DEFAULT = "topk"
162
+ HEAD_PRUNING_METHOD_L1 = "l1"
163
+ HEAD_PRUNING_METHOD_TOPK = "topk"
164
+
165
+ HEAD_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
166
+ HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
167
+
168
+ HEAD_PRUNING_NUM_HEADS = "num_heads"
169
+
170
+ HEAD_PRUNING_DENSE_RATIO = "dense_ratio"
171
+
172
+ ###
173
+ # Channel Pruning
174
+ ###
175
+ CHANNEL_PRUNING = "channel_pruning"
176
+
177
+ CHANNEL_PRUNING_ENABLED = TECHNIQUE_ENABLED
178
+ CHANNEL_PRUNING_ENABLED_DEFAULT = False
179
+
180
+ CHANNEL_PRUNING_METHOD = "method"
181
+ CHANNEL_PRUNING_METHOD_DEFAULT = "l1"
182
+ CHANNEL_PRUNING_METHOD_L1 = "l1"
183
+ CHANNEL_PRUNING_METHOD_TOPK = "topk"
184
+
185
+ CHANNEL_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
186
+ CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
187
+
188
+ CHANNEL_PRUNING_DENSE_RATIO = "dense_ratio"
venv/lib/python3.10/site-packages/deepspeed/compression/helper.py ADDED
@@ -0,0 +1,322 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ from .basic_layer import Embedding_Compress, LinearLayer_Compress, Conv2dLayer_Compress, BNLayer_Compress, ColumnParallelLinear_Compress, RowParallelLinear_Compress
8
+ from .constants import *
9
+ from deepspeed.utils import logger
10
+
11
+ try:
12
+ from neural_compressor.compression import pruner as nc_pruner
13
+ except ImportError as e:
14
+ nc_pruner = None
15
+
16
+
17
+ def recursive_getattr(model, module_name):
18
+ """
19
+ Recursively get the attribute of a module.
20
+ Args:
21
+ model (`torch.nn.Module`)
22
+ The model to get the attribute from.
23
+ module_name (`str`)
24
+ The name of the module to get the attribute from.
25
+ """
26
+ split_list = module_name.split('.')
27
+ output = model
28
+ for name in split_list:
29
+ output = getattr(output, name)
30
+ return output
31
+
32
+
33
+ def recursive_setattr(model, module_name, module):
34
+ """
35
+ Recursively set the attribute of a module.
36
+ Args:
37
+ model (`torch.nn.Module`)
38
+ The model to set the attribute in.
39
+ module_name (`str`)
40
+ The name of the module to set the attribute in.
41
+ module (`torch.nn.Module`)
42
+ The module to set the attribute to.
43
+ """
44
+ split_list = module_name.split('.')
45
+ output = model
46
+ for name in split_list[:-1]:
47
+ output = getattr(output, name)
48
+ output.__setattr__(split_list[-1], module)
49
+
50
+
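A small illustrative use of the two helpers above, on a made-up nested module (not from the DeepSpeed sources):

import torch

net = torch.nn.Sequential()
net.add_module("encoder", torch.nn.Sequential())
net.encoder.add_module("proj", torch.nn.Linear(4, 4))

proj = recursive_getattr(net, "encoder.proj")                    # fetch a nested sub-module by dotted name
recursive_setattr(net, "encoder.proj", torch.nn.Linear(4, 2))    # swap it for a replacement module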
51
+ def module_replacement(model, module_name, compression_technique=None, mpu=None):
52
+ """
53
+ Replace a module with a new module.
54
+ Args:
55
+ model (`torch.nn.Module`)
56
+ The model to replace the module in.
57
+ module_name (`str`)
58
+ The name of the module to replace.
59
+ compression_technique (`str`)
60
+ The compression technique to use for the new module.
61
+ """
62
+
63
+ # Get the old module
64
+ old_module = recursive_getattr(model, module_name)
65
+
66
+ need_bias = False
67
+ if hasattr(old_module, 'bias') and old_module.bias is not None:
68
+ need_bias = True
69
+
70
+ # Initialize the new module
71
+ if isinstance(old_module, LinearLayer_Compress) or isinstance(old_module, torch.nn.Linear):
72
+ if isinstance(old_module, LinearLayer_Compress):
73
+ new_module = old_module
74
+ else:
75
+ new_module = LinearLayer_Compress(old_module.in_features, old_module.out_features,
76
+ bias=need_bias).to(device=old_module.weight.device,
77
+ dtype=old_module.weight.dtype)
78
+ new_module.weight.data = old_module.weight.data
79
+ if need_bias:
80
+ new_module.bias.data = old_module.bias.data
81
+ elif isinstance(old_module, Conv2dLayer_Compress) or isinstance(old_module, torch.nn.Conv2d):
82
+ if isinstance(old_module, Conv2dLayer_Compress):
83
+ new_module = old_module
84
+ else:
85
+ new_module = Conv2dLayer_Compress(old_module.in_channels, old_module.out_channels, old_module.kernel_size, old_module.stride, old_module.padding, \
86
+ old_module.dilation, old_module.groups, need_bias, \
87
+ old_module.padding_mode).to(device=old_module.weight.device, dtype=old_module.weight.dtype)
88
+ new_module.weight.data = old_module.weight.data
89
+ if need_bias:
90
+ new_module.bias.data = old_module.bias.data
91
+ elif isinstance(old_module, torch.nn.BatchNorm2d):
92
+ new_module = BNLayer_Compress(old_module.num_features, old_module.eps, old_module.momentum, old_module.affine,
93
+ old_module.track_running_stats).to(old_module.weight.device,
94
+ old_module.weight.dtype)
95
+ new_module.weight.data = old_module.weight.data
96
+ if need_bias:
97
+ new_module.bias.data = old_module.bias.data
98
+ new_module.running_mean.data = old_module.running_mean.data
99
+ new_module.running_var.data = old_module.running_var.data
100
+ elif isinstance(old_module, Embedding_Compress) or isinstance(old_module, torch.nn.Embedding):
101
+ if isinstance(old_module, Embedding_Compress):
102
+ new_module = old_module
103
+ else:
104
+ new_module = Embedding_Compress(old_module.num_embeddings, old_module.embedding_dim, old_module.padding_idx, old_module.max_norm, old_module.norm_type, \
105
+ old_module.scale_grad_by_freq, old_module.sparse).to(device=old_module.weight.device, dtype=old_module.weight.dtype)
106
+ new_module.weight.data = old_module.weight.data
107
+ elif mpu is not None and (isinstance(old_module, ColumnParallelLinear_Compress)
108
+ or isinstance(old_module, mpu.ColumnParallelLinear)):
109
+ if isinstance(old_module, ColumnParallelLinear_Compress):
110
+ new_module = old_module
111
+ else:
112
+ new_module = ColumnParallelLinear_Compress(mpu,
113
+ old_module.input_size,
114
+ old_module.output_size,
115
+ gather_output=old_module.gather_output,
116
+ skip_bias_add=old_module.skip_bias_add,
117
+ bias=need_bias).to(device=old_module.weight.device,
118
+ dtype=old_module.weight.dtype)
119
+ new_module.weight.data = old_module.weight.data
120
+ if need_bias:
121
+ new_module.bias.data = old_module.bias.data
122
+ elif mpu is not None and (isinstance(old_module, RowParallelLinear_Compress)
123
+ or isinstance(old_module, mpu.RowParallelLinear)):
124
+ if isinstance(old_module, RowParallelLinear_Compress):
125
+ new_module = old_module
126
+ else:
127
+ new_module = RowParallelLinear_Compress(mpu,
128
+ old_module.input_size,
129
+ old_module.output_size,
130
+ input_is_parallel=old_module.input_is_parallel,
131
+ skip_bias_add=old_module.skip_bias_add,
132
+ bias=need_bias).to(device=old_module.weight.device,
133
+ dtype=old_module.weight.dtype)
134
+ new_module.weight.data = old_module.weight.data
135
+ if need_bias:
136
+ new_module.bias.data = old_module.bias.data
137
+ else:
138
+ new_module = None
139
+
140
+ if compression_technique is not None:
141
+ for k, v in compression_technique.items():
142
+ if k == SPARSE_PRUNING:
143
+ if v[SPARSE_PRUNING_ENABLED]:
144
+ new_module.enable_sparse_pruning(v[SPARSE_PRUNING_DENSE_RATIO], v[SPARSE_PRUNING_METHOD])
145
+ elif k == ROW_PRUNING:
146
+ if v[ROW_PRUNING_ENABLED]:
147
+ new_module.enable_row_pruning(v[ROW_PRUNING_DENSE_RATIO], v[ROW_PRUNING_METHOD])
148
+ elif k == HEAD_PRUNING:
149
+ if v[HEAD_PRUNING_ENABLED]:
150
+ new_module.enable_head_pruning(v[HEAD_PRUNING_DENSE_RATIO], v[HEAD_PRUNING_METHOD],
151
+ v[HEAD_PRUNING_NUM_HEADS])
152
+ elif k == ACTIVATION_QUANTIZATION:
153
+ if v[ACTIVATION_QUANTIZATION_ENABLED]:
154
+ new_module.enable_activation_quantization(v[ACTIVATION_QUANTIZE_BITS], v[ACTIVATION_QUANTIZE_TYPE],
155
+ v[ACTIVATION_QUANTIZE_RANGE])
156
+ elif k == WEIGHT_QUANTIZATION:
157
+ if v[WEIGHT_QUANTIZE_ENABLED]:
158
+ new_module.enable_weight_quantization(v[WEIGHT_QUANTIZE_START_BITS],
159
+ v[WEIGHT_QUANTIZE_TARGET_BITS],
160
+ v[WEIGHT_QUANTIZATION_PERIOD],
161
+ v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED],
162
+ v[WEIGHT_QUANTIZE_TYPE], v[WEIGHT_QUANTIZE_GROUPS])
163
+ elif k == CHANNEL_PRUNING:
164
+ if v[CHANNEL_PRUNING_ENABLED]:
165
+ new_module.enable_channel_pruning(v[CHANNEL_PRUNING_DENSE_RATIO], v[CHANNEL_PRUNING_METHOD])
166
+ else:
167
+ raise NotImplementedError('Compression technique {} is not implemented'.format(k))
168
+
169
+ # Replace the old module with the new one
170
+ recursive_setattr(model, module_name, new_module)
171
+
172
+
173
+ def is_module_compressible(module, mpu=None):
174
+ ret = isinstance(module, torch.nn.Linear) or \
175
+ isinstance(module, torch.nn.Conv2d) or \
176
+ isinstance(module, torch.nn.Embedding) or \
177
+ isinstance(module, torch.nn.BatchNorm2d)
178
+
179
+ if mpu is not None:
180
+ ret = ret or isinstance(module, mpu.RowParallelLinear) or isinstance(module, mpu.ColumnParallelLinear)
181
+
182
+ return ret
183
+
184
+
185
+ def compression_preparation(model, compression_technique_list, mpu):
186
+ """
187
+ Prepare the compression techniques of a model.
188
+ Args:
189
+ model (`torch.nn.Module`)
190
+ The model to prepare the compression techniques of.
191
+ compression_technique_list (`list`)
192
+ The list of compression techniques to apply to the model.
194
+ """
195
+ # Here we first replace all module with our linear wrapper
196
+ for module_name, module in model.named_modules():
197
+ if is_module_compressible(module, mpu):
198
+ module_replacement(model, module_name, mpu=mpu)
199
+ for module_name_lists, _, compression_technique in compression_technique_list:
200
+ for mnl in module_name_lists:
201
+ for module_name in mnl:
202
+ module_replacement(model, module_name, compression_technique)
203
+
204
+ return model
205
+
206
+
207
+ def fix_compression(model, module_name, compression_technique, mask=None, dim_reduction=False):
208
+ """
209
+ Fix the compression technique of a module.
210
+ Args:
211
+ model (`torch.nn.Module`)
212
+ The model to fix the compression technique of.
213
+ module_name (`str`)
214
+ The name of the module to fix the compression technique of.
215
+ compression_technique (`str`)
216
+ The compression technique to fix the module to.
217
+ """
218
+ # Here we can make things much simpler by just replacing the module
219
+ module = recursive_getattr(model, module_name)
220
+ for k, v in compression_technique.items():
221
+ if k == WEIGHT_QUANTIZATION and v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] and v[WEIGHT_QUANTIZE_ENABLED]:
222
+ return module.fix_weight_quantization()
223
+ elif k == SPARSE_PRUNING and v[SPARSE_PRUNING_ENABLED]:
224
+ return module.fix_sparse_pruning_helper()
225
+ elif k == ROW_PRUNING and (v[ROW_PRUNING_ENABLED] or mask is not None):
226
+ return module.fix_row_col_pruning_helper(mask, dim_reduction=dim_reduction)
227
+ elif k == HEAD_PRUNING and (v[HEAD_PRUNING_ENABLED] or mask is not None):
228
+ return module.fix_head_pruning_helper(mask, v[HEAD_PRUNING_NUM_HEADS], dim_reduction=dim_reduction)
229
+ elif k == CHANNEL_PRUNING and (v[CHANNEL_PRUNING_ENABLED] or mask is not None):
230
+ return module.fix_channel_pruning_helper(mask, dim_reduction=dim_reduction)
231
+
232
+
233
+ def convert_conv1d_to_linear(model, convert_type):
234
+ '''
235
+ This is a helper function to convert Conv1D to Linear (e.g., converting GPT-2 from HF)
236
+ '''
237
+ if hasattr(model, 'module'):
238
+ c_model = model.module
239
+ else:
240
+ c_model = model
241
+
242
+ for name, module in c_model.named_modules():
243
+ if isinstance(module, convert_type):
244
+ old_module = recursive_getattr(c_model, name)
245
+ new_module = torch.nn.Linear(old_module.weight.data.size(0),
246
+ old_module.weight.data.size(1),
247
+ bias=True if old_module.bias is not None else False)
248
+ new_module.weight.data = old_module.weight.data.t().contiguous()
249
+ if new_module.bias is not None:
250
+ new_module.bias.data = old_module.bias.data.view(-1)
251
+
252
+ recursive_setattr(c_model, name, new_module)
253
+
254
+ return model
255
+
256
+
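A hedged usage sketch for a Hugging Face GPT-2 model, whose transposed-weight Conv1D blocks are what this helper targets (assumes the transformers package is installed; older versions export Conv1D from transformers.modeling_utils instead):

from transformers import GPT2Model
from transformers.pytorch_utils import Conv1D   # assumption: recent transformers layout

gpt2 = GPT2Model.from_pretrained("gpt2")
gpt2 = convert_conv1d_to_linear(gpt2, Conv1D)   # every Conv1D becomes an equivalent torch.nn.Linear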
257
+ def generate_pruners(config, model):
258
+ """Generate pruners.
259
+ Args:
260
+ config (`neural_compressor.WeightPruningConfig`)
261
+ The object to the class WeightPruningConfig.
262
+ model (`torch.nn.module`)
263
+ The torch module object to be pruned.
264
+ """
265
+ assert nc_pruner is not None, "please ensure the neural_compressor python package is installed via pip or conda if you want to use snip_momentum sparse pruning"
266
+ from nc_pruner.utils import process_config, parse_to_prune
267
+ from nc_pruner.pruners import get_pruner
268
+ assert isinstance(model, torch.nn.Module)
269
+ pruners_info = process_config(config)
270
+ pruners = []
271
+ for info in pruners_info:
272
+ modules = parse_to_prune(info, model)
273
+ if modules == {}:
274
+ logger.warning("one pruner hooked no layers, please check your configuration")
275
+
276
+ pruners.append(get_pruner(info, modules))
277
+ info['modules'] = [key for key in modules.keys()]
278
+ info['len_of_modules'] = len(info['modules'])
279
+ logger.info(info)
280
+ return pruners
281
+
282
+
283
+ def register_on_step_begin(model):
284
+ """Mount on_step_begin to the model.
285
+ Args:
286
+ model (`torch.nn.module`)
287
+ The torch module object to be pruned.
288
+ """
289
+
290
+ def hook(module, input):
291
+ for pruner in module.pruners:
292
+ pruner.on_step_begin(0)
293
+
294
+ hook_handle = model.register_forward_pre_hook(hook)
295
+ return hook_handle
296
+
297
+
298
+ def rewrite_optimizer_step(opt: torch.optim.Optimizer):
299
+ """Mount on_before/after_optimizer_step to the optimizer.
300
+ Args:
301
+ model (`torch.opt.Optimizer`)
302
+ The torch optimizer object to be hooked.
303
+ """
304
+
305
+ def new_step(self, closure=None):
306
+ if hasattr(self, "pruners"):
307
+ for pruner in self.pruners:
308
+ pruner.on_before_optimizer_step()
309
+
310
+ if closure is not None:
311
+ res = self.orig_step(closure)
312
+ else:
313
+ res = self.orig_step()
314
+ if hasattr(self, "pruners"):
315
+ for pruner in self.pruners:
316
+ pruner.on_after_optimizer_step()
317
+ return res
318
+
319
+ opt.orig_step = opt.step
320
+ import types
321
+ opt.step = types.MethodType(new_step, opt)
322
+ return opt
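A minimal sketch of how the snip_momentum hooks could be wired up by hand (normally init_compression attaches the pruners to the model; the optimizer and the copied pruner list below are illustrative assumptions):

import torch

# Assumes `model` was prepared by init_compression with snip_momentum sparse pruning enabled,
# so that `model.pruners` exists and register_on_step_begin(model) has already been called.
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
optimizer.pruners = model.pruners
optimizer = rewrite_optimizer_step(optimizer)   # pruner callbacks now wrap every optimizer.step()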
venv/lib/python3.10/site-packages/deepspeed/compression/scheduler.py ADDED
@@ -0,0 +1,173 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .compress import get_module_name
7
+ from .constants import *
8
+ from .helper import recursive_getattr
9
+ from deepspeed.utils import logger
10
+
11
+
12
+ class compression_scheduler():
13
+ '''
14
+ Used to schedule different compression methods
15
+ '''
16
+
17
+ def __init__(self, model, compression_config):
18
+ self.model = model
19
+ self.compression_config = compression_config
20
+ self.make_init()
21
+ self.training_steps = 0
22
+ self.weight_quantization_enabled = False
23
+
24
+ self.verbose = {
25
+ WEIGHT_QUANTIZATION: False,
26
+ ACTIVATION_QUANTIZATION: False,
27
+ SPARSE_PRUNING: False,
28
+ HEAD_PRUNING: False,
29
+ ROW_PRUNING: False,
30
+ CHANNEL_PRUNING: False
31
+ }
32
+
33
+ def make_init(self):
34
+ self.different_compression_methods = {}
35
+ for method, method_content in self.compression_config.items():
36
+ if LAYER_REDUCTION in method:
37
+ continue
38
+ self.different_compression_methods[method] = {
39
+ TECHNIQUE_ENABLED: False,
40
+ SHARED_PARAMETERS: None,
41
+ DIFFERENT_GROUPS: []
42
+ }
43
+ exist_module_name = set()
44
+ shared_parameters = method_content[SHARED_PARAMETERS]
45
+ self.different_compression_methods[method][TECHNIQUE_ENABLED] = shared_parameters[TECHNIQUE_ENABLED]
46
+ self.different_compression_methods[method][SHARED_PARAMETERS] = shared_parameters
47
+
48
+ for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items():
49
+ module_name_list = []
50
+ for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]:
51
+ module_name, exist_module_name = get_module_name(group_name,
52
+ self.model,
53
+ key_word,
54
+ exist_module_name,
55
+ verbose=False)
56
+ module_name_list.extend(module_name)
57
+ if module_name_list:
58
+ self.different_compression_methods[method][DIFFERENT_GROUPS].append(
59
+ [group_name, module_name_list,
60
+ method_parameters.copy().pop('params')])
61
+
62
+ def check_weight_quantization(self):
63
+ # check weight quantization
64
+ wq = self.different_compression_methods[WEIGHT_QUANTIZATION]
65
+ if not wq[TECHNIQUE_ENABLED]:
66
+ return
67
+ else:
68
+ shared_parameters = wq[SHARED_PARAMETERS]
69
+ if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
70
+ for group_name, module_name_list, method_parameters in wq[DIFFERENT_GROUPS]:
71
+ for module_name in module_name_list:
72
+ module = recursive_getattr(self.model, module_name)
73
+ module.weight_quantization_enabled = True
74
+
75
+ if not self.verbose[WEIGHT_QUANTIZATION]:
76
+ logger.info(f'Weight quantization is enabled at step {self.training_steps}')
77
+ self.weight_quantization_enabled = True
78
+ self.verbose[WEIGHT_QUANTIZATION] = True
79
+
80
+ def check_activation_quantization(self):
81
+ # check activation quantization
82
+ aq = self.different_compression_methods[ACTIVATION_QUANTIZATION]
83
+ if not aq[TECHNIQUE_ENABLED]:
84
+ return
85
+ else:
86
+ shared_parameters = aq[SHARED_PARAMETERS]
87
+ if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
88
+ for group_name, module_name_list, method_parameters in aq[DIFFERENT_GROUPS]:
89
+ for module_name in module_name_list:
90
+ module = recursive_getattr(self.model, module_name)
91
+ module.activation_quantization_enabled = True
92
+ if not self.verbose[ACTIVATION_QUANTIZATION]:
93
+ logger.info(f'Activation quantization is enabled at step {self.training_steps}')
94
+ self.verbose[ACTIVATION_QUANTIZATION] = True
95
+
96
+ def check_sparse_pruning(self):
97
+ # check sparse pruning
98
+ sp = self.different_compression_methods[SPARSE_PRUNING]
99
+ if not sp[TECHNIQUE_ENABLED]:
100
+ return
101
+ else:
102
+ shared_parameters = sp[SHARED_PARAMETERS]
103
+ if shared_parameters[TECHNIQUE_SCHEDULE_OFFSET] <= self.training_steps <= shared_parameters[
104
+ TECHNIQUE_SCHEDULE_OFFSET_END]:
105
+ for group_name, module_name_list, method_parameters in sp[DIFFERENT_GROUPS]:
106
+ for module_name in module_name_list:
107
+ module = recursive_getattr(self.model, module_name)
108
+ module.sparse_pruning_enabled = True
109
+ if not self.verbose[SPARSE_PRUNING]:
110
+ logger.info(f'Sparse pruning is enabled at step {self.training_steps}')
111
+ self.verbose[SPARSE_PRUNING] = True
112
+
113
+ def check_head_pruning(self):
114
+ # check head pruning
115
+ hp = self.different_compression_methods[HEAD_PRUNING]
116
+ if not hp[TECHNIQUE_ENABLED]:
117
+ return
118
+ else:
119
+ shared_parameters = hp[SHARED_PARAMETERS]
120
+ if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
121
+ for group_name, module_name_list, method_parameters in hp[DIFFERENT_GROUPS]:
122
+ for module_name in module_name_list:
123
+ module = recursive_getattr(self.model, module_name)
124
+ module.head_pruning_enabled = True
125
+ if not self.verbose[HEAD_PRUNING]:
126
+ logger.info(f'Head pruning is enabled at step {self.training_steps}')
127
+ self.verbose[HEAD_PRUNING] = True
128
+
129
+ def check_row_pruning(self):
130
+ # check row pruning
131
+ rp = self.different_compression_methods[ROW_PRUNING]
132
+ if not rp[TECHNIQUE_ENABLED]:
133
+ return
134
+ else:
135
+ shared_parameters = rp[SHARED_PARAMETERS]
136
+ if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
137
+ for group_name, module_name_list, method_parameters in rp[DIFFERENT_GROUPS]:
138
+ for module_name in module_name_list:
139
+ module = recursive_getattr(self.model, module_name)
140
+ module.row_pruning_enabled = True
141
+ if not self.verbose[ROW_PRUNING]:
142
+ logger.info(f'Row pruning is enabled at step {self.training_steps}')
143
+ self.verbose[ROW_PRUNING] = True
144
+
145
+ def check_channel_pruning(self):
146
+ # check channel pruning
147
+ cp = self.different_compression_methods[CHANNEL_PRUNING]
148
+ if not cp[TECHNIQUE_ENABLED]:
149
+ return
150
+ else:
151
+ shared_parameters = cp[SHARED_PARAMETERS]
152
+ if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
153
+ for group_name, module_name_list, method_parameters in cp[DIFFERENT_GROUPS]:
154
+ for module_name in module_name_list:
155
+ module = recursive_getattr(self.model, module_name)
156
+ module.channel_pruning_enabled = True
157
+ if not self.verbose[CHANNEL_PRUNING]:
158
+ logger.info(f'Channel pruning is enabled at step {self.training_steps}')
159
+ self.verbose[CHANNEL_PRUNING] = True
160
+
161
+ def check_all_modules(self):
162
+ # check all different compression methods we have
163
+ self.check_weight_quantization()
164
+ self.check_activation_quantization()
165
+ self.check_sparse_pruning()
166
+ self.check_head_pruning()
167
+ self.check_row_pruning()
168
+ self.check_channel_pruning()
169
+
170
+ def step(self, step_zero_check=False):
171
+ if not step_zero_check:
172
+ self.training_steps += 1
173
+ self.check_all_modules()
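The gating above reduces to one rule: a technique is switched on once `self.training_steps` crosses its `TECHNIQUE_SCHEDULE_OFFSET` (and, for sparse pruning, stays within the offset window). A minimal illustrative sketch of that behavior, using hypothetical names rather than the DeepSpeed API:

    class OffsetGate:
        """Toy stand-in for the schedule-offset gating used by the scheduler above."""

        def __init__(self, schedule_offset):
            self.training_steps = 0
            self.schedule_offset = schedule_offset
            self.enabled = False

        def step(self):
            self.training_steps += 1
            if self.training_steps >= self.schedule_offset:
                self.enabled = True

    gate = OffsetGate(schedule_offset=100)
    for _ in range(100):
        gate.step()
    assert gate.enabled  # the technique turns on once step 100 is reached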
venv/lib/python3.10/site-packages/deepspeed/compression/utils.py ADDED
@@ -0,0 +1,222 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ from torch import autograd
8
+ import math
9
+
10
+
11
+ class TopKBinarizer(autograd.Function):
12
+ """
13
+ Top-k Binarizer.
14
+ Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}`
15
+ is among the k% highest values of S.
16
+ Implementation is inspired from:
17
+ https://github.com/yaozhewei/MLPruning
18
+ """
19
+
20
+ @staticmethod
21
+ def forward(ctx, inputs: torch.Tensor, threshold: float, sigmoid: bool):
22
+ """
23
+ Args:
24
+ inputs (`torch.FloatTensor`)
25
+ The input matrix from which the binarizer computes the binary mask.
26
+ threshold (`float`)
27
+ The fraction of weights to keep (the rest is pruned).
28
+ `threshold` is a float between 0 and 1.
29
+ sigmoid (`bool`)
30
+ Whether to apply a sigmoid on the threshold
31
+ Returns:
32
+ mask (`torch.FloatTensor`)
33
+ Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is
34
+ retained, 0 - the associated weight is pruned).
35
+ """
36
+ # Get the subnetwork by sorting the inputs and using the top threshold
37
+ if sigmoid:
38
+ threshold = torch.sigmoid(threshold).item()
39
+ ctx.sigmoid = sigmoid
40
+ mask = inputs.clone()
41
+
42
+ _, idx = inputs.flatten().sort(descending=True)
43
+ j = math.ceil(threshold * inputs.numel())
44
+
45
+ # flat_out and mask access the same memory.
46
+ flat_out = mask.flatten()
47
+ flat_out[idx[j:]] = 0.
48
+ flat_out[idx[:j]] = 1.
49
+ ctx.save_for_backward(mask)
50
+
51
+ return mask
52
+
53
+ @staticmethod
54
+ def backward(ctx, gradOutput):
55
+ mask, = ctx.saved_tensors
56
+ if ctx.sigmoid:
57
+ return gradOutput.clone(), ((gradOutput * mask).sum()).view(-1), None
58
+ else:
59
+ return gradOutput.clone(), None, None
60
+
61
+
62
+ class SymQuantizer(torch.autograd.Function):
63
+ """
64
+ Symmetric quantization
65
+ """
66
+
67
+ @staticmethod
68
+ def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
69
+ """
70
+ Args:
71
+ inputs (`torch.FloatTensor`)
72
+ The input which needs to be quantized
73
+ num_bits (int, >=4)
74
+ Number of bits to use for quantization
75
+ min_value/max_value (torch.FloatTensor)
76
+ Used for static activation quantization
77
+ num_groups (int)
78
+ How many groups to partition the quantization into
79
+ Returns:
80
+ quantized_input (`torch.FloatTensor`)
81
+ Quantized input
82
+ """
83
+ assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None
84
+ and num_groups == 1)
85
+ q_range = 2**num_bits
86
+ input_shape = input.shape
87
+ if min_value is None:
88
+ input = input.reshape(num_groups, -1)
89
+ max_input = torch.amax(torch.abs(input), dim=-1).view(num_groups, -1)
90
+ else:
91
+ max_input = torch.max(min_value.abs(), max_value).view(-1)
92
+
93
+ scale = 2 * max_input / q_range
94
+ output = (input / scale).round().clamp(-q_range // 2, q_range // 2 - 1) * scale
95
+ output = output.reshape(input_shape).contiguous()
96
+ return output
97
+
98
+ @staticmethod
99
+ def backward(ctx, grad_output):
100
+ grad_input = grad_output.clone()
101
+ return grad_input, None, None, None, None
102
+
103
+
104
+ class AsymQuantizer(torch.autograd.Function):
105
+ """
106
+ Asymmetric quantization
107
+ """
108
+
109
+ @staticmethod
110
+ def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
111
+ """
112
+ Args:
113
+ inputs (`torch.FloatTensor`)
114
+ The input which needs to be quantized
115
+ num_bits (int, >=4)
116
+ Number of bits to use for quantization
117
+ min_value/max_value (torch.FloatTensor)
118
+ Used for static activation quantization
119
+ num_groups (int)
120
+ How many groups to partition the quantization into
121
+ Returns:
122
+ quantized_input (`torch.FloatTensor`)
123
+ Quantized input
124
+ """
125
+
126
+ assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None
127
+ and num_groups == 1)
128
+ q_range = 2**num_bits
129
+ input_shape = input.shape
130
+ if min_value is None:
131
+ input = input.reshape(num_groups, -1)
132
+ min_value = input.amin(dim=-1, keepdim=True)
133
+ max_value = input.amax(dim=-1, keepdim=True)
134
+
135
+ scale = (max_value - min_value) / q_range
136
+ zero_point = (min_value / scale).round() * scale
137
+
138
+ output = ((input - zero_point) / scale).round().clamp(0, q_range - 1) * scale + zero_point
139
+ output = output.reshape(input_shape).contiguous()
140
+ return output
141
+
142
+ @staticmethod
143
+ def backward(ctx, grad_output):
144
+ grad_input = grad_output.clone()
145
+ return grad_input, None, None, None, None
146
+
147
+
148
+ class TernaryQuantizer(torch.autograd.Function):
149
+ """
150
+ Ternary quantization
151
+ """
152
+
153
+ @staticmethod
154
+ def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
155
+ """
156
+ Args:
157
+ inputs (`torch.FloatTensor`)
158
+ The input which needs to be quantized
159
+ num_bits (int)
160
+ Dummy variable
161
+ min_value/max_value (torch.FloatTensor)
162
+ Used for static activation quantization; for now they are dummy variables
163
+ num_groups (int)
164
+ How many groups to partition the quantization into
165
+ Returns:
166
+ quantized_input (`torch.FloatTensor`)
167
+ Quantized input
168
+ """
169
+
170
+ assert (min_value is None and max_value is None)
171
+ input_flat = input.reshape(num_groups, -1)
172
+ n = input_flat.shape[1]
173
+ m = input_flat.norm(p=1, dim=1).div(n)
174
+ thres = (0.7 * m).view(-1, 1)
175
+ pos = (input_flat > thres).type(input.type())
176
+ neg = (input_flat < -thres).type(input.type())
177
+ mask = (input_flat.abs() > thres).type(input.type())
178
+ alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1)
179
+ output = alpha * pos - alpha * neg
180
+ output = output.reshape(input.shape).contiguous()
181
+ return output
182
+
183
+ @staticmethod
184
+ def backward(ctx, grad_output):
185
+ grad_input = grad_output.clone()
186
+ return grad_input, None, None, None, None
187
+
188
+
189
+ class BinaryQuantizer(torch.autograd.Function):
190
+ """
191
+ Binary quantization
192
+ """
193
+
194
+ @staticmethod
195
+ def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
196
+ """
197
+ Args:
198
+ inputs (`torch.FloatTensor`)
199
+ The input which needs to be quantized
200
+ num_bits (int)
201
+ Dummy variable
202
+ min_value/max_value (torch.FloatTensor)
203
+ Used for static activation quantization; for now they are dummy variables
204
+ num_groups (int)
205
+ How many groups to partition the quantization into
206
+ Returns:
207
+ quantized_input (`torch.FloatTensor`)
208
+ Quantized input
209
+ """
210
+
211
+ assert (min_value is None and max_value is None)
212
+ input_flat = input.reshape(num_groups, -1)
213
+ n = input_flat.shape[1]
214
+ m = input_flat.norm(p=1, dim=1, keepdim=True).div(n)
215
+ output = input_flat.sign().mul(m)
216
+ output = output.reshape(input.shape).contiguous()
217
+ return output
218
+
219
+ @staticmethod
220
+ def backward(ctx, grad_output):
221
+ grad_input = grad_output.clone()
222
+ return grad_input, None, None, None, None
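All of the classes in this file are `torch.autograd.Function`s with straight-through backward passes. Inside DeepSpeed they are driven by the compression layers, but they can also be exercised directly through `.apply`; a minimal sketch with arbitrary values:

    import torch

    w = torch.randn(4, 16, requires_grad=True)

    # 8-bit symmetric fake-quantization over a single group
    w_q = SymQuantizer.apply(w, 8, None, None, 1)
    # keep the top 30% of magnitudes, no sigmoid on the threshold
    mask = TopKBinarizer.apply(w.detach().abs(), 0.3, False)

    (w_q * mask).sum().backward()  # gradients flow straight through to w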
venv/lib/python3.10/site-packages/deepspeed/launcher/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ '''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (236 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/constants.cpython-310.pyc ADDED
Binary file (496 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/launch.cpython-310.pyc ADDED
Binary file (9.64 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/launcher_helper.cpython-310.pyc ADDED
Binary file (2.95 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/multinode_runner.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/launcher/__pycache__/runner.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/launcher/launch.py ADDED
@@ -0,0 +1,359 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ DeepSpeed launcher. This is similar to torch's distributed.launch but supports
7
+ additional features such as arbitrary GPU exclusion.
8
+
9
+ deepspeed.launcher.launch is intended to be run on a single worker node and
10
+ will spawn several worker sub-processes depending on how many devices/ranks
11
+ are on the worker.
12
+ """
13
+
14
+ import sys
15
+ import subprocess
16
+ import os
17
+ import json
18
+ import base64
19
+ import time
20
+ import signal
21
+ import psutil
22
+ from collections import defaultdict
23
+ from typing import Dict
24
+ from argparse import ArgumentParser, REMAINDER
25
+ from deepspeed.accelerator import get_accelerator
26
+ from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
27
+ from ..nebula.constants import DLTS_POD_ENV_PATH
28
+ from ..utils import logger, get_numactl_cmd
29
+ from ..elasticity import is_torch_elastic_compatible
30
+ from .constants import ELASTIC_TRAINING_ID_DEFAULT
31
+
32
+ PID_FILE_BASEPATH = "/tmp"
33
+
34
+
35
+ def parse_args():
36
+ parser = ArgumentParser(description="DeepSpeed distributed training launch"
37
+ " utility that creates multiple distributed"
38
+ " processes on a single node")
39
+
40
+ # Optional arguments for the launch helper
41
+ parser.add_argument("--node_rank",
42
+ type=int,
43
+ default=0,
44
+ help="The rank of the node for multi-node distributed "
45
+ "training")
46
+ parser.add_argument("--master_addr",
47
+ default="127.0.0.1",
48
+ type=str,
49
+ help="Master node (rank 0)'s address, should be either"
50
+ " the IP address or the hostname of node 0, for"
51
+ " single node multi-proc training, the"
52
+ " --master_addr can simply be 127.0.0.1")
53
+ parser.add_argument("--master_port",
54
+ default=TORCH_DISTRIBUTED_DEFAULT_PORT,
55
+ type=int,
56
+ help="Master node (rank 0)'s free port that needs to "
57
+ "be used for communication during distributed "
58
+ "training")
59
+ parser.add_argument("--world_info", default="None", type=str, help="world info base64 encoded dictionary")
60
+
61
+ parser.add_argument("--module",
62
+ action="store_true",
63
+ help="Change each process to interpret the launch "
64
+ "script as a Python module, executing with the same "
65
+ "behavior as 'python -m'.")
66
+
67
+ parser.add_argument("--no_python",
68
+ action="store_true",
69
+ help="Skip prepending the training script with "
70
+ "'python' - just execute it directly.")
71
+
72
+ parser.add_argument("--enable_elastic_training", action="store_true", help="Enable elastic training support.")
73
+
74
+ parser.add_argument("--min_elastic_nodes", type=int, default=-1, help="Min number of nodes in elastic training.")
75
+
76
+ parser.add_argument("--max_elastic_nodes", type=int, default=-1, help="Max number of nodes in elastic training.")
77
+
78
+ parser.add_argument("--no_local_rank",
79
+ action="store_true",
80
+ help="Do not pass local_rank as an argument when calling "
81
+ "the user's training script.")
82
+
83
+ parser.add_argument("--save_pid",
84
+ type=int,
85
+ default=0,
86
+ help="main launching process pid, for internal pid tracking")
87
+
88
+ parser.add_argument("--enable_each_rank_log",
89
+ default="None",
90
+ type=str,
91
+ help="redirect the stdout and stderr from each rank into different log files")
92
+
93
+ parser.add_argument("--bind_cores_to_rank",
94
+ action="store_true",
95
+ help="Bind each rank to different cores of the host. "
96
+ "This improves host efficiency especially for CPU backend")
97
+
98
+ parser.add_argument("--bind_core_list",
99
+ type=str,
100
+ default=None,
101
+ help="List of cores to bind to with comma separated list of "
102
+ "numbers and ranges, e.g. 1,3-5,7 => [1,3,4,5,7]. When not "
103
+ "specified, all cores on the system are used for rank binding")
104
+
105
+ # positional
106
+ parser.add_argument("training_script",
107
+ type=str,
108
+ help="The full path to the single GPU training "
109
+ "program/script to be launched in parallel, "
110
+ "followed by all the arguments for the "
111
+ "training script")
112
+
113
+ # rest from the training program
114
+ parser.add_argument('training_script_args', nargs=REMAINDER)
115
+ return parser.parse_args()
116
+
117
+
118
+ # Adapted from https://psutil.readthedocs.io/en/latest/#kill-process-tree
119
+ def terminate_process_tree(pid):
120
+ process = psutil.Process(pid)
121
+ children = process.children(recursive=True)
122
+ children.append(process)
123
+ for child in children:
124
+ try:
125
+ child.terminate()
126
+ except psutil.NoSuchProcess:
127
+ pass
128
+ gone, alive = psutil.wait_procs(children, timeout=30)
129
+ for p in alive:
130
+ p.kill()
131
+
132
+
133
+ def main():
134
+ args = parse_args()
135
+ current_env = os.environ.copy()
136
+
137
+ for k in current_env.keys():
138
+ if "NCCL" in k:
139
+ logger.info(f"{args.node_rank} {k}={current_env[k]}")
140
+
141
+ if args.world_info == "None":
142
+ raise ValueError("world_info can not be None")
143
+ world_info = base64.urlsafe_b64decode(args.world_info)
144
+ world_info = json.loads(world_info)
145
+
146
+ logger.info(f"WORLD INFO DICT: {world_info}")
147
+ node_list = list(world_info.keys())
148
+ args.nnodes = len(node_list)
149
+ local_node = node_list[args.node_rank]
150
+ local_accelerator_ids = world_info[local_node]
151
+ num_local_procs = len(local_accelerator_ids)
152
+ logger.info(f"nnodes={args.nnodes}, num_local_procs={num_local_procs}, node_rank={args.node_rank}")
153
+
154
+ global_rank_mapping = defaultdict(list)
155
+ curr_global_rank = 0
156
+ dist_world_size = 0
157
+ for node_id in node_list:
158
+ gids = world_info[node_id]
159
+ dist_world_size += len(gids)
160
+ for gid in gids:
161
+ global_rank_mapping[node_id].append(curr_global_rank)
162
+ curr_global_rank += 1
163
+ logger.info(f"global_rank_mapping={global_rank_mapping}")
164
+ logger.info(f"dist_world_size={dist_world_size}")
165
+
166
+ get_accelerator().set_visible_devices_envs(current_env, local_accelerator_ids)
167
+ for env in get_accelerator().visible_devices_envs():
168
+ logger.info(f"Setting {env}={current_env[env]}")
169
+
170
+ # set PyTorch distributed related environmental variables
171
+ current_env["MASTER_ADDR"] = args.master_addr
172
+ current_env["MASTER_PORT"] = str(args.master_port)
173
+ current_env["WORLD_SIZE"] = str(dist_world_size)
174
+ current_env["CROSS_RANK"] = str(args.node_rank)
175
+ current_env["CROSS_SIZE"] = str(args.nnodes)
176
+ current_env["LOCAL_SIZE"] = str(num_local_procs)
177
+
178
+ if args.save_pid:
179
+ print(f"launcher pid: {os.getpid()}")
180
+
181
+ pid_file = None
182
+ if args.save_pid:
183
+ launcher_pid = os.getpid()
184
+ pid_file = os.path.join(PID_FILE_BASEPATH, f"{args.save_pid}.deepspeed")
185
+ assert not os.path.isfile(pid_file), "pid file exists but shouldn't"
186
+ with open(pid_file, 'w') as fd:
187
+ fd.write(f"{launcher_pid}")
188
+
189
+ if not is_torch_elastic_compatible():
190
+ if args.enable_elastic_training:
191
+ logger.info("Disabling elastic training support as "
192
+ "PyTorch version should be greater than 1.11.x")
193
+ args.enable_elastic_training = False
194
+
195
+ if os.path.exists(DLTS_POD_ENV_PATH):
196
+ with open(DLTS_POD_ENV_PATH) as file:
197
+ lines = file.readlines()
198
+ lines = [line.rstrip() for line in lines]
199
+ for line in lines:
200
+ if line.startswith('export FC_TASKROLE_NAME') or line.startswith('export FC_TASK_INDEX'):
201
+ key_val = line.split()[1]
202
+ key, val = key_val.split('=')
203
+ current_env[key] = val
204
+
205
+ processes = []
206
+ cmd = []
207
+
208
+ if not args.enable_elastic_training:
209
+ if args.enable_each_rank_log != "None":
210
+ # prepare the log path and the file name prefix
211
+ if os.path.isfile(args.enable_each_rank_log):
212
+ raise ValueError(f"{args.enable_each_rank_log} should not be a file, it should be a directory.")
213
+ if not os.path.exists(args.enable_each_rank_log):
214
+ try:
215
+ os.makedirs(args.enable_each_rank_log)
216
+ except Exception as e:
217
+ print(e)
218
+ raise ValueError(f"unable to create directory {args.enable_each_rank_log} for each rank log.")
219
+ log_name_prefix = time.strftime("%Y%m%d%H%M%S", time.localtime())
220
+
221
+ for local_proc in range(0, num_local_procs):
222
+ # each process's rank
223
+ dist_rank = global_rank_mapping[local_node][local_proc]
224
+ local_rank = dist_rank % num_local_procs
225
+ current_env["RANK"] = str(dist_rank)
226
+ current_env["LOCAL_RANK"] = str(local_rank)
227
+
228
+ # spawn the processes
229
+ cmd = []
230
+ if args.bind_cores_to_rank:
231
+ cores_per_rank, numactl_cmd = get_numactl_cmd(args.bind_core_list, num_local_procs, local_rank)
232
+ current_env["OMP_NUM_THREADS"] = f"{cores_per_rank}"
233
+ cmd = cmd + numactl_cmd
234
+ if not args.no_python:
235
+ cmd.append(sys.executable)
236
+ cmd.append("-u")
237
+ if args.module:
238
+ cmd.append("-m")
239
+ else:
240
+ if args.module:
241
+ raise ValueError("Don't use both the '--no_python' flag"
242
+ " and the '--module' flag at the same time.")
243
+ cmd.append(args.training_script)
244
+ # A user may not want to pass local_rank as a keyword arg so we make this optional.
245
+ if not args.no_local_rank:
246
+ cmd.append(f"--local_rank={local_rank}")
247
+ cmd += args.training_script_args
248
+
249
+ if args.enable_each_rank_log != "None":
250
+ log_file = os.path.join(args.enable_each_rank_log, f"{log_name_prefix}_rank{dist_rank}.log")
251
+ log_fd = open(log_file, 'w')
252
+ process = subprocess.Popen(cmd, env=current_env, stdout=log_fd, stderr=log_fd)
253
+ else:
254
+ process = subprocess.Popen(cmd, env=current_env)
255
+ # logs the command from processes
256
+ logger.info(f"process {process.pid} spawned with command: {cmd}")
257
+ processes.append(process)
258
+ else:
259
+ from ..elasticity import DSElasticAgent
260
+ from torch.distributed.elastic.rendezvous import RendezvousParameters
261
+ from torch.distributed.elastic.agent.server.api import WorkerSpec
262
+ import torch.distributed.elastic.rendezvous.registry as rdzv_registry
263
+ from torch.distributed.elastic.multiprocessing import Std
264
+
265
+ if args.min_elastic_nodes == -1:
266
+ args.min_elastic_nodes = 1
267
+ if args.max_elastic_nodes == -1:
268
+ args.max_elastic_nodes = args.nnodes
269
+ assert args.max_elastic_nodes > 0 and args.min_elastic_nodes > 0, "Max and Min nodes should be positive"
270
+
271
+ current_env["NCCL_ASYNC_ERROR_HANDLING"] = str(1)
272
+
273
+ # Get config and arguments
274
+ cmd = []
275
+ if not args.no_python:
276
+ cmd = [sys.executable, "-u"]
277
+ if args.module:
278
+ cmd.append("-m")
279
+ else:
280
+ if args.module:
281
+ raise ValueError("Don't use both the '--no_python' flag"
282
+ " and the '--module' flag at the same time.")
283
+ cmd.append(args.training_script)
284
+ cmd += args.training_script_args
285
+ cmd_args = cmd[1:]
286
+
287
+ rdzv_configs: Dict[str, str] = {'timeout': 100}
288
+ run_id = os.environ.get("ELASTIC_RUN_ID", ELASTIC_TRAINING_ID_DEFAULT)
289
+
290
+ # Creating config for rendezvous class
291
+ rdzv_parameters = RendezvousParameters(backend='c10d',
292
+ endpoint=args.master_addr + ":" + str(args.master_port),
293
+ run_id=run_id,
294
+ min_nodes=args.min_elastic_nodes,
295
+ max_nodes=args.max_elastic_nodes,
296
+ **rdzv_configs)
297
+
298
+ spec = WorkerSpec(
299
+ role='trainer',
300
+ local_world_size=num_local_procs,
301
+ entrypoint=cmd[0],
302
+ args=cmd[1:],
303
+ rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters),
304
+ max_restarts=100,
305
+ monitor_interval=5,
306
+ redirects=Std.from_str("0"),
307
+ tee=Std.from_str("0"),
308
+ master_addr=None,
309
+ master_port=None,
310
+ )
311
+ agent = DSElasticAgent(spec, current_env)
312
+ agent.run()
313
+
314
+ sig_names = {2: "SIGINT", 15: "SIGTERM"}
315
+ last_return_code = None
316
+
317
+ def sigkill_handler(signum, frame):
318
+ for process in processes:
319
+ logger.info(f"Killing subprocess {process.pid}")
320
+ try:
321
+ terminate_process_tree(process.pid)
322
+ except Exception:
323
+ pass
324
+ if last_return_code is not None:
325
+ logger.error(f"{cmd} exits with return code = {last_return_code}")
326
+ sys.exit(last_return_code)
327
+ if signum in sig_names:
328
+ logger.info(f"Main process received {sig_names[signum]}, exiting")
329
+ if args.save_pid:
330
+ if os.path.isfile(pid_file):
331
+ os.remove(pid_file)
332
+ sys.exit(1)
333
+
334
+ # pass SIGINT/SIGTERM to children if the parent is being terminated
335
+ signal.signal(signal.SIGINT, sigkill_handler)
336
+ signal.signal(signal.SIGTERM, sigkill_handler)
337
+
338
+ alive_processes = set(processes)
339
+ while len(alive_processes):
340
+ finished_processes = []
341
+ for process in alive_processes:
342
+ if process.poll() is None:
343
+ # the process is still running
344
+ continue
345
+ else:
346
+ if process.returncode != 0:
347
+ last_return_code = process.returncode # for sigkill_handler
348
+ sigkill_handler(signal.SIGTERM, None) # not coming back
349
+ else:
350
+ # exited cleanly
351
+ logger.info(f"Process {process.pid} exits successfully.")
352
+ finished_processes.append(process)
353
+ alive_processes = set(alive_processes) - set(finished_processes)
354
+
355
+ time.sleep(1)
356
+
357
+
358
+ if __name__ == "__main__":
359
+ main()
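For reference, `--world_info` is the urlsafe-base64-encoded JSON that `main()` decodes above: a mapping from node name to the accelerator ids to use on that node. The higher-level deepspeed runner normally builds this for you; a hand-rolled sketch would look like:

    import base64
    import json

    world_info = {"localhost": [0, 1]}  # node name -> accelerator ids
    encoded = base64.urlsafe_b64encode(json.dumps(world_info).encode()).decode()
    # python -m deepspeed.launcher.launch --world_info=<encoded> \
    #     --master_addr=127.0.0.1 --master_port=29500 train.py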
venv/lib/python3.10/site-packages/deepspeed/ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (532 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .cpu_adagrad import DeepSpeedCPUAdagrad
venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (244 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/__pycache__/cpu_adagrad.cpython-310.pyc ADDED
Binary file (3.4 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/adagrad/cpu_adagrad.py ADDED
@@ -0,0 +1,109 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ from deepspeed.ops.op_builder import CPUAdagradBuilder
8
+ from deepspeed.utils.logging import should_log_le
9
+
10
+
11
+ class DeepSpeedCPUAdagrad(torch.optim.Optimizer):
12
+ optimizer_id = 0
13
+
14
+ def __init__(self, model_params, lr=1e-2, eps=1e-10, weight_decay=0, amsgrad=False, fp32_optimizer_states=True):
15
+
16
+ default_args = dict(lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
17
+ super(DeepSpeedCPUAdagrad, self).__init__(model_params, default_args)
18
+
19
+ self.opt_id = DeepSpeedCPUAdagrad.optimizer_id
20
+ DeepSpeedCPUAdagrad.optimizer_id = DeepSpeedCPUAdagrad.optimizer_id + 1
21
+ self.fp32_optimizer_states = fp32_optimizer_states
22
+ self.ds_opt_adagrad = CPUAdagradBuilder().load()
23
+
24
+ self.ds_opt_adagrad.create_adagrad(self.opt_id, lr, eps, weight_decay, should_log_le("info"))
25
+
26
+ def __del__(self):
27
+ # need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
28
+ # is used multiple times in the same process (notebook or pytest worker)
29
+ self.ds_opt_adagrad.destroy_adagrad(self.opt_id)
30
+
31
+ def __setstate__(self, state):
32
+ super(DeepSpeedCPUAdagrad, self).__setstate__(state)
33
+ for group in self.param_groups:
34
+ group.setdefault('amsgrad', False)
35
+
36
+ @torch.no_grad()
37
+ def step(self, closure=None, fp16_param_groups=None):
38
+ """Update the model parameters.
39
+
40
+ .. note::
41
+ This method will be called internally by ZeRO-Offload. DeepSpeed
42
+ users should still use ``engine.step()`` as shown in the
43
+ `Getting Started
44
+ <https://www.deepspeed.ai/getting-started/#training>`_ guide.
45
+
46
+ Args:
47
+ closure (callable, optional): closure to compute the loss.
48
+ Defaults to ``None``.
49
+ fp16_param_groups: FP16 GPU parameters to update. Performing the
50
+ copy here reduces communication time. Defaults to ``None``.
51
+
52
+ Returns:
53
+ loss: if ``closure`` is provided. Otherwise ``None``.
54
+ """
55
+
56
+ loss = None
57
+ if closure is not None:
58
+ with torch.enable_grad():
59
+ loss = closure()
60
+
61
+ # intended device for step
62
+ device = torch.device('cpu')
63
+
64
+ for group_id, group in enumerate(self.param_groups):
65
+ for param_id, p in enumerate(group['params']):
66
+
67
+ if p.grad is None:
68
+ continue
69
+
70
+ assert p.device == device, f"CPUAdagrad param is on {p.device} and must be 'cpu', make " \
71
+ "sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
72
+
73
+ state = self.state[p]
74
+ # State initialization
75
+ if len(state) == 0:
76
+ #print(f'group {group_id} param {param_id} = {p.numel()}')
77
+ state['step'] = 0
78
+
79
+ #use full precision by default unless self.fp32_optimizer_states is off
80
+ state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
81
+
82
+ #memory_format=torch.preserve_format)
83
+ # gradient variances
84
+ state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device='cpu')
85
+ #memory_format=torch.preserve_format)
86
+
87
+ state['step'] += 1
88
+
89
+ if p.grad.is_sparse:
90
+ sparse_param = p.sparse_mask(p.grad)
91
+ sparse_exp_avg_sq = state['exp_avg_sq'].sparse_mask(p.grad)
92
+ self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'],
93
+ group['weight_decay'], sparse_param.values(), p.grad.values(),
94
+ sparse_exp_avg_sq.values())
95
+ p[sparse_param.indices()] = sparse_param.values()
96
+ state['exp_avg_sq'][sparse_exp_avg_sq.indices()] = sparse_exp_avg_sq.values()
97
+ if fp16_param_groups is not None:
98
+ fp16_param_groups[group_id][param_id][sparse_param.indices()] = sparse_param.values()
99
+ else:
100
+ if fp16_param_groups is not None:
101
+ self.ds_opt_adagrad.adagrad_update_copy(self.opt_id, state['step'], group['lr'], group['eps'],
102
+ group['weight_decay'], p.data, p.grad.data,
103
+ state['exp_avg_sq'],
104
+ fp16_param_groups[group_id][param_id].data)
105
+ else:
106
+ self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'],
107
+ group['weight_decay'], p.data, p.grad.data,
108
+ state['exp_avg_sq'])
109
+ return loss
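A hedged usage sketch: `DeepSpeedCPUAdagrad` acts as a drop-in optimizer for CPU-resident parameters, provided the CPUAdagrad C++ op can be built and loaded; in typical use it is created by `deepspeed.initialize` when the ZeRO config offloads the optimizer to CPU.

    import torch

    model = torch.nn.Linear(16, 16)  # parameters stay on CPU, as the assert above requires
    opt = DeepSpeedCPUAdagrad(model.parameters(), lr=1e-2, eps=1e-10, weight_decay=0.0)

    loss = model(torch.randn(4, 16)).sum()
    loss.backward()
    opt.step()       # runs the vectorized C++ Adagrad update
    opt.zero_grad()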
venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .fused_lamb import FusedLamb
venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (230 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/lamb/__pycache__/fused_lamb.cpython-310.pyc ADDED
Binary file (5.48 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/lamb/fused_lamb.py ADDED
@@ -0,0 +1,174 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Copyright NVIDIA/apex
7
+ This file is adapted from NVIDIA/apex/optimizer/fused_adam and implements the LAMB optimizer
8
+ """
9
+ import types
10
+ import torch
11
+ from deepspeed.ops.op_builder import FusedLambBuilder
12
+
13
+
14
+ class FusedLamb(torch.optim.Optimizer):
15
+ """Implements the LAMB algorithm. Currently GPU-only.
16
+
17
+ LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`
19
+ (https://arxiv.org/abs/1904.00962).
19
+
20
+ Arguments:
21
+ params (iterable): iterable of parameters to optimize or dicts defining
22
+ parameter groups.
23
+ lr (float, optional): learning rate. (default: 1e-3)
24
+ bias_correction (bool, optional): bias correction (default: True)
25
+ betas (Tuple[float, float], optional): coefficients used for computing
26
+ running averages of gradient and its square. (default: (0.9, 0.999))
27
+ eps (float, optional): term added to the denominator to improve
28
+ numerical stability. (default: 1e-8)
29
+ eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
30
+ adds eps to the bias-corrected second moment estimate before
31
+ evaluating square root instead of adding it to the square root of
32
+ second moment estimate as in the original paper. (default: False)
33
+ weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
34
+ max_grad_norm (float, optional): value used to clip global grad norm
35
+ (default: 0.0)
36
+ max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0)
37
+ min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01)
38
+ amsgrad (boolean, optional): NOT SUPPORTED in FusedLamb!
39
+ """
40
+
41
+ def __init__(self,
42
+ params,
43
+ lr=1e-3,
44
+ bias_correction=True,
45
+ betas=(0.9, 0.999),
46
+ eps=1e-8,
47
+ eps_inside_sqrt=False,
48
+ weight_decay=0.,
49
+ max_grad_norm=0.,
50
+ max_coeff=10.0,
51
+ min_coeff=0.01,
52
+ amsgrad=False):
53
+ self.fused_lamb_cuda = FusedLambBuilder().load()
54
+
55
+ if amsgrad:
56
+ raise RuntimeError('FusedLamb does not support the AMSGrad variant.')
57
+ defaults = dict(lr=lr,
58
+ bias_correction=bias_correction,
59
+ betas=betas,
60
+ eps=eps,
61
+ weight_decay=weight_decay,
62
+ max_grad_norm=max_grad_norm,
63
+ max_coeff=max_coeff,
64
+ min_coeff=min_coeff)
65
+ super(FusedLamb, self).__init__(params, defaults)
66
+ self.eps_mode = 0 if eps_inside_sqrt else 1
67
+ self.lamb_coeffs = []
68
+
69
+ def step(self, closure=None, grads=None, output_params=None, scale=1., grad_norms=None):
70
+ """Performs a single optimization step.
71
+
72
+ Arguments:
73
+ closure (callable, optional): A closure that reevaluates the model
74
+ and returns the loss.
75
+ grads (list of tensors, optional): weight gradient to use for the
76
+ optimizer update. If gradients have type torch.half, parameters
77
+ are expected to be in type torch.float. (default: None)
78
+ output_params (list of tensors, optional): A reduced precision copy
79
+ of the updated weights written out in addition to the regular
80
+ updated weights. Have to be of same type as gradients. (default: None)
81
+ scale (float, optional): factor to divide gradient tensor values
82
+ by before applying to weights. (default: 1)
83
+ """
84
+ loss = None
85
+ if closure is not None:
86
+ loss = closure()
87
+
88
+ if grads is None:
89
+ grads_group = [None] * len(self.param_groups)
90
+ # backward compatibility
91
+ # assuming a list/generator of parameter means single group
92
+ elif isinstance(grads, types.GeneratorType):
93
+ grads_group = [grads]
94
+ elif type(grads[0]) != list:
95
+ grads_group = [grads]
96
+ else:
97
+ grads_group = grads
98
+
99
+ if output_params is None:
100
+ output_params_group = [None] * len(self.param_groups)
101
+ elif isinstance(output_params, types.GeneratorType):
102
+ output_params_group = [output_params]
103
+ elif type(output_params[0]) != list:
104
+ output_params_group = [output_params]
105
+ else:
106
+ output_params_group = output_params
107
+
108
+ if grad_norms is None:
109
+ grad_norms = [None] * len(self.param_groups)
110
+
111
+ #remove the previous coeffs
112
+ del self.lamb_coeffs[:]
113
+
114
+ for group, grads_this_group, output_params_this_group, grad_norm_group in zip(
115
+ self.param_groups, grads_group, output_params_group, grad_norms):
116
+ if grads_this_group is None:
117
+ grads_this_group = [None] * len(group['params'])
118
+ if output_params_this_group is None:
119
+ output_params_this_group = [None] * len(group['params'])
120
+
121
+ if grad_norm_group is None:
122
+ grad_norm_group = [None] * len(group['params'])
123
+ elif not isinstance(grad_norm_group, list):
124
+ grad_norm_group = [grad_norm_group]
125
+
126
+ bias_correction = 1 if group['bias_correction'] else 0
127
+
128
+ for p, grad, output_param, grad_norm in zip(group['params'], grads_this_group, output_params_this_group,
129
+ grad_norm_group):
130
+
131
+ # compute combined scale factor for this group
132
+ combined_scale = scale
133
+ if group['max_grad_norm'] > 0:
134
+ # norm is in fact norm*scale
135
+ clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm']
136
+ if clip > 1:
137
+ combined_scale = clip * scale
138
+
139
+ # note: p.grad should never be set for correct operation of a mixed-precision optimizer that sometimes sends None gradients
140
+ if p.grad is None and grad is None:
141
+ continue
142
+ if grad is None:
143
+ grad = p.grad.data
144
+ if grad.is_sparse:
145
+ raise RuntimeError('FusedLamb does not support sparse gradients')
146
+
147
+ state = self.state[p]
148
+
149
+ # State initialization
150
+ if len(state) == 0:
151
+ state['step'] = 0
152
+ # Exponential moving average of gradient values
153
+ state['exp_avg'] = torch.zeros_like(p.data)
154
+ # Exponential moving average of squared gradient values
155
+ state['exp_avg_sq'] = torch.zeros_like(p.data)
156
+
157
+ exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
158
+ beta1, beta2 = group['betas']
159
+ max_coeff = group['max_coeff']
160
+ min_coeff = group['min_coeff']
161
+
162
+ state['step'] += 1
163
+
164
+ out_p = torch.tensor([], dtype=torch.float) if output_param is None else output_param
165
+ lamb_coeff = self.fused_lamb_cuda.lamb(p.data, out_p, exp_avg, exp_avg_sq, grad, group['lr'], beta1,
166
+ beta2, max_coeff, min_coeff, group['eps'], combined_scale,
167
+ state['step'], self.eps_mode, bias_correction,
168
+ group['weight_decay'])
169
+ self.lamb_coeffs.append(lamb_coeff)
170
+ return loss
171
+
172
+ def get_lamb_coeffs(self):
173
+ lamb_coeffs = [lamb_coeff.item() for lamb_coeff in self.lamb_coeffs]
174
+ return lamb_coeffs
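A hedged sketch of driving `FusedLamb` directly; the fused CUDA kernel must be buildable and parameters must live on the GPU. After a step, `get_lamb_coeffs()` exposes the trust ratios computed for each parameter tensor.

    import torch

    device = torch.device("cuda")  # FusedLamb is GPU-only, per the docstring above
    model = torch.nn.Linear(32, 32).to(device)
    opt = FusedLamb(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=0.01)

    loss = model(torch.randn(8, 32, device=device)).sum()
    loss.backward()
    opt.step()
    print(opt.get_lamb_coeffs())  # one LAMB coefficient per parameter tensor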
venv/lib/python3.10/site-packages/deepspeed/ops/lion/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .cpu_lion import DeepSpeedCPULion
7
+ from .fused_lion import FusedLion
venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (279 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/cpu_lion.cpython-310.pyc ADDED
Binary file (4.7 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/fused_lion.cpython-310.pyc ADDED
Binary file (3.92 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/lion/__pycache__/multi_tensor_apply.cpython-310.pyc ADDED
Binary file (795 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/lion/cpu_lion.py ADDED
@@ -0,0 +1,141 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ from cpuinfo import get_cpu_info
8
+ from deepspeed.utils import logger
9
+ from deepspeed.utils.logging import should_log_le
10
+ from deepspeed.ops.op_builder import CPULionBuilder
11
+
12
+
13
+ class DeepSpeedCPULion(torch.optim.Optimizer):
14
+ optimizer_id = 0
15
+
16
+ def __init__(self, model_params, lr=1e-3, betas=(0.9, 0.999), weight_decay=0, fp32_optimizer_states=True):
17
+ """Fast vectorized implementation of Lion optimizer on CPU:
18
+
19
+ See Symbolic Discovery of Optimization Algorithms (https://doi.org/10.48550/arXiv.2302.06675).
20
+
21
+ .. note::
22
+ We recommend using our `config
23
+ <https://www.deepspeed.ai/docs/config-json/#optimizer-parameters>`_
24
+ to allow :meth:`deepspeed.initialize` to build this optimizer
25
+ for you.
26
+
27
+
28
+ Arguments:
29
+ model_params (iterable): iterable of parameters to optimize or dicts defining
30
+ parameter groups.
31
+ lr (float, optional): learning rate. (default: 1e-3)
32
+ betas (Tuple[float, float], optional): coefficients used for computing
33
+ running averages of gradient and its square. (default: (0.9, 0.999))
34
+ weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
35
+ fp32_optimizer_states: creates momentum and variance in full precision regardless of
36
+ the precision of the parameters (default: True)
37
+ """
38
+
39
+ default_args = dict(lr=lr, betas=betas, weight_decay=weight_decay)
40
+ super(DeepSpeedCPULion, self).__init__(model_params, default_args)
41
+
42
+ cpu_info = get_cpu_info()
43
+ self.cpu_vendor = cpu_info["vendor_id_raw"].lower() if "vendor_id_raw" in cpu_info else "unknown"
44
+ if "amd" in self.cpu_vendor:
45
+ for group_id, group in enumerate(self.param_groups):
46
+ for param_id, p in enumerate(group['params']):
47
+ if p.dtype == torch.half:
48
+ logger.warning("FP16 params for CPULion may not work on AMD CPUs")
49
+ break
50
+ else:
51
+ continue
52
+ break
53
+
54
+ self.opt_id = DeepSpeedCPULion.optimizer_id
55
+ DeepSpeedCPULion.optimizer_id = DeepSpeedCPULion.optimizer_id + 1
56
+ self.fp32_optimizer_states = fp32_optimizer_states
57
+ self.ds_opt_lion = CPULionBuilder().load()
58
+
59
+ self.ds_opt_lion.create_lion(self.opt_id, lr, betas[0], betas[1], weight_decay, should_log_le("info"))
60
+
61
+ def __del__(self):
62
+ # need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
63
+ # is used multiple times in the same process (notebook or pytest worker)
64
+ self.ds_opt_lion.destroy_lion(self.opt_id)
65
+
66
+ def __setstate__(self, state):
67
+ super(DeepSpeedCPULion, self).__setstate__(state)
68
+ for group in self.param_groups:
69
+ group.setdefault('amsgrad', False)
70
+
71
+ @torch.no_grad()
72
+ def step(self, closure=None, fp16_param_groups=None):
73
+ """Update the model parameters.
74
+
75
+ .. note::
76
+ This method will be called internally by ZeRO-Offload. DeepSpeed
77
+ users should still use ``engine.step()`` as shown in the
78
+ `Getting Started
79
+ <https://www.deepspeed.ai/getting-started/#training>`_ guide.
80
+
81
+ Args:
82
+ closure (callable, optional): closure to compute the loss.
83
+ Defaults to ``None``.
84
+ fp16_param_groups: FP16 GPU parameters to update. Performing the
85
+ copy here reduces communication time. Defaults to ``None``.
86
+
87
+ Returns:
88
+ loss: if ``closure`` is provided. Otherwise ``None``.
89
+ """
90
+
91
+ loss = None
92
+ if closure is not None:
93
+ with torch.enable_grad():
94
+ loss = closure()
95
+
96
+ # intended device for step
97
+ device = torch.device('cpu')
98
+
99
+ # converting the fp16 params to a group of parameter
100
+ if type(fp16_param_groups) is list:
101
+ if type(fp16_param_groups[0]) is not list:
102
+ fp16_param_groups = [fp16_param_groups]
103
+ elif fp16_param_groups is not None:
104
+ fp16_param_groups = [[fp16_param_groups]]
105
+
106
+ for group_id, group in enumerate(self.param_groups):
107
+ for param_id, p in enumerate(group['params']):
108
+
109
+ if p.grad is None:
110
+ continue
111
+
112
+ assert p.device == device, f"CPULion param is on {p.device} and must be 'cpu', make " \
113
+ "sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
114
+
115
+ state = self.state[p]
116
+ # State initialization
117
+ if len(state) == 0:
118
+ #print(f'group {group_id} param {param_id} = {p.numel()}')
119
+ state['step'] = 0
120
+
121
+ #use full precision by default unless self.fp32_optimizer_states is off
122
+ state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
123
+
124
+ # gradient momentums
125
+ state['exp_avg'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
126
+ #memory_format=torch.preserve_format)
127
+ # gradient variances
128
+ state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
129
+ #memory_format=torch.preserve_format)
130
+
131
+ state['step'] += 1
132
+ beta1, beta2 = group['betas']
133
+
134
+ if fp16_param_groups is not None:
135
+ self.ds_opt_lion.lion_update_copy(self.opt_id, state['step'], group['lr'], beta1, beta2,
136
+ group['weight_decay'], p.data, p.grad.data, state['exp_avg'],
137
+ fp16_param_groups[group_id][param_id].data)
138
+ else:
139
+ self.ds_opt_lion.lion_update(self.opt_id, state['step'], group['lr'], beta1, beta2,
140
+ group['weight_decay'], p.data, p.grad.data, state['exp_avg'])
141
+ return loss
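For orientation, the update the C++ kernel applies follows the Lion rule from the cited paper (sign of an interpolated momentum, with decoupled weight decay). A pure-PyTorch reference sketch of one step; the actual kernel may differ in precision handling and batching, so this is only a guide to the math:

    import torch

    def lion_reference_step(p, grad, exp_avg, lr, beta1, beta2, weight_decay):
        # c_t = beta1 * m_{t-1} + (1 - beta1) * g_t
        update = exp_avg.mul(beta1).add(grad, alpha=1 - beta1).sign_()
        # p <- p - lr * (sign(c_t) + weight_decay * p)
        p.add_(update + weight_decay * p, alpha=-lr)
        # m_t = beta2 * m_{t-1} + (1 - beta2) * g_t
        exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2)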
venv/lib/python3.10/site-packages/deepspeed/ops/lion/fused_lion.py ADDED
@@ -0,0 +1,131 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ This file is modified from fused_adam.py
7
+ """
8
+
9
+ import torch
10
+ from .multi_tensor_apply import MultiTensorApply
11
+
12
+ multi_tensor_applier = MultiTensorApply(2048 * 32)
13
+ from deepspeed.accelerator import get_accelerator
14
+ from deepspeed.ops.op_builder import FusedLionBuilder
15
+
16
+
17
+ class FusedLion(torch.optim.Optimizer):
18
+ """Implements Lion algorithm.
19
+
20
+ Currently GPU-only.
21
+
22
+ Arguments:
23
+ params (iterable): iterable of parameters to optimize or dicts defining
24
+ parameter groups.
25
+ lr (float, optional): learning rate. (default: 1e-3)
26
+ betas (Tuple[float, float], optional): coefficients used for computing
27
+ running averages of gradient and its square. (default: (0.9, 0.999))
28
+ weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
29
+ set_grad_none (bool, optional): whether set grad to None when zero_grad()
30
+ method is called. (default: True)
31
+
32
+ .. _Symbolic Discovery of Optimization Algorithms:
33
+ https://doi.org/10.48550/arXiv.2302.06675
34
+ """
35
+
36
+ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), weight_decay=0., set_grad_none=True):
37
+
38
+ defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay)
39
+ super(FusedLion, self).__init__(params, defaults)
40
+ self.set_grad_none = set_grad_none
41
+
42
+ fused_lion_cuda = FusedLionBuilder().load()
43
+ # Skip buffer
44
+ self._dummy_overflow_buf = get_accelerator().IntTensor([0])
45
+ self.multi_tensor_lion = fused_lion_cuda.multi_tensor_lion
46
+
47
+ def zero_grad(self):
48
+ if self.set_grad_none:
49
+ for group in self.param_groups:
50
+ for p in group['params']:
51
+ p.grad = None
52
+ else:
53
+ super(FusedLion, self).zero_grad()
54
+
55
+ def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None):
56
+ """Performs a single optimization step.
57
+
58
+ Arguments:
59
+ closure (callable, optional): A closure that reevaluates the model
60
+ and returns the loss.
61
+
62
+ The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
63
+ """
64
+ if any(p is not None for p in [grads, output_params, scale, grad_norms]):
65
+ raise RuntimeError('FusedLion has been updated.')
66
+ loss = None
67
+ if closure is not None:
68
+ loss = closure()
69
+
70
+ for group in self.param_groups:
71
+ if len(group['params']) == 0:
72
+ continue
73
+ beta1, beta2 = group['betas']
74
+
75
+ # assume same step across group now to simplify things
76
+ # per parameter step can be easily support by making it tensor, or pass list into kernel
77
+ if 'step' not in group:
78
+ group['step'] = 0
79
+
80
+ # create lists for multi-tensor apply
81
+ g_16, p_16, m_16 = [], [], []
82
+ g_bf, p_bf, m_bf = [], [], []
83
+ g_32, p_32, m_32 = [], [], []
84
+
85
+ for p in group['params']:
86
+ if p.grad is None:
87
+ continue
88
+ if p.grad.data.is_sparse:
89
+ raise NotImplementedError('FusedLion does not support sparse gradients')
90
+
91
+ state = self.state[p]
92
+ # State initialization
93
+ if len(state) == 0:
94
+ # DeepSpeed ZeRO 3 processes each subgroup a time, so we need to keep tracking step count for each tensor separately.
95
+ # While this is not an issue for ZeRO 1 & 2, since they apply a single optimization step to the whole param group at the same time.
96
+ # In order to keep backward compatibility for the existing checkpoints, we use group['state'] to initialize state['step'] if it exists.
97
+ state['step'] = group.get('step', 0)
98
+ # Exponential moving average of gradient values
99
+ state['exp_avg'] = torch.zeros_like(p.data)
100
+
101
+ if p.dtype == torch.float16:
102
+ g_16.append(p.grad.data)
103
+ p_16.append(p.data)
104
+ m_16.append(state['exp_avg'])
105
+ elif p.dtype == torch.bfloat16:
106
+ g_bf.append(p.grad)
107
+ p_bf.append(p)
108
+ m_bf.append(state['exp_avg'])
109
+ elif p.dtype == torch.float32:
110
+ g_32.append(p.grad.data)
111
+ p_32.append(p.data)
112
+ m_32.append(state['exp_avg'])
113
+ else:
114
+ raise RuntimeError('FusedLion only supports fp16, bf16 and fp32.')
115
+
116
+ if len(g_16) > 0:
117
+ state['step'] += 1
118
+ multi_tensor_applier(self.multi_tensor_lion, self._dummy_overflow_buf, [g_16, p_16, m_16], group['lr'],
119
+ beta1, beta2, state['step'], group['weight_decay'])
120
+
121
+ if len(g_bf) > 0:
122
+ state['step'] += 1
123
+ multi_tensor_applier(self.multi_tensor_lion, self._dummy_overflow_buf, [g_bf, p_bf, m_bf], group['lr'],
124
+ beta1, beta2, state['step'], group['weight_decay'])
125
+
126
+ if len(g_32) > 0:
127
+ state['step'] += 1
128
+ multi_tensor_applier(self.multi_tensor_lion, self._dummy_overflow_buf, [g_32, p_32, m_32], group['lr'],
129
+ beta1, beta2, state['step'], group['weight_decay'])
130
+
131
+ return loss