# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .constants import *
import copy
from ..runtime.config_utils import get_scalar_param, get_list_param


def get_compression_config(param_dict):
    # Assemble the full compression config, filling each section with defaults when absent.
    output = {}

    if COMPRESSION_TRAINING not in param_dict.keys():
        param_dict[COMPRESSION_TRAINING] = {}
    sub_param_dict = param_dict[COMPRESSION_TRAINING]
    output[WEIGHT_QUANTIZATION] = get_weight_quantization(sub_param_dict)
    output[ACTIVATION_QUANTIZATION] = get_activation_quantization(sub_param_dict)
    output[SPARSE_PRUNING] = get_sparse_pruning(sub_param_dict)
    output[ROW_PRUNING] = get_row_pruning(sub_param_dict)
    output[HEAD_PRUNING] = get_head_pruning(sub_param_dict)
    output[CHANNEL_PRUNING] = get_channel_pruning(sub_param_dict)

    output[LAYER_REDUCTION] = get_layer_reduction(sub_param_dict)

    return output
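

# Usage sketch (illustrative only, not executed on import). The nested dict
# uses the symbolic constants from .constants rather than literal JSON keys,
# so nothing beyond this module's own behavior is assumed:
#
#     ds_config = {
#         COMPRESSION_TRAINING: {
#             WEIGHT_QUANTIZATION: {
#                 SHARED_PARAMETERS: {WEIGHT_QUANTIZE_ENABLED: True},
#                 DIFFERENT_GROUPS: {},
#             },
#         },
#     }
#     compression_cfg = get_compression_config(ds_config)
#     # Sections that were omitted (activation quantization, pruning, layer
#     # reduction) come back populated with their defaults, so callers can
#     # index them unconditionally.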


def get_layer_reduction(param_dict):
    output = {}
    output[LAYER_REDUCTION_ENABLED] = LAYER_REDUCTION_ENABLED_DEFAULT
    if get_layer_reduction_enabled(param_dict):
        output[LAYER_REDUCTION_ENABLED] = get_layer_reduction_enabled(param_dict)
        for key, val in get_layer_reduction_params(param_dict).items():
            output[key] = val
    return output


def get_layer_reduction_enabled(param_dict):
    if LAYER_REDUCTION in param_dict.keys():
        return get_scalar_param(param_dict[LAYER_REDUCTION], LAYER_REDUCTION_ENABLED, LAYER_REDUCTION_ENABLED_DEFAULT)
    else:
        return False


def get_layer_reduction_params(param_dict):
    if LAYER_REDUCTION in param_dict.keys():
        layer_reduction_params = copy.copy(param_dict[LAYER_REDUCTION])
        layer_reduction_params.pop(LAYER_REDUCTION_ENABLED, None)
        return layer_reduction_params
    else:
        return {}
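

# Note (sketch): every key under LAYER_REDUCTION other than
# LAYER_REDUCTION_ENABLED is copied through verbatim, so a config such as
# (with "keep_number_layer" as a purely hypothetical key)
#
#     {LAYER_REDUCTION: {LAYER_REDUCTION_ENABLED: True, "keep_number_layer": 6}}
#
# yields {LAYER_REDUCTION_ENABLED: True, "keep_number_layer": 6} from
# get_layer_reduction().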


def get_quantize_enabled(param_dict):
    if COMPRESSION_TRAINING not in param_dict.keys():
        return False

    sub_param_dict = param_dict[COMPRESSION_TRAINING]
    output = get_weight_quantization_shared_parameters(sub_param_dict)
    return output[WEIGHT_QUANTIZE_ENABLED]


def get_weight_quantization(param_dict):
    output = {}
    if WEIGHT_QUANTIZATION not in param_dict.keys():
        param_dict[WEIGHT_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
    sub_param_dict = param_dict[WEIGHT_QUANTIZATION]
    # shared parameters
    output[SHARED_PARAMETERS] = get_weight_quantization_shared_parameters(sub_param_dict)
    # each sub-groups
    if output[SHARED_PARAMETERS][WEIGHT_QUANTIZE_ENABLED]:
        assert DIFFERENT_GROUPS in sub_param_dict.keys(
        ), f"Weigh Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
    output[DIFFERENT_GROUPS] = get_weight_quantization_different_groups(sub_param_dict)
    return output


def get_weight_quantization_shared_parameters(param_dict):
    output = {}
    if SHARED_PARAMETERS in param_dict.keys():
        sub_param_dict = param_dict[SHARED_PARAMETERS]
        output[WEIGHT_QUANTIZE_ENABLED] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ENABLED,
                                                           WEIGHT_QUANTIZE_ENABLED_DEFAULT)
        output[WEIGHT_QUANTIZE_KERNEL] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_KERNEL,
                                                          WEIGHT_QUANTIZE_KERNEL_DEFAULT)
        output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_SCHEDULE_OFFSET,
                                                                   WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
        output[WEIGHT_QUANTIZE_GROUPS] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_GROUPS,
                                                          WEIGHT_QUANTIZE_GROUPS_DEFAULT)
        output[WEIGHT_QUANTIZE_VERBOSE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_VERBOSE,
                                                           WEIGHT_QUANTIZE_VERBOSE_DEFAULT)
        output[WEIGHT_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_TYPE,
                                                        WEIGHT_QUANTIZE_TYPE_DEFAULT)
        output[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] = get_scalar_param(sub_param_dict,
                                                                      WEIGHT_QUANTIZE_IN_FORWARD_ENABLED,
                                                                      WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT)
        assert output[WEIGHT_QUANTIZE_TYPE] in [
            WEIGHT_QUANTIZE_SYMMETRIC, WEIGHT_QUANTIZE_ASYMMETRIC
        ], f"Invalid weight quantize type. Supported types: [{WEIGHT_QUANTIZE_SYMMETRIC}, {WEIGHT_QUANTIZE_ASYMMETRIC}]"
        output[WEIGHT_QUANTIZE_ROUNDING] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ROUNDING,
                                                            WEIGHT_QUANTIZE_ROUNDING_DEFAULT)
        assert output[WEIGHT_QUANTIZE_ROUNDING] in [
            WEIGHT_QUANTIZE_NEAREST_ROUNDING, WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING
        ], f"Invalid weight quantize rounding. Supported types: [{WEIGHT_QUANTIZE_NEAREST_ROUNDING}, {WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING}]"
        if WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE in sub_param_dict.keys():
            output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = get_scalar_param(
                sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED,
                WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT)
            output[WEIGHT_QUANTIZE_CHANGE_RATIO] = get_scalar_param(
                sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_CHANGE_RATIO,
                WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT)
        else:
            output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
            output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
    else:
        output[WEIGHT_QUANTIZE_ENABLED] = WEIGHT_QUANTIZE_ENABLED_DEFAULT
        output[WEIGHT_QUANTIZE_KERNEL] = WEIGHT_QUANTIZE_KERNEL_DEFAULT
        output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
        output[WEIGHT_QUANTIZE_GROUPS] = WEIGHT_QUANTIZE_GROUPS_DEFAULT
        output[WEIGHT_QUANTIZE_VERBOSE] = WEIGHT_QUANTIZE_VERBOSE_DEFAULT
        output[WEIGHT_QUANTIZE_TYPE] = WEIGHT_QUANTIZE_TYPE_DEFAULT
        output[WEIGHT_QUANTIZE_ROUNDING] = WEIGHT_QUANTIZE_ROUNDING_DEFAULT
        output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
        output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
    return output
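

# Shape sketch of the shared parameters read above (symbolic keys only; the
# literal JSON names live in .constants). Note that fp16 mixed quantization
# is a nested block with its own enabled flag:
#
#     SHARED_PARAMETERS: {
#         WEIGHT_QUANTIZE_ENABLED: True,
#         WEIGHT_QUANTIZE_TYPE: WEIGHT_QUANTIZE_SYMMETRIC,
#         WEIGHT_QUANTIZE_ROUNDING: WEIGHT_QUANTIZE_NEAREST_ROUNDING,
#         WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE: {
#             WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED: False,
#             WEIGHT_QUANTIZE_CHANGE_RATIO: WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT,
#         },
#     }
#
# Any field left out falls back to its *_DEFAULT constant.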


def get_weight_quantization_different_groups(param_dict):
    output = {}
    sub_param_dict = param_dict[DIFFERENT_GROUPS]

    def get_params(name, group_dict):
        assert WEIGHT_QUANTIZE_START_BITS in group_dict.keys(
        ), f"{WEIGHT_QUANTIZE_START_BITS} must be specified for weight quantization group {name}"
        assert WEIGHT_QUANTIZE_TARGET_BITS in group_dict.keys(
        ), f"{WEIGHT_QUANTIZE_TARGET_BITS} must be specified for weight quantization group {name}"
        group_dict[WEIGHT_QUANTIZATION_PERIOD] = get_scalar_param(group_dict, WEIGHT_QUANTIZATION_PERIOD,
                                                                  WEIGHT_QUANTIZATION_PERIOD_DEFAULT)
        return group_dict

    for k, v in sub_param_dict.items():
        output[k] = {}
        output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
        output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
                                                                    DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
        output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)

    return output
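

# Shape sketch of one DIFFERENT_GROUPS entry ("wq1" is a user-chosen
# placeholder name; the bit values are placeholders as well):
#
#     DIFFERENT_GROUPS: {
#         "wq1": {
#             DIFFERENT_GROUPS_PARAMETERS: {
#                 WEIGHT_QUANTIZE_START_BITS: 12,  # required
#                 WEIGHT_QUANTIZE_TARGET_BITS: 8,  # required
#                 WEIGHT_QUANTIZATION_PERIOD: 50,  # optional, defaulted above
#             },
#             # DIFFERENT_GROUPS_MODULE_SCOPE and
#             # DIFFERENT_GROUPS_RELATED_MODULE_SCOPE fall back to their
#             # *_DEFAULT values when omitted.
#         },
#     }
#
# The group parsers below reuse the same layout; only the required key inside
# DIFFERENT_GROUPS_PARAMETERS differs (e.g. a dense ratio or bit width instead
# of start/target bits).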


def get_activation_quantization(param_dict):
    output = {}
    if ACTIVATION_QUANTIZATION not in param_dict.keys():
        param_dict[ACTIVATION_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
    sub_param_dict = param_dict[ACTIVATION_QUANTIZATION]
    # shared parameters
    output[SHARED_PARAMETERS] = get_activation_quantization_shared_parameters(sub_param_dict)
    # each sub-groups
    if output[SHARED_PARAMETERS][ACTIVATION_QUANTIZATION_ENABLED]:
        assert DIFFERENT_GROUPS in sub_param_dict.keys(
        ), f"Activation Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
    output[DIFFERENT_GROUPS] = get_activation_quantization_different_groups(sub_param_dict)
    return output


def get_activation_quantization_shared_parameters(param_dict):
    output = {}
    if SHARED_PARAMETERS in param_dict.keys():
        sub_param_dict = param_dict[SHARED_PARAMETERS]
        output[ACTIVATION_QUANTIZATION_ENABLED] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZATION_ENABLED,
                                                                   ACTIVATION_QUANTIZATION_ENABLED_DEFAULT)
        output[ACTIVATION_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_TYPE,
                                                            ACTIVATION_QUANTIZE_TYPE_DEFAULT)
        assert output[ACTIVATION_QUANTIZE_TYPE] in [
            ACTIVATION_QUANTIZE_SYMMETRIC, ACTIVATION_QUANTIZE_ASYMMETRIC
        ], f"Invalid activation quantize type. Supported types: [{ACTIVATION_QUANTIZE_SYMMETRIC}, {ACTIVATION_QUANTIZE_ASYMMETRIC}]"
        output[ACTIVATION_QUANTIZE_RANGE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_RANGE,
                                                             ACTIVATION_QUANTIZE_RANGE_DEFAULT)
        assert output[ACTIVATION_QUANTIZE_RANGE] in [
            ACTIVATION_QUANTIZE_RANGE_DYNAMIC, ACTIVATION_QUANTIZE_RANGE_STATIC
        ], f"Invalid activation quantize range calibration. Supported types: [{ACTIVATION_QUANTIZE_RANGE_DYNAMIC}, {ACTIVATION_QUANTIZE_RANGE_STATIC}]"
        output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict,
                                                                       ACTIVATION_QUANTIZE_SCHEDULE_OFFSET,
                                                                       ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
    else:
        output[ACTIVATION_QUANTIZATION_ENABLED] = ACTIVATION_QUANTIZATION_ENABLED_DEFAULT
        output[ACTIVATION_QUANTIZE_TYPE] = ACTIVATION_QUANTIZE_TYPE_DEFAULT
        output[ACTIVATION_QUANTIZE_RANGE] = ACTIVATION_QUANTIZE_RANGE_DEFAULT
        output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
    return output
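

# Sketch: activation quantization adds a range-calibration choice (dynamic vs
# static) on top of the usual enable/type/schedule fields:
#
#     SHARED_PARAMETERS: {
#         ACTIVATION_QUANTIZATION_ENABLED: True,
#         ACTIVATION_QUANTIZE_TYPE: ACTIVATION_QUANTIZE_SYMMETRIC,
#         ACTIVATION_QUANTIZE_RANGE: ACTIVATION_QUANTIZE_RANGE_DYNAMIC,
#         ACTIVATION_QUANTIZE_SCHEDULE_OFFSET: 1000,  # placeholder value
#     }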


def get_activation_quantization_different_groups(param_dict):
    output = {}
    sub_param_dict = param_dict[DIFFERENT_GROUPS]

    def get_params(name, group_dict):
        assert ACTIVATION_QUANTIZE_BITS in group_dict.keys(
        ), f"{ACTIVATION_QUANTIZE_BITS} must be specified for activation quantization group {name}"
        return group_dict

    for k, v in sub_param_dict.items():
        output[k] = {}
        output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
        output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
                                                                    DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
        output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)

    return output


def get_sparse_pruning(param_dict):
    output = {}
    if SPARSE_PRUNING not in param_dict.keys():
        param_dict[SPARSE_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
    sub_param_dict = param_dict[SPARSE_PRUNING]
    # shared parameters
    output[SHARED_PARAMETERS] = get_sparse_pruning_shared_parameters(sub_param_dict)
    # each sub-groups
    if output[SHARED_PARAMETERS][SPARSE_PRUNING_ENABLED] and output[SHARED_PARAMETERS][
            SPARSE_PRUNING_METHOD] != SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
        assert DIFFERENT_GROUPS in sub_param_dict.keys(
        ), f"Sparse Pruning is enabled and not snip_momentum method, {DIFFERENT_GROUPS} must be specified"
    output[DIFFERENT_GROUPS] = get_sparse_pruning_different_groups(sub_param_dict)
    return output


def get_sparse_pruning_shared_parameters(param_dict):
    output = {}

    if SHARED_PARAMETERS in param_dict.keys():
        sub_param_dict = param_dict[SHARED_PARAMETERS]
        output[SPARSE_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_ENABLED,
                                                          SPARSE_PRUNING_ENABLED_DEFAULT)
        output[SPARSE_PRUNING_METHOD] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_METHOD,
                                                         SPARSE_PRUNING_METHOD_DEFAULT)
        assert output[SPARSE_PRUNING_METHOD] in [
            SPARSE_PRUNING_METHOD_L1, SPARSE_PRUNING_METHOD_TOPK, SPARSE_PRUNING_METHOD_SNIP_MOMENTUM
        ], f"Invalid sparse pruning method. Supported types: [{SPARSE_PRUNING_METHOD_L1}, {SPARSE_PRUNING_METHOD_TOPK}, {SPARSE_PRUNING_METHOD_SNIP_MOMENTUM}]"
        output[SPARSE_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_SCHEDULE_OFFSET,
                                                                  SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT)
        if output[SPARSE_PRUNING_METHOD] == SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
            output[SPARSE_PRUNING_BLOCK_PATTERN] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_BLOCK_PATTERN,
                                                                    SPARSE_PRUNING_BLOCK_PATTERN_DEFAULT)
            output[SPARSE_PRUNING_DENSE_RATIO] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_DENSE_RATIO,
                                                                  SPARSE_PRUNING_DENSE_RATIO_DEFAULT)
            assert 0 < output[SPARSE_PRUNING_DENSE_RATIO] < 1, (
                f"Invalid dense_ratio value {output[SPARSE_PRUNING_DENSE_RATIO]}. Must be between 0 and 1 (exclusive)")
            output[SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE] = get_scalar_param(
                sub_param_dict, SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE, SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE_DEFAULT)
            output[SPARSE_PRUNING_EXCLUDED_MODULES] = get_list_param(sub_param_dict, SPARSE_PRUNING_EXCLUDED_MODULES,
                                                                     SPARSE_PRUNING_EXCLUDED_MODULES_DEFAULT)
            output[SPARSE_PRUNING_SCHEDULE_OFFSET_END] = get_scalar_param(sub_param_dict,
                                                                          SPARSE_PRUNING_SCHEDULE_OFFSET_END,
                                                                          output[SPARSE_PRUNING_SCHEDULE_OFFSET])
            assert output[SPARSE_PRUNING_SCHEDULE_OFFSET] <= output[SPARSE_PRUNING_SCHEDULE_OFFSET_END], (
                f"{SPARSE_PRUNING_SCHEDULE_OFFSET} must not be greater than {SPARSE_PRUNING_SCHEDULE_OFFSET_END}")
    else:
        output[SPARSE_PRUNING_ENABLED] = SPARSE_PRUNING_ENABLED_DEFAULT
        output[SPARSE_PRUNING_METHOD] = SPARSE_PRUNING_METHOD_DEFAULT
        output[SPARSE_PRUNING_SCHEDULE_OFFSET] = SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT
    return output
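

# Sketch of the additional shared parameters read only when the method is
# snip_momentum (symbolic keys; numeric values are placeholders):
#
#     SHARED_PARAMETERS: {
#         SPARSE_PRUNING_ENABLED: True,
#         SPARSE_PRUNING_METHOD: SPARSE_PRUNING_METHOD_SNIP_MOMENTUM,
#         SPARSE_PRUNING_BLOCK_PATTERN: SPARSE_PRUNING_BLOCK_PATTERN_DEFAULT,
#         SPARSE_PRUNING_DENSE_RATIO: 0.5,           # must lie in (0, 1)
#         SPARSE_PRUNING_SCHEDULE_OFFSET: 100,
#         SPARSE_PRUNING_SCHEDULE_OFFSET_END: 1000,  # defaults to schedule_offset
#         SPARSE_PRUNING_EXCLUDED_MODULES: [],
#     }
#
# With snip_momentum, DIFFERENT_GROUPS may be omitted entirely (see
# get_sparse_pruning above).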


def get_sparse_pruning_different_groups(param_dict):
    output = {}
    sub_param_dict = param_dict[DIFFERENT_GROUPS]

    def get_params(name, group_dict):
        assert SPARSE_PRUNING_DENSE_RATIO in group_dict.keys(
        ), f"{SPARSE_PRUNING_DENSE_RATIO} must be specified for sparse pruning group {name}"
        return group_dict

    for k, v in sub_param_dict.items():
        output[k] = {}
        output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
        output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
                                                                    DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
        output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)

    return output


def get_row_pruning(param_dict):
    output = {}
    if ROW_PRUNING not in param_dict.keys():
        param_dict[ROW_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
    sub_param_dict = param_dict[ROW_PRUNING]
    # shared parameters
    output[SHARED_PARAMETERS] = get_row_pruning_shared_parameters(sub_param_dict)
    # each sub-groups
    if output[SHARED_PARAMETERS][ROW_PRUNING_ENABLED]:
        assert DIFFERENT_GROUPS in sub_param_dict.keys(
        ), f"Row Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
    output[DIFFERENT_GROUPS] = get_row_pruning_different_groups(sub_param_dict)
    return output


def get_row_pruning_shared_parameters(param_dict):
    output = {}
    if SHARED_PARAMETERS in param_dict.keys():
        sub_param_dict = param_dict[SHARED_PARAMETERS]
        output[ROW_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, ROW_PRUNING_ENABLED,
                                                       ROW_PRUNING_ENABLED_DEFAULT)
        output[ROW_PRUNING_METHOD] = get_scalar_param(sub_param_dict, ROW_PRUNING_METHOD, ROW_PRUNING_METHOD_DEFAULT)
        assert output[ROW_PRUNING_METHOD] in [
            ROW_PRUNING_METHOD_L1, ROW_PRUNING_METHOD_TOPK
        ], f"Invalid row pruning method. Supported types: [{ROW_PRUNING_METHOD_L1}, {ROW_PRUNING_METHOD_TOPK}]"
        output[ROW_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, ROW_PRUNING_SCHEDULE_OFFSET,
                                                               ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT)
    else:
        output[ROW_PRUNING_ENABLED] = ROW_PRUNING_ENABLED_DEFAULT
        output[ROW_PRUNING_METHOD] = ROW_PRUNING_METHOD_DEFAULT
        output[ROW_PRUNING_SCHEDULE_OFFSET] = ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT
    return output


def get_row_pruning_different_groups(param_dict):
    output = {}
    sub_param_dict = param_dict[DIFFERENT_GROUPS]

    def get_params(name, group_dict):
        assert ROW_PRUNING_DENSE_RATIO in group_dict.keys(
        ), f"{ROW_PRUNING_DENSE_RATIO} must be specified for row pruning group {name}"
        return group_dict

    for k, v in sub_param_dict.items():
        output[k] = {}
        output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
        output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
                                                                    DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
        output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
    return output


def get_head_pruning(param_dict):
    output = {}
    if HEAD_PRUNING not in param_dict.keys():
        param_dict[HEAD_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
    sub_param_dict = param_dict[HEAD_PRUNING]
    # shared parameters
    output[SHARED_PARAMETERS] = get_head_pruning_shared_parameters(sub_param_dict)
    # each sub-groups
    if output[SHARED_PARAMETERS][HEAD_PRUNING_ENABLED]:
        assert DIFFERENT_GROUPS in sub_param_dict.keys(
        ), f"Head Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
    output[DIFFERENT_GROUPS] = get_head_pruning_different_groups(sub_param_dict)
    return output


def get_head_pruning_shared_parameters(param_dict):
    output = {}
    if SHARED_PARAMETERS in param_dict.keys():
        sub_param_dict = param_dict[SHARED_PARAMETERS]
        output[HEAD_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, HEAD_PRUNING_ENABLED,
                                                        HEAD_PRUNING_ENABLED_DEFAULT)
        output[HEAD_PRUNING_METHOD] = get_scalar_param(sub_param_dict, HEAD_PRUNING_METHOD,
                                                       HEAD_PRUNING_METHOD_DEFAULT)
        assert output[HEAD_PRUNING_METHOD] in [
            HEAD_PRUNING_METHOD_L1, HEAD_PRUNING_METHOD_TOPK
        ], f"Invalid head pruning method. Supported types: [{HEAD_PRUNING_METHOD_L1}, {HEAD_PRUNING_METHOD_TOPK}]"
        output[HEAD_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, HEAD_PRUNING_SCHEDULE_OFFSET,
                                                                HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT)
        if output[HEAD_PRUNING_ENABLED]:
            assert HEAD_PRUNING_NUM_HEADS in sub_param_dict.keys(
            ), f"{HEAD_PRUNING_NUM_HEADS} must be specified for head pruning"
            output[HEAD_PRUNING_NUM_HEADS] = sub_param_dict[HEAD_PRUNING_NUM_HEADS]
    else:
        output[HEAD_PRUNING_ENABLED] = HEAD_PRUNING_ENABLED_DEFAULT
        output[HEAD_PRUNING_METHOD] = HEAD_PRUNING_METHOD_DEFAULT
        output[HEAD_PRUNING_SCHEDULE_OFFSET] = HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT
    return output
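

# Sketch: when head pruning is enabled, HEAD_PRUNING_NUM_HEADS is mandatory
# and is passed through without further validation:
#
#     SHARED_PARAMETERS: {
#         HEAD_PRUNING_ENABLED: True,
#         HEAD_PRUNING_METHOD: HEAD_PRUNING_METHOD_TOPK,
#         HEAD_PRUNING_NUM_HEADS: 12,  # placeholder value
#     }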


def get_head_pruning_different_groups(param_dict):
    output = {}
    sub_param_dict = param_dict[DIFFERENT_GROUPS]

    def get_params(name, group_dict):
        assert HEAD_PRUNING_DENSE_RATIO in group_dict.keys(
        ), f"dense_ratio must be specified for head pruning group {name}"
        return group_dict

    for k, v in sub_param_dict.items():
        output[k] = {}
        output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
        output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
                                                                    DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
        output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
    return output


def get_channel_pruning(param_dict):
    output = {}
    if CHANNEL_PRUNING not in param_dict.keys():
        param_dict[CHANNEL_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
    sub_param_dict = param_dict[CHANNEL_PRUNING]
    # shared parameters
    output[SHARED_PARAMETERS] = get_channel_pruning_shared_parameters(sub_param_dict)
    # each sub-groups
    if output[SHARED_PARAMETERS][CHANNEL_PRUNING_ENABLED]:
        assert DIFFERENT_GROUPS in sub_param_dict.keys(
        ), f"Sparse Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
    output[DIFFERENT_GROUPS] = get_channel_pruning_different_groups(sub_param_dict)
    return output


def get_channel_pruning_shared_parameters(param_dict):
    output = {}
    if SHARED_PARAMETERS in param_dict.keys():
        sub_param_dict = param_dict[SHARED_PARAMETERS]
        output[CHANNEL_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_ENABLED,
                                                           CHANNEL_PRUNING_ENABLED_DEFAULT)
        output[CHANNEL_PRUNING_METHOD] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_METHOD,
                                                          CHANNEL_PRUNING_METHOD_DEFAULT)
        assert output[CHANNEL_PRUNING_METHOD] in [
            CHANNEL_PRUNING_METHOD_L1, CHANNEL_PRUNING_METHOD_TOPK
        ], f"Invalid channel pruning method. Supported types: [{CHANNEL_PRUNING_METHOD_L1}, {CHANNEL_PRUNING_METHOD_TOPK}]"
        output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_SCHEDULE_OFFSET,
                                                                   CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT)
    else:
        output[CHANNEL_PRUNING_ENABLED] = CHANNEL_PRUNING_ENABLED_DEFAULT
        output[CHANNEL_PRUNING_METHOD] = CHANNEL_PRUNING_METHOD_DEFAULT
        output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT
    return output


def get_channel_pruning_different_groups(param_dict):
    output = {}
    sub_param_dict = param_dict[DIFFERENT_GROUPS]

    def get_params(name, group_dict):
        assert CHANNEL_PRUNING_DENSE_RATIO in group_dict.keys(
        ), f"{CHANNEL_PRUNING_DENSE_RATIO} must be specified for channel pruning group {name}"
        return group_dict

    for k, v in sub_param_dict.items():
        output[k] = {}
        output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
        output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
                                                                    DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
        output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)

    return output