applied-ai-018 committed on
Commit 734b6a1 · verified · 1 Parent(s): 0cee4ac

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +2 -0
  2. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/__init__.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/bf16_optimizer.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/deepspeed/runtime/base_optimizer.py +63 -0
  5. venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__init__.py +5 -0
  6. venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/coalesced_collectives.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/hccl.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/mpi.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/nccl.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/deepspeed/runtime/comm/coalesced_collectives.py +141 -0
  12. venv/lib/python3.10/site-packages/deepspeed/runtime/comm/hccl.py +124 -0
  13. venv/lib/python3.10/site-packages/deepspeed/runtime/comm/mpi.py +215 -0
  14. venv/lib/python3.10/site-packages/deepspeed/runtime/comm/nccl.py +166 -0
  15. venv/lib/python3.10/site-packages/deepspeed/runtime/config_utils.py +205 -0
  16. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/config.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/constants.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/curriculum_scheduler.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__init__.py +5 -0
  21. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/__init__.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/basic_layer.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/helper.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/scheduler.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/utils.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/basic_layer.py +113 -0
  27. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/helper.py +46 -0
  28. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/scheduler.py +107 -0
  29. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/utils.py +27 -0
  30. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_analyzer.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_sampler.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/indexed_dataset.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/utils.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/deepspeed/runtime/dataloader.py +162 -0
  36. venv/lib/python3.10/site-packages/deepspeed/runtime/eigenvalue.py +149 -0
  37. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__init__.py +5 -0
  38. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/__init__.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/fused_optimizer.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/loss_scaler.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/unfused_optimizer.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/fused_optimizer.py +514 -0
  43. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/loss_scaler.py +270 -0
  44. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__init__.py +8 -0
  45. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/__init__.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/adam.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/lamb.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/zoadam.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/adam.py +306 -0
  50. venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/lamb.py +443 -0
.gitattributes CHANGED
@@ -136,3 +136,5 @@ venv/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-l
  venv/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/bf16_optimizer.cpython-310.pyc ADDED
Binary file (17.9 kB).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/base_optimizer.py ADDED
@@ -0,0 +1,63 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import os
+ import torch
+
+ from deepspeed.utils import logger
+ from deepspeed.utils.tensor_fragment import map_to_flat_opt_states
+ from deepspeed.runtime.utils import bwc_tensor_model_parallel_rank
+
+
+ class DeepSpeedOptimizer(object):
+     pass
+
+
+ class ZeROOptimizer(DeepSpeedOptimizer):
+
+     def load_hp_checkpoint_state_from_checkpoint_dir(self, lp_groups_name: str, checkpoint_dir: str) -> None:
+         checkpoint_dir = os.path.join(checkpoint_dir, "zero")
+         optim_state_path = os.path.join(checkpoint_dir, "optimizer_state.pt")
+         assert os.path.isfile(
+             optim_state_path), f'{optim_state_path} containing optimizer global state is missing! Cannot proceed.'
+         optim_sd = torch.load(optim_state_path)
+
+         self._load_global_state(optim_sd)
+
+         tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
+         if self.mpu is None:
+             logger.warn("MPU is not provided, setting tp size to 1 in checkpoint loading.")
+             tp_world_size = 1
+         else:
+             tp_world_size = self.mpu.get_slice_parallel_world_size() if hasattr(self.mpu, "get_slice_parallel_world_size") \
+                 else self.mpu.get_tensor_model_parallel_world_size()
+
+         for i, (param_group,
+                 loaded_param_group) in enumerate(zip(self.optimizer.param_groups, optim_sd['param_groups'])):
+             # We have an assumption that all params in the same param_group have the same keys
+             opt_keys = set()
+             steps = []
+
+             lp_groups = getattr(self, lp_groups_name)
+             for lp in lp_groups[i]:
+                 if lp._hp_mapping is not None:
+                     #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}")
+                     step = lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank,
+                                                        tp_world_size)
+                     for key in lp._hp_mapping.get_optim_state_keys():
+                         opt_keys.add(key)
+                     steps.append(step)
+
+             hp_param = param_group['params'][0]
+             assert all(step == steps[0] for step in steps), f"Steps {steps} are not equal"
+             if steps[0] is not None:
+                 self.optimizer.state[hp_param]['step'] = steps[0]
+
+             map_to_flat_opt_states(hp_param, lp_groups[i], self.optimizer.state, opt_keys)
+
+             for key, value in loaded_param_group.items():
+                 if key == 'params':
+                     continue
+                 param_group[key] = value
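For orientation, this is the on-disk layout that load_hp_checkpoint_state_from_checkpoint_dir appears to expect, read off the code above: the "zero" subdirectory and "optimizer_state.pt" names come straight from the function, while the per-parameter folder names are whatever self.param_names maps each lp tensor to. A minimal, hypothetical sanity check along those lines (not part of DeepSpeed):

# Expected layout, inferred from the code above:
#   <checkpoint_dir>/zero/optimizer_state.pt     global optimizer state (read with torch.load)
#   <checkpoint_dir>/zero/<param_name>/          per-parameter folders consumed by lp.load_hp_checkpoint_state(...)
import os

def check_zero_checkpoint_layout(checkpoint_dir: str) -> str:
    """Hypothetical helper: verify the global optimizer state file exists before loading."""
    zero_dir = os.path.join(checkpoint_dir, "zero")
    optim_state_path = os.path.join(zero_dir, "optimizer_state.pt")
    if not os.path.isfile(optim_state_path):
        raise FileNotFoundError(f"{optim_state_path} containing optimizer global state is missing!")
    return optim_state_path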
venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ '''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (240 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/coalesced_collectives.cpython-310.pyc ADDED
Binary file (4.33 kB).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/hccl.cpython-310.pyc ADDED
Binary file (3.65 kB).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/mpi.cpython-310.pyc ADDED
Binary file (4.9 kB).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/nccl.cpython-310.pyc ADDED
Binary file (4.39 kB).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/comm/coalesced_collectives.py ADDED
@@ -0,0 +1,141 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ """
+ batched collective operations for overhead amortization and better
+ bandwidth utilization
+ """
+
+ import math
+ from typing import List
+ import torch
+ from torch import Tensor
+ from deepspeed import comm as dist
+ from deepspeed.comm import ProcessGroup, all_to_all_single
+ from deepspeed.accelerator import get_accelerator
+ from deepspeed.utils import instrument_w_nvtx
+ from deepspeed.ops import op_builder
+ from deepspeed.utils import logger
+
+
+ def _torch_reduce_scatter_fn(input_tensor: Tensor, output_tensor: Tensor, group=None, async_op=False, prof=False):
+     return instrument_w_nvtx(dist.reduce_scatter_fn)(output_tensor, input_tensor, group=group, async_op=False)
+
+
+ quantizer_module = None
+
+
+ @instrument_w_nvtx
+ @torch.no_grad()
+ def all_to_all_quant_reduce(tensors: List[Tensor], groups: {}) -> List[Tensor]:
+     global quantizer_module
+     if quantizer_module is None:
+         quantizer_module = op_builder.QuantizerBuilder().load()
+     local_world_size = get_accelerator().device_count()
+     global_world_size = dist.get_world_size()
+     num_nodes = global_world_size // local_world_size
+     this_rank = dist.get_rank()
+     intra_idx = int(this_rank / local_world_size)
+     inter_idx = this_rank % local_world_size
+     output_lst: List[Tensor] = [None] * len(tensors)
+     for idx, tensor in enumerate(tensors):
+         if tensor.dim() == 1:
+             output_lst[idx] = reduce_scatter_coalesced([tensor])[0]
+         elif tensor.numel() % (2 * global_world_size) != 0:
+             # Due to the constraint of 2-stage all-to-all, the input tensor must be divisible by 2 * global_world_size
+             # Otherwise, all-to-all cannot be performed because of shape mismatch.
+             # See more at https://github.com/microsoft/DeepSpeed/pull/5056
+             logger.warning(
+                 f"qgZ falls back to reduce_scatter because tensor size = {tensor.numel()} is not divisible by (2 * global_world_size) = {2 * global_world_size}. Please consider allocating a new world to enable qgZ"
+             )
+             output_lst[idx] = reduce_scatter_coalesced([tensor])[0]
+         else:
+             intra_quant_group = max(tensor.shape[0], tensor.shape[1], global_world_size)
+
+             inter_quant_group = intra_quant_group // local_world_size
+             intra_quant_int4, intra_q_scales = quantizer_module.swizzle_quant(tensor, intra_quant_group, 4,
+                                                                               quantizer_module.Symmetric, 1, num_nodes,
+                                                                               local_world_size)
+             local_output = torch.empty_like(intra_quant_int4)
+             scale_output = torch.empty_like(intra_q_scales)
+             all_to_all_single(local_output, intra_quant_int4, group=groups[f'local_{intra_idx}'])
+             all_to_all_single(scale_output, intra_q_scales, group=groups[f'local_{intra_idx}'])
+             global_input_tensor, global_scales = quantizer_module.quantized_reduction(
+                 local_output, scale_output, intra_quant_group, inter_quant_group, 4, quantizer_module.Symmetric,
+                 local_world_size)
+             global_output = torch.empty_like(global_input_tensor)
+             global_scale_output = torch.empty_like(global_scales)
+             all_to_all_single(global_output, global_input_tensor, group=groups[f'global_{inter_idx}'])
+             all_to_all_single(global_scale_output, global_scales, group=groups[f'global_{inter_idx}'])
+             final_output = quantizer_module.dequantize(global_output, global_scale_output, global_scale_output.numel(),
+                                                        4, quantizer_module.Symmetric)
+             assert final_output.numel(
+             ) % num_nodes == 0, f"final_output.numel()={final_output.numel()} is not divisible by num_nodes={num_nodes}"
+             output_lst[idx] = (sum(list(final_output.chunk(num_nodes))) / num_nodes).view(-1)
+     return output_lst
+
+
+ @instrument_w_nvtx
+ @torch.no_grad()
+ def reduce_scatter_coalesced(
+     tensors: List[Tensor],
+     group: ProcessGroup = None,
+ ) -> List[Tensor]:
+     """simultaneously reduce-scatter a list of tensors - this can be done more
+     efficiently than individual reduce scatter calls
+     TODO. see if PyTorch team wants a c++ version of this for ProcessGroupNCCL
+     """
+     this_rank = dist.get_rank(group)
+     world_sz = dist.get_world_size(group)
+
+     partition_lst_for_each_tensor = [None] * len(tensors)
+     for tensor_idx, tensor in enumerate(tensors):
+         flattened_tensor = tensor.view(-1)
+         chunk_sz = math.ceil(tensor.numel() / world_sz)
+         partition_lst_for_each_tensor[tensor_idx] = [
+             flattened_tensor[rank * chunk_sz:rank * chunk_sz + chunk_sz] for rank in range(0, world_sz)
+         ]
+
+     padded_partition_sz_for_each_tensor = tuple(math.ceil(t.numel() / world_sz) for t in tensors)
+
+     if len(tensors) == 1 and tensors[0].numel() % world_sz == 0:
+         # if there's only one tensor being reduced and we don't need to pad
+         # we have an opportunity to avoid a memory allocation
+         tensor_partition_flat_buffer = tensors[0].view(-1)
+     else:
+         # interleave tensor partitions such that the correct reduced partitions of each tensor
+         # end up at each rank
+         tensor_partitions_lst_with_padding = []
+         for rank in range(world_sz):
+             for tensor_idx in range(len(tensors)):
+                 # add tensor content
+                 tensor_chunk = partition_lst_for_each_tensor[tensor_idx][rank]
+                 tensor_partitions_lst_with_padding.append(tensor_chunk)
+
+                 # add padding if necessary
+                 padding_sz = padded_partition_sz_for_each_tensor[tensor_idx] - tensor_chunk.numel()
+                 if padding_sz > 0:
+                     tensor_partitions_lst_with_padding.append(
+                         torch.empty(padding_sz, dtype=tensor_chunk.dtype, device=tensor_chunk.device))
+
+         tensor_partition_flat_buffer = instrument_w_nvtx(torch.cat)(tensor_partitions_lst_with_padding)
+
+     tensor_partition_flat_buffer.div_(world_sz)  # pre-divide
+     tensor_partition_buffer_for_each_rank: List[Tensor] = torch.chunk(tensor_partition_flat_buffer, world_sz)
+
+     # batched reduce-scatter call
+     _torch_reduce_scatter_fn(tensor_partition_flat_buffer,
+                              tensor_partition_buffer_for_each_rank[this_rank],
+                              group=group)
+
+     # reverse procedure of the interleaving done previously, done on the
+     # result of the batched reduce-scatter
+     output_lst: List[Tensor] = [None] * len(tensors)
+     offset = 0
+     for tensor_idx in range(len(tensors)):
+         output_lst[tensor_idx] = tensor_partition_buffer_for_each_rank[this_rank].narrow(
+             0, offset, partition_lst_for_each_tensor[tensor_idx][this_rank].numel())
+
+         offset += padded_partition_sz_for_each_tensor[tensor_idx]
+     return output_lst
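A rough usage sketch for reduce_scatter_coalesced, assuming deepspeed.init_distributed() has been called and the snippet runs identically on every rank (tensor sizes here are illustrative only). Each rank gets back, per input tensor, its own pre-divided partition of the element-wise sum; all_to_all_quant_reduce falls back to this path for 1-D inputs or when a tensor's numel is not divisible by 2 * global_world_size, as the warning above explains.

# Minimal sketch, not taken from the DeepSpeed docs: every rank contributes the same list of tensors.
import torch
import deepspeed
from deepspeed import comm as dist
from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced

deepspeed.init_distributed()
device = torch.device('cuda', dist.get_rank() % torch.cuda.device_count())

grads = [torch.randn(1024, device=device), torch.randn(4096, device=device)]
partitions = reduce_scatter_coalesced(grads)   # one partition tensor per input tensor
# partitions[i] holds roughly grads[i].numel() / world_size elements of the averaged gradient.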
venv/lib/python3.10/site-packages/deepspeed/runtime/comm/hccl.py ADDED
@@ -0,0 +1,124 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import numpy as np
+ import torch
+ import torch_npu
+ import deepspeed.comm as dist
+
+
+ class HcclBackend(object):
+
+     def __init__(self, mpu=None):
+         if mpu is None:
+             self.world_group = dist.new_group(ranks=range(dist.get_world_size()))
+         else:
+             self.mpu = mpu
+             self.world_group = self.mpu.get_data_parallel_group()
+         self.size = dist.get_world_size(group=self.world_group)
+         self.rank = dist.get_rank(group=self.world_group)
+
+     def my_igather(self, rank, size, group, sendbuf, recvbuf, root):
+         req = []
+         if rank == root:
+             for idx in range(size):
+                 if idx != rank:
+                     req.append(dist.irecv(recvbuf[idx], src=idx, group=group))
+                 else:
+                     recvbuf[rank] = sendbuf
+         else:
+             req.append(dist.isend(sendbuf, group=group, dst=root))
+         return req
+
+     def my_gather(self, rank, size, group, sendbuf, recvbuf, root):
+         if rank == root:
+             for idx in range(size):
+                 if idx != rank:
+                     dist.recv(recvbuf[idx], src=idx, group=group)
+                 else:
+                     recvbuf[rank] = sendbuf
+         else:
+             dist.send(sendbuf, group=group, dst=root)
+
+     def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank):
+         original_shape = buffer_m.size()
+         if len(original_shape) > 1:
+             buffer_m = torch.flatten(buffer_m)
+
+         # align size of original_buffer and error
+         original_size = buffer_m.numel()
+         worker_error_size = worker_error.numel()
+         if original_size != worker_error_size:
+             empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device)
+             buffer_m = torch.cat([buffer_m, empty_tensor])
+
+         buffer_m.add_(worker_error)
+         worker_scale = torch.linalg.norm(buffer_m) / np.sqrt(torch.numel(buffer_m))
+
+         worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+         sign_list_packed_tmp = torch_npu.npu_sign_bits_pack(buffer_m, self.size).type(torch.int8)
+
+         recvbuf_sign = torch.zeros([self.size, len(sign_list_packed_tmp[self.rank])],
+                                    dtype=sign_list_packed_tmp[0].dtype,
+                                    device=sign_list_packed_tmp.device)
+
+         sign_list_packed = [sign_list_packed_tmp[idx] for idx in range(self.size)]
+
+         recvbuf_scale = [
+             torch.zeros(1, dtype=worker_scale.dtype, device=torch.device(local_rank)) for _ in range(self.size)
+         ]
+
+         # communication phase 1
+         # all to all for sign
+         dist.all_to_all_single(recvbuf_sign, torch.stack(sign_list_packed), group=self.world_group)
+         # all gather for scale
+         dist.all_gather(recvbuf_scale, worker_scale, group=self.world_group)
+
+         flattened_recvbuf_sign = recvbuf_sign.type(torch.uint8).flatten()
+         compensated_server_m = torch_npu.npu_sign_bits_unpack(flattened_recvbuf_sign, self.size, torch.float32) \
+             .mul_(torch.stack(recvbuf_scale).mul_(1 / self.size)).sum(0)
+
+         compensated_server_m.add_(server_error)
+
+         server_scale = torch.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel())
+
+         server_error.set_(compensated_server_m -
+                           server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+         server_sign_packed = torch_npu.npu_sign_bits_pack(compensated_server_m, 1).type(torch.int8)
+
+         # recvbuf_sign_server
+         recvbuf_sign_server_tmp = torch.zeros([self.size, len(server_sign_packed[0])],
+                                               dtype=recvbuf_sign.dtype,
+                                               device=server_sign_packed.device)
+
+         recvbuf_sign_server = [recvbuf_sign_server_tmp[idx] for idx in range(self.size)]
+
+         # recvbuf_scale_server
+         recvbuf_scale_server_tmp = torch.zeros([self.size, 1],
+                                                dtype=worker_scale.dtype,
+                                                device=server_sign_packed.device)
+
+         recvbuf_scale_server = [recvbuf_scale_server_tmp[idx] for idx in range(self.size)]
+
+         # communication Phase 2
+         dist.all_gather(recvbuf_sign_server, server_sign_packed[0], group=self.world_group)
+         dist.all_gather(recvbuf_scale_server, server_scale, group=self.world_group)
+
+         recvbuf_sign_server = torch.stack(recvbuf_sign_server)
+
+         flattened_recvbuf_sign_server = recvbuf_sign_server.type(torch.uint8).flatten()
+
+         buffer_m.data.copy_(
+             torch_npu.npu_sign_bits_unpack(flattened_recvbuf_sign_server, self.size,
+                                            torch.float32).mul_(recvbuf_scale_server_tmp).flatten().data)
+
+         if original_size != worker_error_size:
+             buffer_m = buffer_m[0:original_size]
+         if len(original_shape) > 1:
+             buffer_m = buffer_m.reshape(original_shape)
+
+         return buffer_m
venv/lib/python3.10/site-packages/deepspeed/runtime/comm/mpi.py ADDED
@@ -0,0 +1,215 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+ import cupy
+ import time
+ import numpy as np
+ from mpi4py import MPI
+
+ from deepspeed.runtime.compression.cupy import CupyBackend
+
+
+ class MpiBackend(object):
+
+     def __init__(self, cuda_aware):
+         self.comm = MPI.COMM_WORLD
+         self.rank = self.comm.Get_rank()
+         self.size = self.comm.Get_size()
+         self.cuda_aware = cuda_aware
+         self.compression_backend = CupyBackend()
+
+     def my_igather(self, rank, size, comm, sendbuf, recbuf, root):
+         req = []
+         if rank == root:
+             for idx in range(size):
+                 if idx != rank:
+                     req.append(comm.Irecv(recbuf[idx], source=idx))
+                 else:
+                     recbuf[rank] = sendbuf
+         else:
+             req.append(comm.Isend(sendbuf, dest=root))
+         return req
+
+     def gather_cuda(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale,
+                     cupy_recvbuf_scale):
+         # We do in-place operations on cupy buffers so we do not return any buffers
+         requests = []
+         for idx in range(world_size):
+             req_sign = self.my_igather(rank, world_size, comm, cupy_sign_list_packed[idx], cupy_recvbuf_sign, root=idx)
+             requests += req_sign
+
+         for idx in range(world_size):
+             req_scale = self.my_igather(rank, world_size, comm, cupy_worker_scale, cupy_recvbuf_scale, root=idx)
+             requests += req_scale
+
+         MPI.Request.Waitall(requests)
+
+     def gather_host(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale,
+                     cupy_recvbuf_scale):
+
+         # In-place operations are not possible for newly created cupy arrays
+         # so we need to return the new buffers
+         numpy_recvbuf_sign = np.zeros([world_size, cupy_sign_list_packed[rank].size],
+                                       dtype=cupy_sign_list_packed[0].dtype)
+         numpy_recvbuf_scale = np.zeros([world_size, 1], dtype=cupy_worker_scale.dtype)
+
+         # 1. convert from cupy to numpy
+         numpy_sign_list_packed = cupy_sign_list_packed
+
+         for idx in range(world_size):
+             numpy_sign_list_packed[idx] = cupy.asnumpy(cupy_sign_list_packed[idx])
+
+         numpy_worker_scale = cupy.asnumpy(cupy_worker_scale)
+         numpy_recvbuf_scale = cupy.asnumpy(cupy_recvbuf_scale)
+
+         cupy.cuda.get_current_stream().synchronize()
+
+         # 2. use numpy buffers for communication
+         requests = []
+
+         for idx in range(world_size):
+             req_sign = self.my_igather(rank,
+                                        world_size,
+                                        comm,
+                                        numpy_sign_list_packed[idx],
+                                        numpy_recvbuf_sign,
+                                        root=idx)
+             requests += req_sign
+
+         for idx in range(world_size):
+             req_scale = self.my_igather(rank, world_size, comm, numpy_worker_scale, numpy_recvbuf_scale, root=idx)
+             requests += req_scale
+
+         MPI.Request.Waitall(requests)
+
+         # 3. Convert back from numpy to cupy
+         cupy_recvbuf_sign = cupy.asarray(numpy_recvbuf_sign)
+         for idx in range(world_size):
+             cupy_sign_list_packed[idx] = cupy.asarray(numpy_sign_list_packed[idx])
+
+         cupy_worker_scale = cupy.asarray(numpy_worker_scale)
+         cupy_recvbuf_scale = cupy.asarray(numpy_recvbuf_scale)
+         cupy.cuda.get_current_stream().synchronize()
+
+         return cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, cupy_recvbuf_scale
+
+     def allgather_cuda(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale,
+                        cupy_recvbuf_scale_server):
+         comm.Allgather(cupy_server_sign_packed, cupy_recvbuf_sign_server)
+         comm.Allgather(cupy_server_scale, cupy_recvbuf_scale_server)
+
+     def allgather_host(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale,
+                        cupy_recvbuf_scale_server):
+
+         # 1. Convert cupy to numpy
+         numpy_recvbuf_sign_server = np.zeros([comm.Get_size(), cupy_server_sign_packed.size],
+                                              dtype=cupy_server_sign_packed.dtype)
+         numpy_recvbuf_scale_server = np.zeros([comm.Get_size(), 1], dtype=cupy_server_scale.dtype)
+
+         numpy_server_sign_packed = cupy.asnumpy(cupy_server_sign_packed)
+         numpy_recvbuf_sign_server = cupy.asnumpy(cupy_recvbuf_sign_server)
+         numpy_server_scale = cupy.asnumpy(cupy_server_scale)
+         numpy_recvbuf_scale_server = cupy.asnumpy(cupy_recvbuf_scale_server)
+         cupy.cuda.get_current_stream().synchronize()
+
+         # 2. Communicate numpy buffers
+         comm.Allgather(numpy_server_sign_packed, numpy_recvbuf_sign_server)
+         comm.Allgather(numpy_server_scale, numpy_recvbuf_scale_server)
+         comm.Barrier()
+
+         # 3. Convert numpy back to cupy
+         cupy_server_sign_packed = cupy.asarray(numpy_server_sign_packed)
+         cupy_recvbuf_sign_server = cupy.asarray(numpy_recvbuf_sign_server)
+         cupy_server_scale = cupy.asarray(numpy_server_scale)
+         cupy_recvbuf_scale_server = cupy.asarray(numpy_recvbuf_scale_server)
+         cupy.cuda.get_current_stream().synchronize()
+
+         return cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale, cupy_recvbuf_scale_server
+
+     def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank):
+
+         all_start_time = time.time()
+         original_shape = buffer_m.size()
+         if len(original_shape) > 1:
+             buffer_m = torch.flatten(buffer_m)
+         original_size = buffer_m.numel()
+         worker_error_size = worker_error.numel()
+         cupy.cuda.Device(local_rank).use()
+
+         if original_size != worker_error_size:
+             empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device)
+             buffer_m = torch.cat([buffer_m, empty_tensor])
+
+         buffer_m.add_(worker_error)
+         worker_scale = torch.linalg.norm(buffer_m) / np.sqrt(torch.numel(buffer_m))
+         worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+         cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
+             self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size)
+         cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale)
+
+         cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size],
+                                        dtype=cupy_sign_list_packed[0].dtype)
+         cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
+
+         # Communication Phase 1
+         gather_start = time.time()
+         if self.cuda_aware:
+             self.gather_cuda(self.rank, self.size, self.comm, cupy_sign_list_packed, cupy_recvbuf_sign,
+                              cupy_worker_scale, cupy_recvbuf_scale)
+         else:
+             _, cupy_recvbuf_sign, _, cupy_recvbuf_scale = self.gather_host(self.rank, self.size, self.comm,
+                                                                            cupy_sign_list_packed, cupy_recvbuf_sign,
+                                                                            cupy_worker_scale, cupy_recvbuf_scale)
+         gather_end = time.time()
+
+         # cupy_sign_list_packed, cupy_worker_scale, worker_scale = None, None, None
+         cupy_sign_list_packed = None
+
+         compensated_server_m = self.compression_backend.cupy2torch(
+             (cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
+                 self.compression_backend.cupy2torch(cupy_recvbuf_scale).mul_(1 / self.size)).sum(0)
+         compensated_server_m.add_(server_error)
+         server_scale = torch.linalg.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel())
+         server_error.set_(compensated_server_m -
+                           server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+         cupy_server_scale = self.compression_backend.torch2cupy(server_scale)
+
+         cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
+             self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1)
+         compensated_server_m = None
+
+         cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size],
+                                               dtype=cupy_recvbuf_sign.dtype)
+         cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_recvbuf_scale.dtype)
+         # cupy_recvbuf_sign, cupy_recvbuf_scale = None, None
+         cupy_recvbuf_sign = None
+
+         # Communication Phase 2
+         if self.cuda_aware:
+             self.allgather_cuda(self.comm, cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale,
+                                 cupy_recvbuf_scale_server)
+         else:
+             _, cupy_recvbuf_sign_server, _, cupy_recvbuf_scale_server = self.allgather_host(
+                 self.comm, cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale,
+                 cupy_recvbuf_scale_server)
+
+         # cupy_server_sign_packed, cupy_server_scale, server_scale = None, None, None
+         cupy_server_sign_packed = None
+
+         buffer_m.data.copy_(
+             self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape(
+                 self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
+                     self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data)
+         if original_size != worker_error_size:
+             buffer_m = buffer_m[0:original_size]
+         if len(original_shape) > 1:
+             buffer_m = buffer_m.reshape(original_shape)
+
+         # cupy_recvbuf_sign_server, cupy_recvbuf_scale_server = None, None
+
+         return buffer_m
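The gather_host/allgather_host paths above exist because plain (non CUDA-aware) MPI can only move host memory, so device buffers are staged through NumPy on both sides of the call. A standalone illustration of that staging pattern, assuming only that cupy and a CUDA device are available (no MPI required):

# Sketch of the device -> host -> device round trip used when self.cuda_aware is False.
import cupy
import numpy as np

device_buf = cupy.arange(8, dtype=cupy.float32)      # buffer living on the GPU

host_buf = cupy.asnumpy(device_buf)                  # 1. copy to a NumPy array on the host
cupy.cuda.get_current_stream().synchronize()         #    make sure the copy has finished

assert isinstance(host_buf, np.ndarray)              # ... hand host_buf to MPI here ...

device_buf = cupy.asarray(host_buf)                  # 2. copy the communicated result back
cupy.cuda.get_current_stream().synchronize()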
venv/lib/python3.10/site-packages/deepspeed/runtime/comm/nccl.py ADDED
@@ -0,0 +1,166 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+ from deepspeed import comm as dist
+ import cupy
+ import numpy as np
+
+ from deepspeed.runtime.compression.cupy import CupyBackend
+ from deepspeed.utils.torch import required_torch_version
+ from deepspeed.accelerator import get_accelerator
+
+
+ class NcclBackend(object):
+
+     def __init__(self, mpu=None):
+         if mpu is None:
+             self.world_group = dist.new_group(ranks=range(dist.get_world_size()))
+         else:
+             self.mpu = mpu
+             self.world_group = self.mpu.get_data_parallel_group()
+         self.rank = dist.get_rank(group=self.world_group)
+         self.size = dist.get_world_size(group=self.world_group)
+         self.compression_backend = CupyBackend()
+         self.bool_not_supported = required_torch_version(min_version=1.10)
+
+     def my_igather(self, rank, size, group, sendbuf, recvbuf, root):
+         req = []
+         if rank == root:
+             for idx in range(size):
+                 if idx != rank:
+                     req.append(dist.irecv(recvbuf[idx], src=idx, group=group))
+                 else:
+                     recvbuf[rank] = sendbuf
+         else:
+             req.append(dist.isend(sendbuf, group=group, dst=root))
+         return req
+
+     def my_gather(self, rank, size, group, sendbuf, recvbuf, root):
+         if rank == root:
+             for idx in range(size):
+                 if idx != rank:
+                     dist.recv(recvbuf[idx], src=idx, group=group)
+                 else:
+                     recvbuf[rank] = sendbuf
+         else:
+             dist.send(sendbuf, group=group, dst=root)
+
+     def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank):
+
+         # all_start_time = time.time()
+         original_shape = buffer_m.size()
+         if len(original_shape) > 1:
+             buffer_m = torch.flatten(buffer_m)
+         original_size = buffer_m.numel()
+         worker_error_size = worker_error.numel()
+         cupy.cuda.Device(local_rank).use()
+
+         if original_size != worker_error_size:
+             empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device)
+             buffer_m = torch.cat([buffer_m, empty_tensor])
+
+         buffer_m.add_(worker_error)
+         worker_scale = torch.linalg.norm(buffer_m) / np.sqrt(buffer_m.numel())
+         worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+         if self.bool_not_supported:
+             cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
+                 self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool().to(dtype=torch.uint8)), self.size)
+         else:
+             cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
+                 self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size)
+         cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale)
+
+         cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size],
+                                        dtype=cupy_sign_list_packed[0].dtype)
+         # cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
+
+         sign_list_packed = [
+             self.compression_backend.cupy2torch(cupy_sign_list_packed[idx]) for idx in range(self.size)
+         ]
+
+         # worker_scale = self.compression_backend.cupy2torch(cupy_worker_scale)
+         recvbuf_sign = self.compression_backend.cupy2torch(cupy_recvbuf_sign)
+         #recvbuf_scale = self.compression_backend.cupy2torch(cupy_recvbuf_scale)
+         recvbuf_scale = [
+             torch.zeros(1, dtype=worker_scale.dtype, device=torch.device(get_accelerator().device_name(local_rank)))
+             for i in range(self.size)
+         ]
+
+         # communication phase 1
+         # gather_start = time.time()
+         # Alltoall for sign
+         dist.all_to_all_single(recvbuf_sign, torch.stack(sign_list_packed), group=self.world_group)
+         # Allgather for scale
+         dist.all_gather(recvbuf_scale, worker_scale, group=self.world_group)
+
+         # gather_end = time.time()
+
+         # cupy_sign_list_packed, sign_list_packed, cupy_worker_scale, worker_scale = None, None, None, None
+         cupy_sign_list_packed = None
+
+         cupy_recvbuf_sign = self.compression_backend.torch2cupy(recvbuf_sign)
+         #cupy_recvbuf_scale = self.compression_backend.torch2cupy(torch.stack(recvbuf_scale))
+
+         compensated_server_m = self.compression_backend.cupy2torch(
+             (cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
+                 torch.stack(recvbuf_scale).mul_(1 / self.size)).sum(0)
+         compensated_server_m.add_(server_error)
+         server_scale = torch.linalg.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel())
+         server_error.set_(compensated_server_m -
+                           server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
+
+         # cupy_server_scale = self.compression_backend.torch2cupy(server_scale)
+
+         if self.bool_not_supported:
+             cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
+                 self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool().to(dtype=torch.uint8)),
+                 1)
+         else:
+             cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
+                 self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1)
+         compensated_server_m = None
+
+         cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size],
+                                               dtype=cupy_recvbuf_sign.dtype)
+         # cupy_recvbuf_sign, recvbuf_sign = None, None
+         cupy_recvbuf_sign = None
+
+         server_sign_packed = [self.compression_backend.cupy2torch(cupy_server_sign_packed[0])]
+         recvbuf_sign_server = [
+             self.compression_backend.cupy2torch(cupy_recvbuf_sign_server[idx]) for idx in range(self.size)
+         ]
+
+         # server_scale = self.compression_backend.cupy2torch(cupy_server_scale)
+         cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
+         # cupy_recvbuf_scale, recvbuf_scale = None, None
+
+         recvbuf_scale_server = [
+             self.compression_backend.cupy2torch(cupy_recvbuf_scale_server[idx]) for idx in range(self.size)
+         ]
+
+         # Communication Phase 2
+         dist.all_gather(recvbuf_sign_server, server_sign_packed[0], group=self.world_group)
+         dist.all_gather(recvbuf_scale_server, server_scale, group=self.world_group)
+
+         cupy_server_sign_packed = None
+
+         # need to convert from a tensor list to a single tensor
+         # dist.all_gather only provides a tensor list as the recv/output buffer
+         recvbuf_sign_server = torch.stack(recvbuf_sign_server)
+
+         cupy_recvbuf_sign_server = self.compression_backend.torch2cupy(recvbuf_sign_server)
+
+         buffer_m.data.copy_(
+             self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape(
+                 self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
+                     self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data)
+         if original_size != worker_error_size:
+             buffer_m = buffer_m[0:original_size]
+         if len(original_shape) > 1:
+             buffer_m = buffer_m.reshape(original_shape)
+
+         return buffer_m
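A hedged sketch of how compressed_allreduce is typically driven: the caller owns the worker/server error-feedback buffers, which start at zero and persist across steps so the compression residual is re-injected on the next call. The buffer sizes and group setup below are illustrative assumptions, not taken from DeepSpeed documentation.

# Minimal sketch; requires a multi-GPU launch with deepspeed.init_distributed().
import torch
import deepspeed
from deepspeed import comm as dist
from deepspeed.runtime.comm.nccl import NcclBackend

deepspeed.init_distributed()
local_rank = dist.get_rank() % torch.cuda.device_count()
backend = NcclBackend()

numel = 1024 * backend.size                 # illustrative: already divisible by the world size
grad = torch.randn(numel, device=f'cuda:{local_rank}')
worker_error = torch.zeros(numel, device=f'cuda:{local_rank}')
server_error = torch.zeros(numel // backend.size, device=f'cuda:{local_rank}')

# Returns the (approximately) averaged gradient reconstructed from 1-bit signs and per-rank scales.
grad = backend.compressed_allreduce(grad, worker_error, server_error, local_rank)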
venv/lib/python3.10/site-packages/deepspeed/runtime/config_utils.py ADDED
@@ -0,0 +1,205 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ """
+ Collection of DeepSpeed configuration utilities
+ """
+ import json
+ import collections
+ import collections.abc
+ from functools import reduce
+ from deepspeed.pydantic_v1 import BaseModel
+ from deepspeed.utils import logger
+
+
+ class DeepSpeedConfigModel(BaseModel):
+     """
+     This class should be used as a base for all DeepSpeed configs. It extends
+     pydantic.BaseModel to allow for deprecated fields. To enable this feature,
+     add deprecated=True to pydantic.Field:
+
+     my_dep_field: int = Field(0, deprecated=True)
+
+     Deprecated Field kwargs:
+     - deprecated: [True|False], default False
+         Enables / Disables deprecated fields
+     - deprecated_msg: str, default ""
+         Message to include with deprecation warning
+     - new_param: str, default ""
+         Name of the field replacing the deprecated field
+     - set_new_param: [True|False], default True
+         If new_param is provided, enables setting the value of that param with
+         deprecated field value
+     - new_param_fn: callable, default (lambda x: x)
+         If new_param is provided and set_new_param is True, this function will
+         modify the value of the deprecated field before placing that value in
+         the new_param field
+
+     Example:
+         my_new_field is replacing a deprecated my_old_field. The expected type
+         for my_new_field is int while the expected type for my_old_field is
+         str. We want to maintain backward compatibility with our configs, so we
+         define the fields with:
+
+         class MyExampleConfig(DeepSpeedConfigModel):
+             my_new_field: int = 0
+             my_old_field: str = Field('0',
+                                       deprecated=True,
+                                       new_param='my_new_field',
+                                       new_param_fn=(lambda x: int(x)))
+     """
+
+     def __init__(self, strict=False, **data):
+         if (not strict):  # This is temporary until we refactor all DS configs, allows HF to load models
+             data = {k: v for k, v in data.items() if (v != "auto" or k == "replace_method")}
+         super().__init__(**data)
+         self._deprecated_fields_check(self)
+
+     def _process_deprecated_field(self, pydantic_config, field):
+         # Get information about the deprecated field
+         fields_set = pydantic_config.__fields_set__
+         dep_param = field.name
+         kwargs = field.field_info.extra
+         new_param_fn = kwargs.get("new_param_fn", lambda x: x)
+         param_value = new_param_fn(getattr(pydantic_config, dep_param))
+         new_param = kwargs.get("new_param", "")
+         dep_msg = kwargs.get("deprecated_msg", "")
+         if dep_param in fields_set:
+             logger.warning(f"Config parameter {dep_param} is deprecated" +
+                            (f" use {new_param} instead" if new_param else "") + (f". {dep_msg}" if dep_msg else ""))
+             # Check if there is a new param and if it should be set with a value
+             if new_param and kwargs.get("set_new_param", True):
+                 # Remove the deprecate field if there is a replacing field
+                 try:
+                     delattr(pydantic_config, dep_param)
+                 except Exception as e:
+                     logger.error(f"Tried removing deprecated '{dep_param}' from config")
+                     raise e
+
+                 # Set new param value
+                 new_param_nested = new_param.split(".")
+                 if len(new_param_nested) > 1:
+                     # If the new param exists in a subconfig, we need to get
+                     # the fields set for that subconfig
+                     pydantic_config = reduce(getattr, new_param_nested[:-1], pydantic_config)
+                     fields_set = pydantic_config.__fields_set__
+                 new_param_name = new_param_nested[-1]
+                 assert (
+                     new_param_name not in fields_set
+                 ), f"Cannot provide deprecated parameter '{dep_param}' and replacing parameter '{new_param}' together"
+                 # A custom function for converting the old param value to new param value can be provided
+                 try:
+                     setattr(pydantic_config, new_param_name, param_value)
+                 except Exception as e:
+                     logger.error(f"Tried setting value for '{new_param}' with value from deprecated '{dep_param}'")
+                     raise e
+
+     def _deprecated_fields_check(self, pydantic_config):
+         fields = pydantic_config.__fields__
+         for field in fields.values():
+             if field.field_info.extra.get("deprecated", False):
+                 self._process_deprecated_field(pydantic_config, field)
+
+     class Config:
+         validate_all = True
+         validate_assignment = True
+         use_enum_values = True
+         allow_population_by_field_name = True
+         extra = "forbid"
+         arbitrary_types_allowed = True
+
+
+ def get_config_default(config, field_name):
+     assert field_name in config.__fields__, f"'{field_name}' is not a field in {config}"
+     assert not config.__fields__.get(
+         field_name).required, f"'{field_name}' is a required field and does not have a default value"
+     return config.__fields__.get(field_name).default
+
+
+ class pp_int(int):
+     """
+     A wrapper for integers that will return a custom string or comma-formatted
+     string of the integer. For example, print(pp_int(1e5)) will return
+     "100,000". This is useful mainly for auto-generated documentation purposes.
+     """
+
+     def __new__(cls, val, custom_print_str=None):
+         inst = super().__new__(cls, val)
+         inst.custom_print_str = custom_print_str
+         return inst
+
+     def __repr__(self):
+         if self.custom_print_str:
+             return self.custom_print_str
+         return f"{self.real:,}"
+
+
+ # adapted from https://stackoverflow.com/a/50701137/9201239
+ class ScientificNotationEncoder(json.JSONEncoder):
+     """
+     This class overrides ``json.dumps`` default formatter.
+
+     This version keeps everything as normal except formats numbers bigger than 1e3 using scientific notation.
+
+     Just pass ``cls=ScientificNotationEncoder`` to ``json.dumps`` to activate it
+
+     """
+
+     def iterencode(self, o, _one_shot=False, level=0):
+         indent = self.indent if self.indent is not None else 4
+         prefix_close = " " * level * indent
+         level += 1
+         prefix = " " * level * indent
+         if isinstance(o, bool):
+             return "true" if o else "false"
+         elif isinstance(o, float) or isinstance(o, int):
+             if o > 1e3:
+                 return f"{o:e}"
+             else:
+                 return f"{o}"
+         elif isinstance(o, collections.abc.Mapping):
+             x = [f'\n{prefix}"{k}": {self.iterencode(v, level=level)}' for k, v in o.items()]
+             return "{" + ", ".join(x) + f"\n{prefix_close}" + "}"
+         elif isinstance(o, collections.abc.Sequence) and not isinstance(o, str):
+             return f"[{ f', '.join(map(self.iterencode, o)) }]"
+         return "\n, ".join(super().iterencode(o, _one_shot))
+
+
+ class DeepSpeedConfigObject(object):
+     """
+     For json serialization
+     """
+
+     def repr(self):
+         return self.__dict__
+
+     def __repr__(self):
+         return json.dumps(
+             self.__dict__,
+             sort_keys=True,
+             indent=4,
+             cls=ScientificNotationEncoder,
+         )
+
+
+ def get_scalar_param(param_dict, param_name, param_default_value):
+     return param_dict.get(param_name, param_default_value)
+
+
+ def get_list_param(param_dict, param_name, param_default_value):
+     return param_dict.get(param_name, param_default_value)
+
+
+ def get_dict_param(param_dict, param_name, param_default_value):
+     return param_dict.get(param_name, param_default_value)
+
+
+ def dict_raise_error_on_duplicate_keys(ordered_pairs):
+     """Reject duplicate keys."""
+     d = dict((k, v) for k, v in ordered_pairs)
+     if len(d) != len(ordered_pairs):
+         counter = collections.Counter([pair[0] for pair in ordered_pairs])
+         keys = [key for key, value in counter.items() if value > 1]
+         raise ValueError("Duplicate keys in DeepSpeed config: {}".format(keys))
+     return d
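To make the deprecated-field machinery above concrete, a small sketch that reuses the MyExampleConfig shape from the docstring; it assumes Field is re-exported by deepspeed.pydantic_v1 alongside BaseModel. Passing only the deprecated field should log a warning and forward the converted value to the replacement field.

# Illustrative only; mirrors the docstring example rather than any shipped DeepSpeed config.
from deepspeed.pydantic_v1 import Field
from deepspeed.runtime.config_utils import DeepSpeedConfigModel

class MyExampleConfig(DeepSpeedConfigModel):
    my_new_field: int = 0
    my_old_field: str = Field('0',
                              deprecated=True,
                              new_param='my_new_field',
                              new_param_fn=(lambda x: int(x)))

cfg = MyExampleConfig(my_old_field="42")
print(cfg.my_new_field)   # expected: 42, with a deprecation warning logged for my_old_field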
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (249 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/config.cpython-310.pyc ADDED
Binary file (4.24 kB).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/constants.cpython-310.pyc ADDED
Binary file (4.32 kB).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/curriculum_scheduler.cpython-310.pyc ADDED
Binary file (4.84 kB).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+ '''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (262 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/basic_layer.cpython-310.pyc ADDED
Binary file (3.67 kB).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/helper.cpython-310.pyc ADDED
Binary file (1.27 kB).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/scheduler.cpython-310.pyc ADDED
Binary file (4.32 kB).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/utils.cpython-310.pyc ADDED
Binary file (894 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/basic_layer.py ADDED
@@ -0,0 +1,113 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from deepspeed.utils import logger
+ from torch import Tensor
+ from torch.nn import Module
+ from ..constants import *
+ from deepspeed.ops.random_ltd.dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens
+
+
+ #####based on the paper random-ltd: https://arxiv.org/abs/2211.11586
+ class RandomLayerTokenDrop(Module):
+     """
+     A layer wrapper for random LTD
+     """
+
+     def __init__(self, layer: Module):
+         super(RandomLayerTokenDrop, self).__init__()
+         self.random_ltd_layer = layer
+         self.reserved_length = None  #config['max_value']
+         self.random_ltd_scheduler = None
+         self.max_length = None
+         self.reserved_length = -1
+         self.curr_seq = -1
+         self.batch_first = False
+
+     def init_config(self, config, scheduler, random_ltd_layer_id):
+         self.random_ltd_scheduler = scheduler
+         self.random_ltd_layer_id = random_ltd_layer_id
+         self.max_length = self.random_ltd_scheduler.state[RANDOM_LTD_MAX_VALUE]
+
+         self.mask_name = config[RANDOM_LTD_MODEL_MASK_NAME]
+         self.micro_bs = config[RANDOM_LTD_MICRO_BATCH_SIZE]
+         self.random_ltd_num_layer = self.random_ltd_scheduler.random_ltd_layer_num
+         hs_order = config[RANDOM_LTD_HIDDEN_STATE_ORDER]
+         self.model_type = config[RANDOM_LTD_MODEL_TYPE]
+
+         if hs_order == 'batch_seq_dim':
+             self.get_hidden_tensor_shape = self.get_bsh
+             self.batch_first = True
+         elif hs_order == 'seq_batch_dim':
+             self.get_hidden_tensor_shape = self.get_sbh
+             self.batch_first = False
+         else:
+             logger.warning(
+                 "************For now, we only support batch_seq_dim or seq_batch_dim inputs. You can easily add \
+                 your own input dimension orders************")
+             raise NotImplementedError
+
+         if self.model_type == 'encoder':
+             self.index_generator = bert_sample_tokens
+         elif self.model_type == 'decoder':
+             self.index_generator = gpt_sample_tokens
+         else:
+             logger.warning("************For now, we only support encoder-only or decoder-only models************")
+             raise NotImplementedError
+
+     def get_bsh(self, hidden_stats):
+         self.curr_seq, self.curr_micro_batch = hidden_stats.size()[1], hidden_stats.size()[0]
+
+     def get_sbh(self, hidden_stats):
+         self.curr_seq, self.curr_micro_batch = hidden_stats.size()[0], hidden_stats.size()[1]
+
+     def forward(self, hidden_states, **kwargs) -> Tensor:
+         if self.random_ltd_scheduler is not None:
+             self.reserved_length = self.random_ltd_scheduler.get_current_seq()
+             self.get_hidden_tensor_shape(hidden_states)
+         if self.training and self.random_ltd_scheduler is not None and self.reserved_length < self.curr_seq:
+             if self.mask_name is not None:
+                 mask = kwargs[self.mask_name]
+             else:
+                 mask = None
+             if self.random_ltd_layer_id == 0:
+                 sampled_indices, part_attention_mask = self.index_generator(self.reserved_length,\
+                                                                             self.curr_seq, \
+                                                                             self.curr_micro_batch, \
+                                                                             self.random_ltd_num_layer, \
+                                                                             hidden_states.device, mask)
+                 self.random_ltd_scheduler.state[RANDOM_LTD_SAMPLE_INDEX] = sampled_indices
+                 self.random_ltd_scheduler.state[RANDOM_LTD_ATTENTION_MASK] = part_attention_mask
+             else:
+                 sampled_indices = self.random_ltd_scheduler.state[RANDOM_LTD_SAMPLE_INDEX]
+                 part_attention_mask = self.random_ltd_scheduler.state[RANDOM_LTD_ATTENTION_MASK]
+
+             hidden_states, part_hidden_states = GatherTokens.apply(hidden_states,
+                                                                    sampled_indices[self.random_ltd_layer_id, :, :],
+                                                                    self.batch_first)
+             if self.mask_name is not None:
+                 if self.model_type == 'encoder':
+                     kwargs[self.mask_name] = part_attention_mask[self.random_ltd_layer_id]
+                 else:
+                     kwargs[self.mask_name] = part_attention_mask
+
+             outputs = self.random_ltd_layer(part_hidden_states, **kwargs)
+
+             if isinstance(outputs, tuple):
+                 hidden_states = ScatterTokens.apply(hidden_states, outputs[0],
+                                                     sampled_indices[self.random_ltd_layer_id, :, :], self.batch_first)
+                 my_list = list(outputs)
+                 my_list[0] = hidden_states
+                 return tuple(my_list)
+             elif isinstance(outputs, Tensor):
+                 hidden_states = ScatterTokens.apply(hidden_states, outputs,
+                                                     sampled_indices[self.random_ltd_layer_id, :, :], self.batch_first)
+                 return hidden_states
+             else:
+                 logger.warning("************For now, we only support tuple and tensor output. \
+                     You need to adjust the output according to the layer in your model************")
+                 raise NotImplementedError
+         else:
+             return self.random_ltd_layer(hidden_states, **kwargs)
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/helper.py ADDED
@@ -0,0 +1,46 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .basic_layer import RandomLayerTokenDrop
+ from collections import OrderedDict
+ from deepspeed.compression.helper import recursive_getattr, recursive_setattr
+
+
+ def convert_to_random_ltd(model, convert_type):
+     if hasattr(model, 'module'):
+         c_model = model.module
+     else:
+         c_model = model
+
+     for name, module in c_model.named_modules():
+
+         if isinstance(module, convert_type):
+             old_module = recursive_getattr(c_model, name)
+             new_module = RandomLayerTokenDrop(old_module)
+             recursive_setattr(c_model, name, new_module)
+
+     model.random_ltd_initialize()
+     return model
+
+
+ def save_without_random_ltd(model):
+     if hasattr(model, 'module'):
+         c_model = model.module
+     else:
+         c_model = model
+
+     model_dic = c_model.state_dict()
+     return remove_random_ltd_state_dict(model_dic)
+
+
+ def remove_random_ltd_state_dict(state_dict):
+     new_state_dict = OrderedDict()
+     for key, value in state_dict.items():
+         if '.random_ltd_layer' in key:
+             new_key = ''.join(key.split('.random_ltd_layer'))
+         else:
+             new_key = key
+         new_state_dict[new_key] = value
+     return new_state_dict
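remove_random_ltd_state_dict simply strips the extra '.random_ltd_layer' segment that the RandomLayerTokenDrop wrapper introduces into parameter names, so a wrapped model's state_dict can be loaded back into the unwrapped architecture. A small self-contained check of that key rewriting (the key names below are made up for illustration):

from collections import OrderedDict
from deepspeed.runtime.data_pipeline.data_routing.helper import remove_random_ltd_state_dict

wrapped = OrderedDict([
    ('encoder.layer.0.random_ltd_layer.attention.weight', 1),
    ('embeddings.word_embeddings.weight', 2),
])
clean = remove_random_ltd_state_dict(wrapped)
print(list(clean.keys()))
# ['encoder.layer.0.attention.weight', 'embeddings.word_embeddings.weight']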
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/scheduler.py ADDED
@@ -0,0 +1,107 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import math
7
+
8
+ from deepspeed.utils import logger
9
+ # from deepspeed.runtime.lr_schedules import WarmupLR
10
+ from ..constants import *
11
+
12
+ # Based on the Random-LTD paper: https://arxiv.org/abs/2211.11586
13
+
14
+
15
+ class BaseScheduler(object):
16
+
17
+ def __init__(self):
18
+ self.state = {}
19
+
20
+ def __fixed_root_get_value(self, global_steps, root_degree=None):
21
+ s_state = self.state[RANDOM_LTD_SCHEDULE_CONFIG]
22
+ if root_degree is None:
23
+ root_degree = s_state['root_degree']
24
+ next_seq = (float(global_steps) / s_state[RANDOM_LTD_REQUIRE_STEP])**(1.0 / root_degree)
25
+ next_seq = math.floor(next_seq * (self.state[RANDOM_LTD_MAX_VALUE] - self.state[RANDOM_LTD_MIN_VALUE]) +
26
+ self.state[RANDOM_LTD_MIN_VALUE])
27
+ next_seq -= (next_seq % s_state[RANDOM_LTD_INCREASE_STEP])
28
+ next_seq = min(next_seq, self.state[RANDOM_LTD_MAX_VALUE])
29
+ return next_seq
30
+
31
+ def get_value(self, global_steps):
32
+ if self.state[RANDOM_LTD_SCHEDULER_TYPE] == 'fixed_linear':
33
+ return self.__fixed_root_get_value(global_steps, 1)
34
+ else:
35
+ raise RuntimeError('Unsupported random LTD schedule type')
36
+
37
+
38
+ class RandomLTDScheduler(BaseScheduler):
39
+
40
+ def __init__(self, config):
41
+ super().__init__()
42
+ self.model_layer_num = config[RANDOM_LTD_TOTAL_LAYER_NUM]
43
+ self.random_ltd_layer_num = config[RANDOM_LTD_LAYER_NUM]
44
+ self.config_schedule = config[RANDOM_LTD_SCHEDULER]
45
+ self.global_batch_size = config[RANDOM_LTD_GLOBAL_BATCH_SIZE]
46
+ self.reset_to_init()
47
+
48
+ if config[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][RANDOM_LTD_LAYER_TOKEN_LR_ENABLED]:
49
+ logger.warning("**********Work In Progress************")
50
+ raise NotImplementedError
51
+
52
+ self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = 0
53
+
54
+ # self.first_step = True
55
+ def get_total_layer_tokens(self, train_iters):
56
+ for step in range(train_iters):
57
+ self.update_seq(step)
58
+ return self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS]
59
+
60
+ def reset_to_init(self):
61
+ if self.config_schedule is not None:
62
+ self.state[RANDOM_LTD_MIN_VALUE] = self.config_schedule[RANDOM_LTD_MIN_VALUE]
63
+ self.state[RANDOM_LTD_MAX_VALUE] = self.config_schedule[RANDOM_LTD_MAX_VALUE]
64
+ self.state[RANDOM_LTD_CURRENT_VALUE] = self.config_schedule[RANDOM_LTD_MIN_VALUE]
65
+ self.state[RANDOM_LTD_SCHEDULE_CONFIG] = self.config_schedule[RANDOM_LTD_SCHEDULE_CONFIG]
66
+ self.state[RANDOM_LTD_SCHEDULER_TYPE] = self.config_schedule[RANDOM_LTD_SCHEDULER_TYPE]
67
+ self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = 0
68
+ self.state[RANDOM_LTD_CURR_STEP] = -1
69
+
70
+ def get_current_seq(self):
71
+ return self.state[RANDOM_LTD_CURRENT_VALUE]
72
+
73
+ def set_current_seq(self, seq_length):
74
+ self.state[RANDOM_LTD_CURRENT_VALUE] = seq_length
75
+
76
+ def get_random_ltd_layer_num(self):
77
+ return self.random_ltd_layer_num
78
+
79
+ def get_state(self):
80
+ return self.state
81
+
82
+ def set_state(self, state):
83
+ self.state = state
84
+
85
+ def update_seq(self, global_steps):
86
+ if self.state[RANDOM_LTD_CURRENT_VALUE] < self.state[RANDOM_LTD_MAX_VALUE]:
87
+ self.state[RANDOM_LTD_CURRENT_VALUE] = self.get_value(global_steps)
88
+ if global_steps != self.state[RANDOM_LTD_CURR_STEP]:
89
+ self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] += self.global_batch_size*(self.state[RANDOM_LTD_CURRENT_VALUE] * self.random_ltd_layer_num \
90
+ + self.state[RANDOM_LTD_MAX_VALUE] * (self.model_layer_num - self.random_ltd_layer_num))
91
+ self.state[RANDOM_LTD_CURR_STEP] = global_steps
92
+
93
+ def state_dict(self):
94
+ return {
95
+ RANDOM_LTD_CONSUMED_LAYER_TOKENS: self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS],
96
+ RANDOM_LTD_CURR_STEP: self.state[RANDOM_LTD_CURR_STEP],
97
+ RANDOM_LTD_CURRENT_VALUE: self.state[RANDOM_LTD_CURRENT_VALUE],
98
+ RANDOM_LTD_MIN_VALUE: self.state[RANDOM_LTD_MIN_VALUE],
99
+ RANDOM_LTD_MAX_VALUE: self.state[RANDOM_LTD_MAX_VALUE],
100
+ }
101
+
102
+ def load_state_dict(self, state_dict):
103
+ self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = state_dict[RANDOM_LTD_CONSUMED_LAYER_TOKENS]
104
+ self.state[RANDOM_LTD_CURR_STEP] = state_dict[RANDOM_LTD_CURR_STEP]
105
+ self.state[RANDOM_LTD_CURRENT_VALUE] = state_dict[RANDOM_LTD_CURRENT_VALUE]
106
+ self.state[RANDOM_LTD_MIN_VALUE] = state_dict[RANDOM_LTD_MIN_VALUE]
107
+ self.state[RANDOM_LTD_MAX_VALUE] = state_dict[RANDOM_LTD_MAX_VALUE]
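A standalone sketch of the fixed_linear schedule implemented above (root_degree = 1), with illustrative numbers:

import math

def fixed_linear_value(step, require_steps, min_value, max_value, increase_step):
    # the reserved sequence length grows linearly with the step count
    seq = math.floor(float(step) / require_steps * (max_value - min_value) + min_value)
    seq -= seq % increase_step          # snap down to a multiple of increase_step
    return min(seq, max_value)

# grow from 128 to 512 tokens over 1000 steps, in increments of 16
for step in (0, 250, 500, 1000, 2000):
    print(step, fixed_linear_value(step, 1000, 128, 512, 16))
# 0 -> 128, 250 -> 224, 500 -> 320, 1000 -> 512, 2000 -> 512 (capped)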
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/utils.py ADDED
@@ -0,0 +1,27 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+
9
+ def bsh_decoder_gather(reserved_length, hidden_states, mask):
10
+ # random-layer-token-drop
11
+ rand_list = []
12
+ part_hidden_states = [] # batch, seq, hidden ## different from megatron
13
+ for k in range(hidden_states.size(0)):
14
+ B_tmp = torch.randperm(hidden_states.size(1), device=hidden_states.device)[:reserved_length]
15
+ B = B_tmp.sort()[0]
16
+ rand_list.append(B)
17
+ part_hidden_states.append(hidden_states[k:k + 1, B, :])
18
+
19
+ part_hidden_states = torch.cat(part_hidden_states, dim=0)
20
+ part_mask = mask[:, :, :reserved_length, :reserved_length]
21
+ return part_hidden_states, rand_list, part_mask
22
+
23
+
24
+ def bsh_decoder_scatter(hidden_states, part_hidden_states, rand_list):
25
+ for k in range(hidden_states.size(0)):
26
+ hidden_states[k, rand_list[k], :] = part_hidden_states[k, :, :]
27
+ return hidden_states
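A quick round-trip through the two helpers above, assuming a (batch, heads, seq, seq) attention mask:

import torch
from deepspeed.runtime.data_pipeline.data_routing.utils import bsh_decoder_gather, bsh_decoder_scatter

batch, seq, hidden, heads, reserved = 2, 16, 8, 4, 6
hidden_states = torch.randn(batch, seq, hidden)
mask = torch.zeros(batch, heads, seq, seq)

part, rand_list, part_mask = bsh_decoder_gather(reserved, hidden_states, mask)
print(part.shape, part_mask.shape)       # (2, 6, 8) and (2, 4, 6, 6)

# scattering the untouched slice back reproduces the input exactly
restored = bsh_decoder_scatter(hidden_states.clone(), part, rand_list)
assert torch.equal(restored, hidden_states)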
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (263 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_analyzer.cpython-310.pyc ADDED
Binary file (26.6 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_sampler.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/indexed_dataset.cpython-310.pyc ADDED
Binary file (21.6 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.88 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/dataloader.py ADDED
@@ -0,0 +1,162 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from torch.utils.data import DataLoader, RandomSampler
7
+ from torch.utils.data.distributed import DistributedSampler
8
+ from deepspeed.accelerator import get_accelerator
9
+
10
+ from deepspeed.runtime.data_pipeline.data_sampling.data_sampler import DeepSpeedDataSampler
11
+ from deepspeed.runtime.data_pipeline.constants import CURRICULUM_LEARNING, \
12
+ DATA_EFFICIENCY, DATA_SAMPLING_NUM_WORKERS
13
+ from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, \
14
+ DATA_PARALLEL_GROUP, GLOBAL_RANK
15
+
16
+
17
+ class RepeatingLoader:
18
+
19
+ def __init__(self, loader):
20
+ """Wraps an iterator to allow for infinite iteration. This is especially useful
21
+ for DataLoader types that we wish to automatically restart upon completion.
22
+
23
+ Args:
24
+ loader (iterator): The data loader to repeat.
25
+ """
26
+ self.loader = loader
27
+ self.data_iter = iter(self.loader)
28
+
29
+ def __iter__(self):
30
+ return self
31
+
32
+ def __next__(self):
33
+ try:
34
+ batch = next(self.data_iter)
35
+ except StopIteration:
36
+ self.data_iter = iter(self.loader)
37
+ batch = next(self.data_iter)
38
+ return batch
39
+
40
+
41
+ class DeepSpeedDataLoader(object):
42
+
43
+ def __init__(self,
44
+ dataset,
45
+ batch_size,
46
+ pin_memory,
47
+ local_rank,
48
+ tput_timer,
49
+ collate_fn=None,
50
+ num_local_io_workers=None,
51
+ data_sampler=None,
52
+ data_parallel_world_size=None,
53
+ data_parallel_rank=None,
54
+ dataloader_drop_last=False,
55
+ deepspeed_dataloader_config={}):
56
+ self.deepspeed_dataloader_config = deepspeed_dataloader_config
57
+ self.tput_timer = tput_timer
58
+ self.batch_size = batch_size
59
+ self.curriculum_learning_enabled = False
60
+ if CURRICULUM_LEARNING in deepspeed_dataloader_config:
61
+ self.curriculum_learning_enabled = deepspeed_dataloader_config[CURRICULUM_LEARNING]
62
+
63
+ if self.curriculum_learning_enabled:
64
+ data_sampler = DeepSpeedDataSampler(self.deepspeed_dataloader_config[DATA_EFFICIENCY],
65
+ len(dataset),
66
+ self.batch_size,
67
+ data_parallel_rank,
68
+ data_parallel_world_size,
69
+ self.deepspeed_dataloader_config[DATA_PARALLEL_GROUP],
70
+ self.deepspeed_dataloader_config[GRADIENT_ACCUMULATION_STEPS],
71
+ self.deepspeed_dataloader_config[GLOBAL_RANK],
72
+ drop_last=dataloader_drop_last)
73
+ device_count = get_accelerator().device_count()
74
+ num_local_io_workers = self.deepspeed_dataloader_config[DATA_SAMPLING_NUM_WORKERS]
75
+ else:
76
+ if local_rank >= 0:
77
+ if data_sampler is None:
78
+ data_sampler = DistributedSampler(dataset=dataset,
79
+ num_replicas=data_parallel_world_size,
80
+ rank=data_parallel_rank)
81
+ device_count = 1
82
+ else:
83
+ if data_sampler is None:
84
+ data_sampler = RandomSampler(dataset)
85
+ device_count = get_accelerator().device_count()
86
+ batch_size *= device_count
87
+
88
+ if num_local_io_workers is None:
89
+ num_local_io_workers = 2 * device_count
90
+
91
+ self.num_local_io_workers = num_local_io_workers
92
+ self.data_sampler = data_sampler
93
+ self.dataset = dataset
94
+ self.collate_fn = collate_fn
95
+ self.device_count = device_count
96
+ self.batch_size = batch_size
97
+ self.pin_memory = pin_memory
98
+ self.data = None
99
+ self.dataloader_drop_last = dataloader_drop_last
100
+ self.post_process_func = None
101
+
102
+ if self.dataloader_drop_last:
103
+ self.len = len(self.data_sampler) // self.batch_size
104
+ else:
105
+ from math import ceil
106
+ self.len = ceil(len(self.data_sampler) / self.batch_size)
107
+
108
+ def __iter__(self):
109
+ self._create_dataloader()
110
+ return self
111
+
112
+ def __len__(self):
113
+ return self.len
114
+
115
+ def __next__(self):
116
+ if self.tput_timer:
117
+ self.tput_timer.start()
118
+ if self.curriculum_learning_enabled:
119
+ data = next(self.data_iterator)
120
+ if self.post_process_func is not None:
121
+ data = self.post_process_func(data, self.data_sampler.state_dict())
122
+ return data
123
+ else:
124
+ return next(self.data)
125
+
126
+ def _create_dataloader(self):
127
+ if self.curriculum_learning_enabled:
128
+ if self.collate_fn is None:
129
+ self.dataloader = DataLoader(self.dataset,
130
+ pin_memory=self.pin_memory,
131
+ batch_sampler=self.data_sampler,
132
+ num_workers=self.num_local_io_workers)
133
+ else:
134
+ self.dataloader = DataLoader(self.dataset,
135
+ pin_memory=self.pin_memory,
136
+ batch_sampler=self.data_sampler,
137
+ collate_fn=self.collate_fn,
138
+ num_workers=self.num_local_io_workers)
139
+ self.data_iterator = iter(self.dataloader)
140
+ return self.dataloader
141
+ else:
142
+ if self.collate_fn is None:
143
+ self.dataloader = DataLoader(self.dataset,
144
+ batch_size=self.batch_size,
145
+ pin_memory=self.pin_memory,
146
+ sampler=self.data_sampler,
147
+ num_workers=self.num_local_io_workers,
148
+ drop_last=self.dataloader_drop_last)
149
+ else:
150
+ self.dataloader = DataLoader(self.dataset,
151
+ batch_size=self.batch_size,
152
+ pin_memory=self.pin_memory,
153
+ sampler=self.data_sampler,
154
+ collate_fn=self.collate_fn,
155
+ num_workers=self.num_local_io_workers,
156
+ drop_last=self.dataloader_drop_last)
157
+ self.data = (x for x in self.dataloader)
158
+
159
+ return self.dataloader
160
+
161
+
162
+ # DataLoader([(torch.randn(3, 3), torch.tensor(i % 2)) for i in range(10)], batch_size=2))
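A minimal usage sketch for RepeatingLoader on a toy dataset (illustrative only):

import torch
from torch.utils.data import DataLoader, TensorDataset
from deepspeed.runtime.dataloader import RepeatingLoader

dataset = TensorDataset(torch.arange(10).float().unsqueeze(1))
loader = RepeatingLoader(DataLoader(dataset, batch_size=4))

it = iter(loader)
for _ in range(5):      # more steps than one pass over the 3 batches
    batch = next(it)    # never raises StopIteration; the loader restarts silently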
venv/lib/python3.10/site-packages/deepspeed/runtime/eigenvalue.py ADDED
@@ -0,0 +1,149 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ from deepspeed.utils import log_dist
8
+ import numpy as np
9
+ import logging
10
+
11
+
12
+ class Eigenvalue(object):
13
+
14
+ def __init__(self,
15
+ verbose=False,
16
+ max_iter=100,
17
+ tol=1e-2,
18
+ stability=0,
19
+ gas_boundary_resolution=1,
20
+ layer_name='',
21
+ layer_num=0):
22
+ super().__init__()
23
+
24
+ self.verbose = verbose
25
+ self.max_iter = max_iter
26
+ self.tol = tol
27
+ self.stability = stability
28
+ self.gas_boundary_resolution = gas_boundary_resolution
29
+ self.layer_name = layer_name
30
+ self.layer_num = layer_num
31
+
32
+ assert len(self.layer_name) > 0 and layer_num > 0
33
+
34
+ log_dist(
35
+ f'enabled eigenvalue with verbose={verbose}, max_iter={max_iter}, tol={tol}, stability={stability}, gas_boundary_resolution={gas_boundary_resolution}, layer_name={layer_name}, layer_num={layer_num}',
36
+ ranks=[0])
37
+
38
+ # Replace all nan/pos-inf/neg-inf to zero
39
+ # TODO: Pytorch new version may add this function, replace this one by then.
40
+ def nan_to_num(self, x):
41
+ device = x.device
42
+ x = x.cpu().numpy()
43
+ x = np.nan_to_num(x=x, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
44
+ return torch.from_numpy(x).to(device)
45
+
46
+ def normalize(self, v):
47
+ norm_squared = self.inner_product(v, v)
48
+ norm = norm_squared**0.5 + self.stability
49
+ normalized_vectors = [vector / norm for vector in v]
50
+ normalized_vectors = [self.nan_to_num(vector) for vector in normalized_vectors]
51
+ return normalized_vectors
52
+
53
+ def inner_product(self, xs, ys):
54
+ return sum([torch.sum(x * y) for (x, y) in zip(xs, ys)])
55
+
56
+ def get_layers(self, module):
57
+ scope_names = self.layer_name.split('.')
58
+ assert len(scope_names) > 0
59
+
60
+ m = module
61
+ for name in scope_names:
62
+ assert hasattr(m, name), "layer_name configuration is invalid."
63
+ m = getattr(m, name)
64
+
65
+ return m
66
+
67
+ def compute_eigenvalue(self, module, device=None, scale=1.0):
68
+ block_eigenvalue = []
69
+ param_keys = []
70
+ layers = self.get_layers(module)
71
+
72
+ for block in range(self.layer_num):
73
+ model_block = layers[block]
74
+
75
+ # We found this randn() can have a noticeable accuracy impact in some cases, so save/restore the RNG state around it.
76
+ rng_state = torch.random.get_rng_state()
77
+ if device is None:
78
+ v = [
79
+ torch.randn(p.size()) for p in model_block.parameters()
80
+ if p.grad is not None and p.grad.grad_fn is not None
81
+ ]
82
+ else:
83
+ v = [
84
+ torch.randn(p.size(), device=device) for p in model_block.parameters()
85
+ if p.grad is not None and p.grad.grad_fn is not None
86
+ ]
87
+ torch.random.set_rng_state(rng_state)
88
+
89
+ grads = [
90
+ param.grad for param in model_block.parameters()
91
+ if param.grad is not None and param.grad.grad_fn is not None
92
+ ]
93
+ params = [
94
+ param for param in model_block.parameters()
95
+ if param.grad is not None and param.grad.grad_fn is not None
96
+ ]
97
+
98
+ layer_keys = [id(p) for p in model_block.parameters()]
99
+ param_keys.append(layer_keys)
100
+
101
+ v = self.normalize(v)
102
+
103
+ # Disable eigenvalue if the model doesn't support second order gradients computation,
104
+ # e.g. when enabling DS transformer kernel.
105
+ if len(grads) == 0 or len(params) == 0:
106
+ log_dist(f'The model does NOT support eigenvalue computation.', ranks=[0], level=logging.WARNING)
107
+ return []
108
+
109
+ i = 0
110
+ eigenvalue_current, eigenvalue_previous = 1., 0.
111
+
112
+ while (i < self.max_iter) and abs(eigenvalue_current) > 0 and (abs(
113
+ (eigenvalue_current - eigenvalue_previous) / eigenvalue_current) >=
114
+ self.tol): # test convergence criteria
115
+ eigenvalue_previous = eigenvalue_current
116
+
117
+ Hv = torch.autograd.grad(grads, params, grad_outputs=v, only_inputs=True, retain_graph=True)
118
+ #Hv = [hv.float() for hv in Hv]
119
+ Hv = [self.nan_to_num(hv).float() for hv in Hv]
120
+
121
+ eigenvalue_current = self.inner_product(Hv, v).item()
122
+
123
+ v = self.normalize(Hv)
124
+ v = [x / scale for x in v]
125
+ i += 1
126
+
127
+ eigenvalue_current *= scale
128
+ block_eigenvalue.append(eigenvalue_current)
129
+
130
+ if self.verbose:
131
+ log_dist(f'block: {block}, power iteration: {i}, eigenvalue: {eigenvalue_current}', ranks=[0])
132
+
133
+ block_eigenvalue = self.post_process(block_eigenvalue)
134
+
135
+ if self.verbose:
136
+ log_dist(f'post processed block_eigenvalue: {block_eigenvalue}', ranks=[0])
137
+
138
+ # {param_id: (eigenvalue, layer_id)}
139
+ ev_dict = {}
140
+ for i, (layer_keys, value) in enumerate(zip(param_keys, block_eigenvalue)):
141
+ ev_dict.update(dict.fromkeys(layer_keys, (value, i)))
142
+
143
+ return ev_dict
144
+
145
+ # 1. Map all eigenvalues to [0, 1.0].
146
+ # 2. Some layers can't generate valid eigenvalues on fp16 precision, use 1.0 instead.
147
+ def post_process(self, value_list):
148
+ max_value = abs(max(value_list, key=abs))
149
+ return [abs(v) / max_value if v != 0.0 else 1.0 for v in value_list]
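The loop above is power iteration on the Hessian through Hessian-vector products; the same idea applied to an explicit symmetric matrix looks like this (illustrative only):

import torch

def dominant_eigenvalue(A, max_iter=100, tol=1e-2):
    v = torch.randn(A.size(0))
    v = v / v.norm()
    prev, cur, i = 0.0, 1.0, 0
    while i < max_iter and abs(cur) > 0 and abs((cur - prev) / cur) >= tol:
        prev = cur
        Av = A @ v                      # stands in for the Hessian-vector product
        cur = torch.dot(Av, v).item()   # Rayleigh quotient estimate (v has unit norm)
        v = Av / Av.norm()
        i += 1
    return cur

A = torch.tensor([[2.0, 0.0], [0.0, 5.0]])
print(dominant_eigenvalue(A))           # converges to ~5.0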
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ '''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (240 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/fused_optimizer.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/loss_scaler.cpython-310.pyc ADDED
Binary file (8.28 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/unfused_optimizer.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/fused_optimizer.py ADDED
@@ -0,0 +1,514 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Copyright NVIDIA/apex
7
+ This file is adapted from FP16_Optimizer in NVIDIA/apex
8
+ """
9
+
10
+ import torch
11
+ from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
12
+ from deepspeed.runtime.base_optimizer import DeepSpeedOptimizer
13
+ from deepspeed.runtime.utils import get_global_norm, get_flattened_grad_norm, CheckOverflow, get_weight_norm, get_norm_with_moe_layers, is_model_parallel_parameter
14
+ from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE
15
+ from deepspeed.utils import logger, log_dist
16
+ from deepspeed.utils.torch import required_torch_version
17
+ from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, CLIP_GRAD
18
+ from deepspeed.accelerator import get_accelerator
19
+ from deepspeed.moe.utils import is_moe_param_group
20
+ from deepspeed.runtime.constants import PIPE_REPLICATED
21
+ from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank
22
+
23
+ OVERFLOW_CHECK_TIMER = 'overflow_check'
24
+ COMPUTE_NORM_TIMER = 'compute_norm'
25
+ UNSCALE_AND_CLIP_TIMER = 'unscale_and_clip'
26
+ BASIC_STEP_TIMER = 'basic_step'
27
+ UPDATE_FP16_TIMER = 'update_fp16'
28
+
29
+ OVERFLOW_TIMERS = [COMPUTE_NORM_TIMER, OVERFLOW_CHECK_TIMER]
30
+ STEP_TIMERS = OVERFLOW_TIMERS + [UNSCALE_AND_CLIP_TIMER, BASIC_STEP_TIMER, UPDATE_FP16_TIMER]
31
+
32
+
33
+ class FP16_Optimizer(DeepSpeedOptimizer):
34
+ """
35
+ FP16 Optimizer for training fp16 models. Handles loss scaling.
36
+
37
+ For usage example please see, TODO: DeepSpeed V2 Tutorial
38
+ """
39
+
40
+ def __init__(self,
41
+ init_optimizer,
42
+ deepspeed=None,
43
+ static_loss_scale=1.0,
44
+ dynamic_loss_scale=False,
45
+ initial_dynamic_scale=2**32,
46
+ dynamic_loss_args=None,
47
+ verbose=True,
48
+ mpu=None,
49
+ clip_grad=0.0,
50
+ fused_adam_legacy=False,
51
+ has_moe_layers=False,
52
+ timers=None):
53
+
54
+ self.fused_adam_legacy = fused_adam_legacy
55
+ self.timers = timers
56
+ self.deepspeed = deepspeed
57
+ self.has_moe_layers = has_moe_layers
58
+ self.using_pipeline = self.deepspeed.pipeline_parallelism
59
+ if not get_accelerator().is_available():
60
+ raise SystemError("Cannot use fp16 without accelerator.")
61
+ self.optimizer = init_optimizer
62
+
63
+ # param flattened by groups
64
+ self.fp16_groups = []
65
+ self.fp16_groups_flat = []
66
+ self.fp32_groups_flat = []
67
+
68
+ self.flatten_grad_norm_mask_list = []
69
+ self.has_executed_step = False
70
+ self._global_grad_norm = 0.
71
+
72
+ # loop to deal with groups
73
+ for i, param_group in enumerate(self.optimizer.param_groups):
74
+ # push this group to list before modify
75
+ self.fp16_groups.append(param_group['params'])
76
+ # init fp16 weight buffer, flattened
77
+ self.fp16_groups_flat.append(_flatten_dense_tensors([p.clone().detach() for p in self.fp16_groups[i]]))
78
+ # set model fp16 weight to slices of flattened buffer
79
+ updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i])
80
+ for p, q in zip(self.fp16_groups[i], updated_params):
81
+ p.data = q.data
82
+ # init master weight, flattened
83
+ self.fp32_groups_flat.append(self.fp16_groups_flat[i].clone().float().detach())
84
+ # modify optimizer to have flat master weight
85
+ self.fp32_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it
86
+ param_group['params'] = [self.fp32_groups_flat[i]]
87
+
88
+ # we may have a way of fusing dynamic scale; not supported for now
89
+ if dynamic_loss_scale:
90
+ self.dynamic_loss_scale = True
91
+ self.cur_iter = 0
92
+ self.last_overflow_iter = -1
93
+ self.scale_factor = 2
94
+
95
+ if dynamic_loss_args is None:
96
+ self.cur_scale = initial_dynamic_scale
97
+ self.scale_window = 1000
98
+ self.min_loss_scale = 1
99
+ else:
100
+ self.cur_scale = dynamic_loss_args[INITIAL_LOSS_SCALE]
101
+ self.scale_window = dynamic_loss_args[SCALE_WINDOW]
102
+ self.min_loss_scale = dynamic_loss_args[MIN_LOSS_SCALE]
103
+ else:
104
+ self.dynamic_loss_scale = False
105
+ self.cur_iter = 0
106
+ self.cur_scale = static_loss_scale
107
+ self.verbose = verbose
108
+
109
+ self.custom_loss_scaler = False
110
+ self.external_loss_scale = None
111
+
112
+ self.clip_grad = clip_grad
113
+ self.norm_type = 2
114
+
115
+ if required_torch_version(max_version=0.4):
116
+ self.clip_grad_norm = torch.nn.utils.clip_grad_norm
117
+ else:
118
+ self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
119
+
120
+ #model parallel object
121
+ self.mpu = mpu
122
+
123
+ self.overflow = False
124
+ self.overflow_checker = CheckOverflow(self.fp16_groups, mpu=self.mpu, deepspeed=deepspeed)
125
+ self.initialize_optimizer_states()
126
+
127
+ def initialize_optimizer_states(self):
128
+ for i, group in enumerate(self.fp16_groups):
129
+ self.fp32_groups_flat[i].grad = torch.zeros(self.fp32_groups_flat[i].size(),
130
+ device=self.fp32_groups_flat[i].device)
131
+
132
+ self.optimizer.step()
133
+
134
+ for i, group in enumerate(self.fp16_groups):
135
+ self.fp32_groups_flat[i].grad = None
136
+
137
+ return
138
+
139
+ def zero_grad(self, set_to_none=True):
140
+ """
141
+ Zero FP16 parameter grads.
142
+ """
143
+ # For speed, set model fp16 grad to None by default
144
+ for group in self.fp16_groups:
145
+ for p in group:
146
+ if set_to_none:
147
+ p.grad = None
148
+ else:
149
+ if p.grad is not None:
150
+ p.grad.detach_()
151
+ p.grad.zero_()
152
+
153
+ def step_fused_adam(self, closure=None):
154
+ """
155
+ Not supporting closure.
156
+ """
157
+
158
+ # First compute norm for all group so we know if there is overflow
159
+ grads_groups_flat = []
160
+ norm_groups = []
161
+ for i, group in enumerate(self.fp16_groups):
162
+ grads_groups_flat.append(
163
+ _flatten_dense_tensors([
164
+ torch.zeros(p.size(), dtype=p.dtype, device=p.device) if p.grad is None else p.grad for p in group
165
+ ]))
166
+ norm_groups.append(get_weight_norm(grads_groups_flat[i], mpu=self.mpu))
167
+
168
+ self.overflow = self.overflow_checker.check_using_norm(norm_groups)
169
+ prev_scale = self.cur_scale
170
+ self._update_scale(self.overflow)
171
+
172
+ if self.overflow:
173
+ if self.verbose:
174
+ logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss "
175
+ "scale: {}, reducing to {}".format(prev_scale, self.cur_scale))
176
+ return self.overflow
177
+
178
+ scaled_grad_norm = get_global_norm(norm_list=norm_groups)
179
+
180
+ combined_scale = self.unscale_and_clip_grads(grads_groups_flat, scaled_grad_norm, apply_scale=False)
181
+
182
+ # Stash unscaled gradient norm
183
+ self._global_grad_norm = scaled_grad_norm / self.cur_scale
184
+
185
+ # norm is in fact norm*cur_scale
186
+ self.optimizer.step(grads=[[g] for g in grads_groups_flat],
187
+ output_params=[[p] for p in self.fp16_groups_flat],
188
+ scale=combined_scale,
189
+ grad_norms=norm_groups)
190
+ # TODO: we probably don't need this? just to be safe
191
+ for i in range(len(norm_groups)):
192
+ updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i])
193
+ for p, q in zip(self.fp16_groups[i], updated_params):
194
+ p.data = q.data
195
+ return self.overflow
196
+
197
+ def set_lr(self, lr):
198
+ """Set the learning rate."""
199
+ for param_group in self.optimizer.param_groups:
200
+ param_group["lr"] = lr
201
+
202
+ def get_lr(self):
203
+ """Return the current learning rate."""
204
+ return self.optimizer.param_groups[0]["lr"]
205
+
206
+ def override_loss_scale(self, loss_scale):
207
+ if loss_scale != self.external_loss_scale:
208
+ logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
209
+ self.custom_loss_scaler = True
210
+ self.external_loss_scale = loss_scale
211
+
212
+ def _require_avoid_recompute_norm(self, p, tensor_model_parallel_rank):
213
+ # for filtering replicated tensors out of the flattened-gradient norm computation
214
+ if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
215
+ return True
216
+ if (tensor_model_parallel_rank > 0) and not is_model_parallel_parameter(p):
217
+ return True
218
+
219
+ def _get_norm_mask_idx(self, group):
220
+ """The function preserves the parallel information for norm
221
+ from unflattened gradients.
222
+
223
+ Args:
224
+ group (Iterable[Tensor] ): params group
225
+
226
+ Returns:
227
+ torch.Tensor: A 2D tensor containing index ranges for each group,
228
+ where each row represents a [start index, end index].
229
+ """
230
+ group_mask_idx_list = []
231
+ grad_flat_st_idx = 0
232
+ grad_flat_en_idx = 0
233
+
234
+ for p in group:
235
+ grad_flat_en_idx = grad_flat_st_idx + p.numel()
236
+ if p.grad is not None and self._require_avoid_recompute_norm(p, bwc_tensor_model_parallel_rank(self.mpu)):
237
+ # merge range
238
+ if len(group_mask_idx_list) > 0 and grad_flat_st_idx == group_mask_idx_list[-1][-1]:
239
+ group_mask_idx_list[-1][-1] = grad_flat_en_idx
240
+ else:
241
+ group_mask_idx_list.append([grad_flat_st_idx, grad_flat_en_idx])
242
+ grad_flat_st_idx = grad_flat_en_idx
243
+
244
+ return torch.tensor(group_mask_idx_list, device=get_accelerator().current_device())
245
+
246
+ def step(self, closure=None):
247
+ """
248
+ Not supporting closure.
249
+ """
250
+
251
+ if self.fused_adam_legacy:
252
+ return self.step_fused_adam()
253
+
254
+ # First determine if there is overflow.
255
+ self.timers(OVERFLOW_CHECK_TIMER).start()
256
+ fp16_params = []
257
+ for i, group in enumerate(self.fp16_groups):
258
+ fp16_params.extend([p for p in group if p.grad is not None])
259
+ self.overflow = self.overflow_checker.has_overflow(fp16_params)
260
+ self.timers(OVERFLOW_CHECK_TIMER).stop()
261
+ prev_scale = self.cur_scale
262
+ self._update_scale(self.overflow)
263
+ if self.overflow:
264
+ if self.verbose:
265
+ log_dist(
266
+ "Overflow detected. Skipping step. Attempted loss "
267
+ f"scale: {prev_scale}, reducing to {self.cur_scale}",
268
+ ranks=[0])
269
+ # Clear gradients
270
+ for i, group in enumerate(self.fp16_groups):
271
+ for p in group:
272
+ p.grad = None
273
+
274
+ self.timers.log(OVERFLOW_TIMERS)
275
+ return self.overflow
276
+
277
+ grads_groups_flat = []
278
+ non_experts_grads_for_norm = []
279
+ expert_grads_for_norm = {}
280
+ assert len(self.fp16_groups) == len(self.optimizer.param_groups)
281
+
282
+ for i, group in enumerate(self.fp16_groups):
283
+ data_type = self.fp32_groups_flat[i].dtype
284
+
285
+ grads_groups_flat.append(
286
+ _flatten_dense_tensors([
287
+ torch.zeros(p.size(), dtype=data_type, device=p.device) if p.grad is None else p.grad.to(data_type)
288
+ for p in group
289
+ ]))
290
+
291
+ self.fp32_groups_flat[i].grad = grads_groups_flat[i]
292
+ param_group = self.optimizer.param_groups[i]
293
+
294
+ # split expert and non_expert grads for norm
295
+ if self.has_moe_layers and is_moe_param_group(param_group):
296
+ if param_group['name'] not in expert_grads_for_norm:
297
+ expert_grads_for_norm[param_group['name']] = []
298
+
299
+ expert_grads_for_norm[param_group['name']].append(self.fp32_groups_flat[i])
300
+ else:
301
+ # retrieves the required mask for calculating the norm of flat_grad
302
+ # perform this collect operation only once
303
+ if not self.has_executed_step:
304
+ cur_flat_grad_norm_mask = self._get_norm_mask_idx(group)
305
+ self.flatten_grad_norm_mask_list.append(cur_flat_grad_norm_mask)
306
+
307
+ non_experts_grads_for_norm.append(self.fp32_groups_flat[i])
308
+
309
+ for p in group:
310
+ p.grad = None
311
+
312
+ self.timers(COMPUTE_NORM_TIMER).start()
313
+
314
+ all_groups_norm = get_flattened_grad_norm(non_experts_grads_for_norm,
315
+ mpu=self.mpu,
316
+ grad_norm_mask=self.flatten_grad_norm_mask_list)
317
+
318
+ if self.has_moe_layers:
319
+ all_groups_norm = get_norm_with_moe_layers(all_groups_norm,
320
+ mpu=self.mpu,
321
+ expert_tensors=expert_grads_for_norm,
322
+ norm_type=self.norm_type)
323
+
324
+ scaled_global_grad_norm = get_global_norm(norm_list=[all_groups_norm])
325
+ self.timers(COMPUTE_NORM_TIMER).stop()
326
+
327
+ # Stash unscaled gradient norm
328
+ self._global_grad_norm = scaled_global_grad_norm / self.cur_scale
329
+
330
+ self.timers(UNSCALE_AND_CLIP_TIMER).start()
331
+ self.unscale_and_clip_grads(grads_groups_flat, scaled_global_grad_norm)
332
+ self.timers(UNSCALE_AND_CLIP_TIMER).stop()
333
+
334
+ self.timers(BASIC_STEP_TIMER).start()
335
+ self.optimizer.step()
336
+ self.timers(BASIC_STEP_TIMER).stop()
337
+
338
+ #get rid of the fp32 gradients. Not needed anymore
339
+ for group in self.fp32_groups_flat:
340
+ group.grad = None
341
+
342
+ self.timers(UPDATE_FP16_TIMER).start()
343
+
344
+ for i in range(len(self.fp16_groups)):
345
+ updated_params = _unflatten_dense_tensors(self.fp32_groups_flat[i], self.fp16_groups[i])
346
+ for p, q in zip(self.fp16_groups[i], updated_params):
347
+ p.data.copy_(q.data)
348
+ self.has_executed_step = True
349
+ self.timers(UPDATE_FP16_TIMER).stop()
350
+
351
+ self.timers.log(STEP_TIMERS)
352
+
353
+ return self.overflow
354
+
355
+ def unscale_and_clip_grads(self, grad_groups_flat, total_norm, apply_scale=True):
356
+ # compute combined scale factor for this group
357
+ combined_scale = self.cur_scale
358
+ if self.clip_grad > 0.:
359
+ # norm is in fact norm*scale
360
+ clip = ((total_norm / self.cur_scale) + 1e-6) / self.clip_grad
361
+ if clip > 1:
362
+ combined_scale = clip * self.cur_scale
363
+
364
+ if apply_scale:
365
+ for grad in grad_groups_flat:
366
+ grad.data.mul_(1. / combined_scale)
367
+
368
+ return combined_scale
369
+
370
+ def backward(self, loss, create_graph=False, retain_graph=False):
371
+ """
372
+ :attr:`backward` performs the following steps:
373
+
374
+ 1. fp32_loss = loss.float()
375
+ 2. scaled_loss = fp32_loss*loss_scale
376
+ 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
377
+ """
378
+ if self.custom_loss_scaler:
379
+ scaled_loss = self.external_loss_scale * loss
380
+ scaled_loss.backward()
381
+ else:
382
+ scaled_loss = (loss.float()) * self.cur_scale
383
+ scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph)
384
+
385
+ def _update_scale(self, skip):
386
+ if self.dynamic_loss_scale:
387
+ prev_scale = self.cur_scale
388
+ if skip:
389
+ self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_loss_scale)
390
+ self.last_overflow_iter = self.cur_iter
391
+ if self.verbose:
392
+ logger.info(f"\nGrad overflow on iteration {self.cur_iter}")
393
+ logger.info(f"Reducing dynamic loss scale from {prev_scale} to {self.cur_scale}")
394
+ else:
395
+ # Ensure self.scale_window updates since last overflow
396
+ stable_interval = (self.cur_iter - self.last_overflow_iter) - 1
397
+ if (stable_interval > 0) and (stable_interval % self.scale_window == 0):
398
+ self.cur_scale *= self.scale_factor
399
+ if self.verbose:
400
+ logger.info(f"No Grad overflow for {self.scale_window} iterations")
401
+ logger.info(f"Increasing dynamic loss scale from {prev_scale} to {self.cur_scale}")
402
+ else:
403
+ if skip:
404
+ logger.info("Grad overflow on iteration: %s", self.cur_iter)
405
+ logger.info("Using static loss scale of: %s", self.cur_scale)
406
+ self.cur_iter += 1
407
+ return
408
+
409
+ # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
410
+ def _get_state(self):
411
+ return self.optimizer.state
412
+
413
+ def _set_state(self, value):
414
+ self.optimizer.state = value
415
+
416
+ state = property(_get_state, _set_state)
417
+
418
+ # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
419
+ # (for example, to adjust the learning rate)
420
+ def _get_param_groups(self):
421
+ return self.optimizer.param_groups
422
+
423
+ def _set_param_groups(self, value):
424
+ self.optimizer.param_groups = value
425
+
426
+ param_groups = property(_get_param_groups, _set_param_groups)
427
+
428
+ def state_dict(self):
429
+ """
430
+ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
431
+ This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
432
+ of the contained Pytorch optimizer.
433
+ Example::
434
+ checkpoint = {}
435
+ checkpoint['model'] = model.state_dict()
436
+ checkpoint['optimizer'] = optimizer.state_dict()
437
+ torch.save(checkpoint, "saved.pth")
438
+ """
439
+ state_dict = {}
440
+ state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
441
+ state_dict['cur_scale'] = self.cur_scale
442
+ state_dict['cur_iter'] = self.cur_iter
443
+ if state_dict['dynamic_loss_scale']:
444
+ state_dict['last_overflow_iter'] = self.last_overflow_iter
445
+ state_dict['scale_factor'] = self.scale_factor
446
+ state_dict['scale_window'] = self.scale_window
447
+ state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
448
+ state_dict['fp32_groups_flat'] = self.fp32_groups_flat
449
+ state_dict[CLIP_GRAD] = self.clip_grad
450
+ return state_dict
451
+
452
+ # Refresh fp32 master params from fp16 copies
453
+ def refresh_fp32_params(self):
454
+ for current, saved in zip(self.fp32_groups_flat, self.fp16_groups_flat):
455
+ current.data.copy_(saved.data)
456
+
457
+ def load_state_dict(self, state_dict, load_optimizer_states=True):
458
+ """
459
+ Loads a state_dict created by an earlier call to state_dict().
460
+ If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
461
+ whose parameters in turn came from ``model``, it is expected that the user
462
+ will call ``model.load_state_dict()`` before
463
+ ``fp16_optimizer_instance.load_state_dict()`` is called.
464
+ Example::
465
+ model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
466
+ optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
467
+ optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
468
+ ...
469
+ checkpoint = torch.load("saved.pth")
470
+ model.load_state_dict(checkpoint['model'])
471
+ optimizer.load_state_dict(checkpoint['optimizer'])
472
+ """
473
+ # I think it should actually be ok to reload the optimizer before the model.
474
+ self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
475
+ self.cur_scale = state_dict['cur_scale']
476
+ self.cur_iter = state_dict['cur_iter']
477
+ if state_dict['dynamic_loss_scale']:
478
+ self.last_overflow_iter = state_dict['last_overflow_iter']
479
+ self.scale_factor = state_dict['scale_factor']
480
+ self.scale_window = state_dict['scale_window']
481
+ if load_optimizer_states:
482
+ self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT])
483
+ self.clip_grad = state_dict[CLIP_GRAD]
484
+ # At this point, the optimizer's references to the model's fp32 parameters are up to date.
485
+ # The optimizer's hyperparameters and internal buffers are also up to date.
486
+ # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
487
+ # out of date. There are two options.
488
+ # 1: Refresh the master params from the model's fp16 params.
489
+ # This requires less storage but incurs precision loss.
490
+ # 2: Save and restore the fp32 master copies separately.
491
+ # We choose option 2.
492
+ #
493
+ # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
494
+ # of their associated parameters, because it's possible those buffers might not exist yet in
495
+ # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
496
+ # constructed in the same way as the one whose state_dict we are loading, the same master params
497
+ # are guaranteed to exist, so we can just copy_() from the saved master params.
498
+ for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']):
499
+ current.data.copy_(saved.data)
500
+
501
+ def __repr__(self):
502
+ return repr(self.optimizer)
503
+
504
+ # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
505
+ def _get_loss_scale(self):
506
+ if self.custom_loss_scaler:
507
+ return self.external_loss_scale
508
+ else:
509
+ return self.cur_scale
510
+
511
+ def _set_loss_scale(self, value):
512
+ self.loss_scaler.cur_scale = value
513
+
514
+ loss_scale = property(_get_loss_scale, _set_loss_scale)
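The unscale_and_clip_grads arithmetic above can be read in isolation as follows (made-up numbers):

# gradients currently hold grad * cur_scale, and total_norm is the scaled norm
cur_scale, clip_grad, total_norm = 1024.0, 1.0, 2048.0    # illustrative values

combined_scale = cur_scale
if clip_grad > 0.0:
    clip = ((total_norm / cur_scale) + 1e-6) / clip_grad  # unscaled norm vs. clip threshold
    if clip > 1:
        combined_scale = clip * cur_scale                 # one division both unscales and clips

# grad / combined_scale == (grad / cur_scale) / clip, i.e. the unscaled gradient clipped to clip_grad
print(combined_scale)                                     # ~2048.0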
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/loss_scaler.py ADDED
@@ -0,0 +1,270 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+ """
6
+ Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
7
+
8
+ Licensed under the Apache License, Version 2.0 (the "License");
9
+ you may not use this file except in compliance with the License.
10
+ You may obtain a copy of the License at
11
+
12
+ http://www.apache.org/licenses/LICENSE-2.0
13
+
14
+ Unless required by applicable law or agreed to in writing, software
15
+ distributed under the License is distributed on an "AS IS" BASIS,
16
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ See the License for the specific language governing permissions and
18
+ limitations under the License.
19
+ Taken and modified for DeepSpeed from:
20
+ https://github.com/NVIDIA/Megatron-LM/blob/master/fp16/loss_scaler.py
21
+ Commit: 93ab4bea59dc5cbf97c079d313741866af4deac9
22
+ """
23
+
24
+ import torch
25
+ from deepspeed import comm as dist
26
+ from deepspeed.utils import logger
27
+
28
+ INITIAL_LOSS_SCALE = 'init_scale'
29
+ SCALE_WINDOW = 'scale_window'
30
+ DELAYED_SHIFT = 'delayed_shift'
31
+ CONSECUTIVE_HYSTERESIS = 'consecutive_hysteresis'
32
+ MIN_LOSS_SCALE = 'min_scale'
33
+
34
+
35
+ # item() is a recent addition, so this helps with backward compatibility.
36
+ def to_python_float(t):
37
+ if hasattr(t, 'item'):
38
+ return t.item()
39
+ return t[0]
40
+
41
+
42
+ class LossScalerBase:
43
+ """LossScalarBase
44
+ Base class for a loss scaler
45
+ """
46
+
47
+ def __init__(self, cur_scale):
48
+ self.cur_scale = cur_scale
49
+ self.dynamic = False
50
+
51
+ @property
52
+ def loss_scale(self):
53
+ return self.cur_scale
54
+
55
+ def scale_gradient(self, module, grad_in, grad_out):
56
+ return tuple(self.loss_scale * g for g in grad_in)
57
+
58
+ def update_scale(self, overflow):
59
+ pass
60
+
61
+ def backward(self, loss, retain_graph=False):
62
+ scaled_loss = loss * self.loss_scale
63
+ scaled_loss.backward(retain_graph=retain_graph)
64
+ # print(f'LossScalerBackward: {scaled_loss=}')
65
+
66
+
67
+ class LossScaler(LossScalerBase):
68
+ """
69
+ Class that manages a static loss scale. This class is intended to interact with
70
+ :class:`FP16_Optimizer`, and should not be directly manipulated by the user.
71
+
72
+ Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to
73
+ :class:`FP16_Optimizer`'s constructor.
74
+
75
+ Args:
76
+ scale (float, optional, default=1.0): The loss scale.
77
+ """
78
+
79
+ def __init__(self, scale=1):
80
+ super(LossScaler, self).__init__(scale)
81
+
82
+ # `params` is a list / generator of torch.Variable
83
+ def has_overflow(self, params):
84
+ return False
85
+
86
+ # `x` is a torch.Tensor
87
+ def _has_inf_or_nan(x):
88
+ return False
89
+
90
+
91
+ class DynamicLossScaler(LossScalerBase):
92
+ """
93
+ Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler`
94
+ indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of
95
+ :class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler`
96
+ operates, because the default options can be changed using the
97
+ the ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor.
98
+
99
+ Loss scaling is designed to combat the problem of underflowing gradients encountered at long
100
+ times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss
101
+ scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are
102
+ encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has
103
+ occurred.
104
+ :class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch,
105
+ and :class:`DynamicLossScaler` adjusts the loss scale to a lower value.
106
+ If a certain number of iterations occur without overflowing gradients detected,
107
+ :class:`DynamicLossScaler` increases the loss scale once more.
108
+ In this way :class:`DynamicLossScaler` attempts to "ride the edge" of
109
+ always using the highest loss scale possible without incurring overflow.
110
+
111
+ Args:
112
+ init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.`
113
+ scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``.
114
+ scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale.
115
+ consecutive_hysteresis (bool, optional, default=False): Whether to refill hysteresis if we reach an iteration that doesn't overflow
116
+ """
117
+
118
+ def __init__(self,
119
+ init_scale=2**32,
120
+ scale_factor=2.,
121
+ scale_window=1000,
122
+ min_scale=1,
123
+ delayed_shift=1,
124
+ consecutive_hysteresis=False,
125
+ raise_error_at_min_scale=True,
126
+ dtype=torch.half):
127
+ super(DynamicLossScaler, self).__init__(init_scale)
128
+ self.cur_iter = 0
129
+ self.last_overflow_iter = -1
130
+ self.scale_factor = scale_factor
131
+ self.scale_window = scale_window
132
+ self.min_scale = min_scale
133
+ self.delayed_shift = delayed_shift
134
+ self.cur_hysteresis = delayed_shift
135
+ self.consecutive_hysteresis = consecutive_hysteresis
136
+ self.raise_error_at_min_scale = raise_error_at_min_scale
137
+ self.dynamic = True
138
+ self.dtype = dtype
139
+
140
+ # `params` is a list / generator of torch.Variable
141
+ def has_overflow_serial(self, params):
142
+ for p in params:
143
+ if p.grad is not None and self._has_inf_or_nan(p.grad.data):
144
+ return True
145
+
146
+ return False
147
+
148
+ # `x` is a torch.Tensor
149
+ def _has_inf_or_nan(x):
150
+ try:
151
+ # if x is half, the .float() incurs an additional deep copy, but it's necessary if
152
+ # Pytorch's .sum() creates a one-element tensor of the same type as x
153
+ # (which is true for some recent version of pytorch).
154
+ cpu_sum = float(x.float().sum())
155
+ # More efficient version that can be used if .sum() returns a Python scalar
156
+ # cpu_sum = float(x.sum())
157
+ except RuntimeError as instance:
158
+ # We want to check if inst is actually an overflow exception.
159
+ # RuntimeError could come from a different error.
160
+ # If so, we still want the exception to propagate.
161
+ if "value cannot be converted" not in instance.args[0]:
162
+ raise
163
+ return True
164
+ else:
165
+ if cpu_sum in [float('inf'), -float('inf')] or cpu_sum != cpu_sum:
166
+ return True
167
+ return False
168
+
169
+ # `overflow` is boolean indicating whether the gradient overflowed
170
+ def update_scale(self, overflow):
171
+ if overflow:
172
+ # self.cur_scale /= self.scale_factor
173
+ if self.delayed_shift == 1 or self.cur_hysteresis == 1:
174
+ if (self.cur_scale == self.min_scale) and self.raise_error_at_min_scale:
175
+ raise Exception(
176
+ "Current loss scale already at minimum - cannot decrease scale anymore. Exiting run.")
177
+ else:
178
+ next_scale = max(self.cur_scale / self.scale_factor, self.min_scale)
179
+ if dist.get_rank() == 0:
180
+ overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step."
181
+ if self.dtype == torch.half:
182
+ overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, reducing to {int(next_scale)}"
183
+ logger.info(overflow_msg)
184
+ self.cur_scale = next_scale
185
+ else:
186
+ if dist.get_rank() == 0:
187
+ overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step."
188
+ if self.dtype == torch.half:
189
+ overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, but hysteresis is {self.cur_hysteresis}. Reducing hysteresis to {self.cur_hysteresis-1}"
190
+ logger.info(overflow_msg)
191
+ self.cur_hysteresis -= 1
192
+ self.last_overflow_iter = self.cur_iter
193
+ else:
194
+ if self.consecutive_hysteresis:
195
+ if dist.get_rank() == 0:
196
+ hysteresis_msg = f"Consecutive hysteresis is enabled. Restoring hysteresis to {self.delayed_shift}"
197
+ logger.info(hysteresis_msg)
198
+ self.cur_hysteresis = self.delayed_shift
199
+ if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
200
+ if not self.consecutive_hysteresis:
201
+ self.cur_hysteresis = self.delayed_shift
202
+ self.cur_scale *= self.scale_factor
203
+ self.cur_iter += 1
204
+
205
+
206
+ # Although loss scaling is only defined for fp16, for backwards compatibility
207
+ # we still create a scaler for other dtypes (fp32, bf16) which does not perform any scaling.
208
+ def CreateLossScaler(dtype, static_loss_scale, dynamic_scaling, dynamic_loss_args):
209
+ if dtype == torch.half and dynamic_scaling:
210
+ if dynamic_loss_args is None:
211
+ return DynamicLossScaler(dtype=dtype)
212
+ return DynamicLossScaler(dtype=dtype, **dynamic_loss_args)
213
+
214
+ loss_scale_value = static_loss_scale if dtype == torch.half else 1.0
215
+ return LossScaler(scale=loss_scale_value)
216
+
217
+
218
+ ##############################################################
219
+ # Example usage below here -- assuming it's in a separate file
220
+ ##############################################################
221
+ """
222
+ TO-DO separate out into an example.
223
+ if __name__ == "__main__":
224
+ import torch
225
+ from torch.autograd import Variable
226
+ from dynamic_loss_scaler import DynamicLossScaler
227
+
228
+ # N is batch size; D_in is input dimension;
229
+ # H is hidden dimension; D_out is output dimension.
230
+ N, D_in, H, D_out = 64, 1000, 100, 10
231
+
232
+ # Create random Tensors to hold inputs and outputs, and wrap them in Variables.
233
+ x = Variable(torch.randn(N, D_in), requires_grad=False)
234
+ y = Variable(torch.randn(N, D_out), requires_grad=False)
235
+
236
+ w1 = Variable(torch.randn(D_in, H), requires_grad=True)
237
+ w2 = Variable(torch.randn(H, D_out), requires_grad=True)
238
+ parameters = [w1, w2]
239
+
240
+ learning_rate = 1e-6
241
+ optimizer = torch.optim.SGD(parameters, lr=learning_rate)
242
+ loss_scaler = DynamicLossScaler()
243
+
244
+ for t in range(500):
245
+ y_pred = x.mm(w1).clamp(min=0).mm(w2)
246
+ loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
247
+ print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
248
+ print('Iter {} scaled loss: {}'.format(t, loss.data[0]))
249
+ print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale))
250
+
251
+ # Run backprop
252
+ optimizer.zero_grad()
253
+ loss.backward()
254
+
255
+ # Check for overflow
256
+ has_overflow = DynamicLossScaler.has_overflow(parameters)
257
+
258
+ # If no overflow, unscale grad and update as usual
259
+ if not has_overflow:
260
+ for param in parameters:
261
+ param.grad.data.mul_(1. / loss_scaler.loss_scale)
262
+ optimizer.step()
263
+ # Otherwise, don't do anything -- ie, skip iteration
264
+ else:
265
+ print('fp16 dynamic loss scale overflow!')
266
+
267
+ # Update loss scale for next iteration
268
+ loss_scaler.update_scale(has_overflow)
269
+
270
+ """
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .adam import OnebitAdam
7
+ from .lamb import OnebitLamb
8
+ from .zoadam import ZeroOneAdam
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (317 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/adam.cpython-310.pyc ADDED
Binary file (8.41 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/lamb.cpython-310.pyc ADDED
Binary file (12.4 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/zoadam.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/adam.py ADDED
@@ -0,0 +1,306 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import types
7
+ import torch
8
+ import numpy as np
9
+ from deepspeed.accelerator import get_accelerator
10
+ from deepspeed.utils.torch import required_torch_version
11
+ from deepspeed import comm as dist
12
+
13
+
14
+ class OnebitAdam(torch.optim.Optimizer):
15
+ """Implements the 1-bit Adam algorithm. Currently GPU-only.
16
+ For usage example please see https://www.deepspeed.ai/tutorials/onebit-adam/
17
+ For technical details please read https://arxiv.org/abs/2102.02888
18
+
19
+ Arguments:
20
+ params (iterable): iterable of parameters to optimize or dicts defining
21
+ parameter groups.
22
+ lr (float, optional): learning rate. (default: 1e-3)
23
+ freeze_step (int, optional): Number of steps for warmup (uncompressed)
24
+ stage before we start using compressed communication. (default 100000)
25
+ betas (Tuple[float, float], optional): coefficients used for computing
26
+ running averages of gradient and its square. (default: (0.9, 0.999))
27
+ eps (float, optional): term added to the denominator to improve
28
+ numerical stability. (default: 1e-8)
29
+ weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
30
+ amsgrad (boolean, optional): whether to use the AMSGrad variant of this
31
+ algorithm from the paper `On the Convergence of Adam and Beyond`_
32
+ (default: False) NOT SUPPORTED in 1-bit Adam!
33
+ eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
34
+ adds eps to the bias-corrected second moment estimate before
35
+ evaluating square root instead of adding it to the square root of
36
+ second moment estimate as in the original paper. (default: False)
37
+ cuda_aware (boolean, required): Set True if the underlying MPI implementation
38
+ supports CUDA-Aware communication. (default: False)
39
+ comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
40
+ .. _Adam\\: A Method for Stochastic Optimization:
41
+ https://arxiv.org/abs/1412.6980
42
+ .. _On the Convergence of Adam and Beyond:
43
+ https://openreview.net/forum?id=ryQu7f-RZ
44
+ """
45
+
46
+ def __init__(self,
47
+ params,
48
+ deepspeed=None,
49
+ lr=1e-3,
50
+ freeze_step=100000,
51
+ bias_correction=True,
52
+ betas=(0.9, 0.999),
53
+ eps=1e-8,
54
+ eps_inside_sqrt=False,
55
+ weight_decay=0.,
56
+ max_grad_norm=0.,
57
+ amsgrad=False,
58
+ cuda_aware=False,
59
+ comm_backend_name='nccl'):
60
+
61
+ if amsgrad:
62
+ raise RuntimeError('1-bit Adam does not support the AMSGrad variant.')
63
+
64
+ defaults = dict(lr=lr,
65
+ bias_correction=bias_correction,
66
+ betas=betas,
67
+ eps=eps,
68
+ weight_decay=weight_decay,
69
+ max_grad_norm=max_grad_norm)
70
+
71
+ super(OnebitAdam, self).__init__(params, defaults)
72
+ self.eps_mode = 0 if eps_inside_sqrt else 1
73
+ self.comm_time = 0.0
74
+ self.step_time = 0.0
75
+ self.ave_step = 1
76
+ self.bk_time = 0.0
77
+
78
+ self.deepspeed = deepspeed
79
+ self.adam_freeze_key = False
80
+ self.initialize = False
81
+ self.freeze_step = freeze_step
82
+ self.cuda_aware = cuda_aware
83
+ self.using_pipeline = False
84
+
85
+ self.comm_backend_name = comm_backend_name
86
+
87
+ assert dist.is_initialized(), "Please initialize the torch distributed backend."
88
+ # Empty initializer. Set handle based on the comm backend as follows.
89
+ self.comm_backend_handle = None
90
+ if self.comm_backend_name == 'nccl':
91
+ assert (
92
+ required_torch_version(min_version=1.8)
93
+ ), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
94
+ from deepspeed.runtime.comm.nccl import NcclBackend
95
+ self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
96
+ self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
97
+ elif self.comm_backend_name == 'mpi':
98
+ from deepspeed.runtime.comm.mpi import MpiBackend
99
+ self.comm_backend_handle = MpiBackend(cuda_aware)
100
+ elif self.comm_backend_name == 'hccl':
101
+ from deepspeed.runtime.comm.hccl import HcclBackend
102
+ self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
103
+ self.comm_backend_handle = HcclBackend(self.deepspeed.mpu)
104
+ self.size = self.comm_backend_handle.size
105
+
106
+ self.divider = int(self.size * 8 / np.gcd(self.size, 8))
107
+
108
+ def step(self, closure=None, grads=None):
109
+ """Performs a single optimization step.
110
+ Arguments:
111
+ closure (callable, optional): A closure that reevaluates the model
112
+ and returns the loss.
113
+ grads (list of tensors, optional): weight gradient to use for the
114
+ optimizer update. If gradients have type torch.half, parameters
115
+ are expected to be in type torch.float. (default: None)
116
+ output params (list of tensors, optional): A reduced precision copy
117
+ of the updated weights written out in addition to the regular
118
+ updated weights. Have to be of same type as gradients. (default: None)
119
+ scale (float, optional): factor to divide gradient tensor values
120
+ by before applying to weights. (default: 1)
121
+ """
122
+ loss = None
123
+ if closure is not None:
124
+ loss = closure()
125
+
126
+ gather_time = 0
127
+ allgather_time = 0
128
+ all_time = 0
129
+
130
+ if self.adam_freeze_key is False:
131
+ v_diff_buffer = 0.0
132
+
133
+ if grads is None:
134
+ grads_group = [None] * len(self.param_groups)
135
+ # backward compatibility
136
+ # assuming a list/generator of parameter means single group
137
+ elif isinstance(grads, types.GeneratorType):
138
+ grads_group = [grads]
139
+ elif type(grads[0]) != list:
140
+ grads_group = [grads]
141
+ else:
142
+ grads_group = grads
143
+
144
+ for group, grads_this_group in zip(self.param_groups, grads_group):
145
+ if grads_this_group is None:
146
+ grads_this_group = [None] * len(group['params'])
147
+
148
+ bias_correction = 1 if group['bias_correction'] else 0
149
+
150
+ for p, grad in zip(group['params'], grads_this_group):
151
+ if p.grad is None and grad is None:
152
+ continue
153
+ if grad is None:
154
+ grad = p.grad.data
155
+ if grad.is_sparse:
156
+ raise RuntimeError('1-bit Adam does not support sparse gradients')
157
+
158
+ state = self.state[p]
159
+
160
+ # State initialization
161
+ if len(state) == 0:
162
+ state['step'] = 0
163
+ # Exponential moving average of gradient values
164
+ state['exp_avg'] = torch.zeros_like(p.data)
165
+ # Exponential moving average of squared gradient values
166
+ state['exp_avg_sq'] = torch.zeros_like(p.data)
167
+
168
+ if not self.initialize or (self.adam_freeze_key and 'worker_error' not in state.keys()):
169
+ state['tensor_size'] = torch.numel(p.data)
170
+ state['corrected_tensor_size'] = state['tensor_size']
171
+
172
+ if state['tensor_size'] % (self.size * self.divider) != 0:
173
+ state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] %
174
+ (self.size * self.divider)))
175
+ state['server_chunk_size'] = state['corrected_tensor_size'] // self.size
176
+ get_accelerator().empty_cache()
177
+ state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device)
178
+ state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device)
179
+ get_accelerator().empty_cache()
180
+ self.adam_freeze_key = True
181
+ if not self.initialize and dist.get_rank() == 0:
182
+ print("Cupy Buffers Initialized Successfully.")
183
+
184
+ exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
185
+ beta1, beta2 = group['betas']
186
+
187
+ state['step'] += 1
188
+
189
+ if self.adam_freeze_key is False:
190
+ exp_avg.mul_(beta1).add_(1 - beta1, grad)
191
+ exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
192
+ grad = None
193
+ if self.initialize:
194
+ update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
195
+
196
+ else:
197
+ if 'non_freeze' in group.keys() and group['non_freeze'] is True:
198
+ dist.all_reduce(grad)
199
+ grad.mul_(1 / dist.get_world_size())
200
+ exp_avg.mul_(beta1).add_(1 - beta1, grad)
201
+ exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
202
+ grad = None
203
+ else:
204
+ if self.initialize is True:
205
+ exp_avg.mul_(beta1).add_(1 - beta1, grad)
206
+ grad = None
207
+
208
+ if self.size > 1:
209
+ exp_avg.set_(
210
+ self.comm_backend_handle.compressed_allreduce(exp_avg, state['worker_error'],
211
+ state['server_error'],
212
+ self.deepspeed.local_rank))
213
+ # Because 1-bit compression cannot represent exact zero, it is required to
214
+ # provide a momentum mask for those params that have constant exact zeros in their
215
+ # momentums, otherwise the compression error would keep accumulating.
216
+ # For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight
217
+ # always have exact zeros in its momentum for row 129 to 512, because it only
218
+ # learns up to seq length 128 while the model supports up to 512 seq length.
219
+ # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
220
+ if 'exp_avg_mask' in group:
221
+ if exp_avg.device != group['exp_avg_mask'].device:
222
+ group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device)
223
+ exp_avg.mul_(group['exp_avg_mask'])
224
+
225
+ if self.initialize:
226
+ update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
227
+
228
+ if self.initialize:
229
+ if group['weight_decay'] > 0.0:
230
+ update += group['weight_decay'] * p.data
231
+ with torch.no_grad():
232
+ p.add_(-group['lr'] * update)
233
+
234
+ if not self.initialize:
235
+ print('Pop out errors', flush=True)
236
+ state.pop('worker_error')
237
+ state.pop('server_error')
238
+
239
+ if not self.initialize:
240
+ self.adam_freeze_key = False
241
+ self.initialize = True
242
+ print(f"Finished the initialization step at rank {dist.get_rank()}")
243
+ return loss
244
+
245
+ if self.adam_freeze_key is False:
246
+ if state['step'] >= self.freeze_step:
247
+ print('OnebitAdam - starting compressed communication')
248
+ self.adam_freeze_key = True
249
+ if self.using_pipeline:
250
+ self.deepspeed.pipeline_enable_backward_allreduce = False
251
+ else:
252
+ self.deepspeed.enable_backward_allreduce = False
253
+
254
+ return loss
255
+
256
+ def load_state_dict(self, state_dict):
257
+ """
258
+ Overrides load_state_dict() to add special handling when loading checkpoints
259
+ """
260
+ # Because at different stage exp_avg_mask may change (e.g.,
261
+ # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask
262
+ # in checkpoints but always use the one user provided in training script.
263
+ # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
264
+ # Thus here we keep the exp_avg_mask unchanged when loading checkpoint
265
+ for i, group in enumerate(self.param_groups):
266
+ if 'exp_avg_mask' in group:
267
+ state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
268
+ elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]:
269
+ state_dict['param_groups'][i].pop('exp_avg_mask')
270
+ super().load_state_dict(state_dict)
271
+ if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step:
272
+ if dist.get_rank() == 0:
273
+ print("Checkpoint loaded and OnebitAdam warmup stage starts/continues.")
274
+ if self.adam_freeze_key is True:
275
+ self.adam_freeze_key = False
276
+ if self.using_pipeline:
277
+ self.deepspeed.pipeline_enable_backward_allreduce = True
278
+ else:
279
+ self.deepspeed.enable_backward_allreduce = True
280
+ else:
281
+ if dist.get_rank() == 0:
282
+ print("Checkpoint loaded and OnebitAdam compression stage starts/continues.")
283
+ if self.adam_freeze_key is False:
284
+ self.adam_freeze_key = True
285
+ if self.using_pipeline:
286
+ self.deepspeed.pipeline_enable_backward_allreduce = False
287
+ else:
288
+ self.deepspeed.enable_backward_allreduce = False
289
+ # We reset the compression errors when loading checkpoints for 3 reasons:
290
+ # 1) The worker and server error at each GPU are distinct, so in current implementation
291
+ # only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors.
292
+ # If we want to save them correctly we need O(num_gpu*model_size) memory in order to
293
+ # gather all the error, which is a very large memory requirement. It's possible to save
294
+ # them in a distributed way, but it will make the checkpoint saving/loading much more complicated.
295
+ # 2) Even if we are able to save the compression errors correctly, you need to have the
296
+ # exact same number of GPUs in order to load them correctly.
297
+ # 3) We verified on BERT pre-training that occasionally resetting the compression error
298
+ # at checkpoint loading does not affect the convergence.
299
+ # However, please avoid frequent checkpoint loading which could break the error
300
+ # compensation mechanism thus affect the convergence.
301
+ for group in self.param_groups:
302
+ for p in group['params']:
303
+ if 'worker_error' in self.state[p]:
304
+ self.state[p].pop('worker_error')
305
+ if 'server_error' in self.state[p]:
306
+ self.state[p].pop('server_error')
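In practice OnebitAdam is not constructed by hand; DeepSpeed instantiates it from the training config and swaps from the uncompressed warmup stage to compressed communication once state['step'] reaches freeze_step, as implemented in step() above. The following is a minimal sketch of enabling it through a config dict, following the pattern documented in the 1-bit Adam tutorial; the numeric values, the tiny placeholder model, and the batch size are illustrative assumptions, not part of this file, and the script must be launched with an initialized distributed backend (e.g. via the deepspeed launcher), since the constructor asserts dist.is_initialized().

    # Sketch only: illustrative values, placeholder model; requires a distributed launch.
    import torch
    import deepspeed

    net = torch.nn.Linear(128, 128)  # placeholder model for illustration

    ds_config = {
        "train_batch_size": 32,
        "fp16": {"enabled": True},           # 1-bit Adam is used with fp16 training
        "optimizer": {
            "type": "OneBitAdam",
            "params": {
                "lr": 2e-4,
                "freeze_step": 23000,        # warmup (uncompressed) steps before compression starts
                "cuda_aware": False,         # set True only with a CUDA-aware MPI backend
                "comm_backend_name": "nccl"  # 'nccl', 'mpi', or 'hccl' per the constructor above
            }
        }
    }

    model_engine, optimizer, _, _ = deepspeed.initialize(
        model=net,
        model_parameters=net.parameters(),
        config=ds_config)

Before freeze_step the optimizer behaves like standard Adam; afterwards it freezes the second-moment term and all-reduces the 1-bit-compressed momentum with error feedback (the worker_error/server_error buffers above).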
venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/lamb.py ADDED
@@ -0,0 +1,443 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import types
7
+ import torch
8
+ import numpy as np
9
+ from deepspeed import comm as dist
10
+ from deepspeed.utils.torch import required_torch_version
11
+ from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
12
+ from deepspeed.accelerator import get_accelerator
13
+
14
+
15
+ class OnebitLamb(torch.optim.Optimizer):
16
+ """Implements the 1-bit Lamb algorithm. Currently GPU-only.
17
+ For usage example please see https://www.deepspeed.ai/tutorials/onebit-lamb/
18
+ For technical details please see our paper https://arxiv.org/abs/2104.06069.
19
+
20
+ Arguments:
21
+ params (iterable): iterable of parameters to optimize or dicts defining
22
+ parameter groups.
23
+ lr (float, optional): learning rate. (default: 1e-3)
24
+ freeze_step (int, optional): Number of steps for warmup (uncompressed)
25
+ stage before we start using compressed communication. (default 100000)
26
+ betas (Tuple[float, float], optional): coefficients used for computing
27
+ running averages of gradient and its square. (default: (0.9, 0.999))
28
+ eps (float, optional): term added to the denominator to improve
29
+ numerical stability. (default: 1e-8)
30
+ weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
31
+ max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0)
32
+ min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01)
33
+ amsgrad (boolean, optional): whether to use the AMSGrad variant of this
34
+ algorithm from the paper `On the Convergence of Adam and Beyond`_
35
+ (default: False) NOT SUPPORTED in 1-bit Lamb!
36
+ eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
37
+ adds eps to the bias-corrected second moment estimate before
38
+ evaluating square root instead of adding it to the square root of
39
+ second moment estimate as in the original paper. (default: False)
40
+ cuda_aware (boolean, required): Set True if the underlying MPI implementation
41
+ supports CUDA-Aware communication. (default: False)
42
+ comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
43
+ coeff_beta (float, optional): coefficient used for computing
44
+ running averages of lamb coefficient (default: 0.9) note that you may want to
45
+ increase or decrease this beta depending on the freeze_step you choose, as
46
+ 1/(1 - coeff_beta) should be smaller than or equal to freeze_step
47
+ factor_max (float, optional): maximum value of scaling factor to the frozen lamb
48
+ coefficient during compression stage (default: 4.0)
49
+ factor_min (float, optional): minimum value of scaling factor to the frozen lamb
50
+ coefficient during compression stage (default: 0.5)
51
+ factor_threshold (float, optional): threshold of how much the scaling factor can
52
+ fluctuate between steps (default: 0.1)
53
+ .. _Large Batch Optimization for Deep Learning\\: Training BERT in 76 minutes:
54
+ https://arxiv.org/abs/1904.00962
55
+ .. _Adam\\: A Method for Stochastic Optimization:
56
+ https://arxiv.org/abs/1412.6980
57
+ .. _On the Convergence of Adam and Beyond:
58
+ https://openreview.net/forum?id=ryQu7f-RZ
59
+ """
60
+
61
+ def __init__(self,
62
+ params,
63
+ deepspeed=None,
64
+ lr=1e-3,
65
+ freeze_step=100000,
66
+ bias_correction=True,
67
+ betas=(0.9, 0.999),
68
+ eps=1e-8,
69
+ eps_inside_sqrt=False,
70
+ weight_decay=0.,
71
+ max_grad_norm=0.,
72
+ max_coeff=10.0,
73
+ min_coeff=0.01,
74
+ amsgrad=False,
75
+ cuda_aware=False,
76
+ comm_backend_name='nccl',
77
+ coeff_beta=0.9,
78
+ factor_max=4.0,
79
+ factor_min=0.5,
80
+ factor_threshold=0.1):
81
+
82
+ if amsgrad:
83
+ raise RuntimeError('1-bit Lamb does not support the AMSGrad variant.')
84
+
85
+ defaults = dict(lr=lr,
86
+ bias_correction=bias_correction,
87
+ betas=betas,
88
+ eps=eps,
89
+ weight_decay=weight_decay,
90
+ max_grad_norm=max_grad_norm,
91
+ max_coeff=max_coeff,
92
+ min_coeff=min_coeff)
93
+
94
+ super(OnebitLamb, self).__init__(params, defaults)
95
+ self.eps_mode = 0 if eps_inside_sqrt else 1
96
+ self.deepspeed = deepspeed
97
+ self.lamb_freeze_key = False
98
+ self.initialize = False
99
+ self.freeze_step = freeze_step
100
+ self.cuda_aware = cuda_aware
101
+ self.coeff_beta = coeff_beta
102
+ self.factor_max = factor_max
103
+ self.factor_min = factor_min
104
+ self.factor_threshold = factor_threshold
105
+ self.using_pipeline = False
106
+
107
+ self.comm_backend_name = comm_backend_name
108
+
109
+ assert dist.is_initialized(), "Please initialize the torch distributed backend."
110
+ # Empty initializer. Set handle based on the comm backend as follows.
111
+ self.comm_backend_handle = None
112
+ if self.comm_backend_name == 'nccl':
113
+ assert (
114
+ required_torch_version(min_version=1.8)
115
+ ), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit LAMB. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
116
+ from deepspeed.runtime.comm.nccl import NcclBackend
117
+ self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
118
+ self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
119
+ elif self.comm_backend_name == 'mpi':
120
+ from deepspeed.runtime.comm.mpi import MpiBackend
121
+ self.comm_backend_handle = MpiBackend(cuda_aware)
122
+ elif self.comm_backend_name == 'hccl':
123
+ from deepspeed.runtime.comm.hccl import HcclBackend
124
+ self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
125
+ self.comm_backend_handle = HcclBackend(self.deepspeed.mpu)
126
+
127
+ self.size = self.comm_backend_handle.size
128
+
129
+ self.divider = int(self.size * 8 / np.gcd(self.size, 8))
130
+
131
+ self.exp_avg_flat = []
132
+ self.dummy_exp_avg = {}
133
+ self.corrected_tensor_sizes = []
134
+ self.server_chunk_sizes = []
135
+ self.worker_errors = []
136
+ self.server_errors = []
137
+
138
+ self.lamb_coeffs = []
139
+
140
+ def step(self, closure=None, grads=None):
141
+ """Performs a single optimization step.
142
+ Arguments:
143
+ closure (callable, optional): A closure that reevaluates the model
144
+ and returns the loss.
145
+ grads (list of tensors, optional): weight gradient to use for the
146
+ optimizer update. If gradients have type torch.half, parameters
147
+ are expected to be in type torch.float. (default: None)
148
+ """
149
+ loss = None
150
+ if closure is not None:
151
+ loss = closure()
152
+
153
+ if grads is None:
154
+ grads_group = [None] * len(self.param_groups)
155
+ # backward compatibility
156
+ # assuming a list/generator of parameter means single group
157
+ elif isinstance(grads, types.GeneratorType):
158
+ grads_group = [grads]
159
+ elif type(grads[0]) != list:
160
+ grads_group = [grads]
161
+ else:
162
+ grads_group = grads
163
+
164
+ # remove the previous stats
165
+ del self.lamb_coeffs[:]
166
+
167
+ if self.lamb_freeze_key:
168
+ exp_avg_last_step = []
169
+ for group in self.param_groups:
170
+ exp_avg_last_step.append([self.state[p]['exp_avg'].detach().clone() for p in group['params']])
171
+ if 'scaling_coeff' not in self.state[self.param_groups[0]['params'][0]]:
172
+ # Compute the scaling_coeff for each momentum at the end of warmup stage.
173
+ # This is used to reduce compression error during compression stage.
174
+ momentum_scales = []
175
+ for group in self.param_groups:
176
+ momentum_scales.append([(torch.linalg.norm(self.state[p]['exp_avg']) /
177
+ np.sqrt(torch.numel(self.state[p]['exp_avg']))).item()
178
+ for p in group['params']])
179
+ united_scale = sum([sum(x) for x in momentum_scales]) / sum([len(x) for x in momentum_scales])
180
+ for i, group in enumerate(self.param_groups):
181
+ for j, p in enumerate(group['params']):
182
+ self.state[p]['scaling_coeff'] = united_scale / momentum_scales[i][j]
183
+
184
+ for group, grads_this_group in zip(self.param_groups, grads_group):
185
+ if grads_this_group is None:
186
+ grads_this_group = [None] * len(group['params'])
187
+
188
+ bias_correction = 1 if group['bias_correction'] else 0
189
+
190
+ for p, grad in zip(group['params'], grads_this_group):
191
+ if p.grad is None and grad is None:
192
+ continue
193
+ if grad is None:
194
+ grad = p.grad.data
195
+ if grad.is_sparse:
196
+ raise RuntimeError('1-bit Lamb does not support sparse gradients')
197
+
198
+ state = self.state[p]
199
+
200
+ # State initialization
201
+ if len(state) == 0 or (len(state) == 1 and 'scaling_coeff' in state.keys()):
202
+ state['step'] = 0
203
+ state['lamb_coeff_freeze'] = 0.0
204
+ state['last_factor'] = 1.0
205
+ # Exponential moving average of gradient values
206
+ state['exp_avg'] = torch.zeros_like(p.data)
207
+ # Exponential moving average of squared gradient values
208
+ state['exp_avg_sq'] = torch.zeros_like(p.data)
209
+ state['exp_avg_sq_fresh'] = torch.zeros_like(p.data)
210
+
211
+ if not self.initialize:
212
+ self.lamb_freeze_key = True
213
+
214
+ exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[
215
+ 'exp_avg_sq_fresh']
216
+ beta1, beta2 = group['betas']
217
+ max_coeff = group['max_coeff']
218
+ min_coeff = group['min_coeff']
219
+
220
+ state['step'] += 1
221
+
222
+ if self.lamb_freeze_key is False:
223
+ # warmup stage, baseline Lamb optimization
224
+ exp_avg.mul_(beta1).add_(1 - beta1, grad)
225
+ exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
226
+ if state['step'] == self.freeze_step:
227
+ exp_avg_sq_fresh.data = exp_avg_sq.detach().clone()
228
+ grad = None
229
+ if self.initialize:
230
+ weight_norm = p.data.pow(2).sum().sqrt()
231
+ update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
232
+ if group['weight_decay'] > 0.0:
233
+ update += group['weight_decay'] * p.data
234
+ update_norm = update.pow(2).sum().sqrt()
235
+ lamb_coeff = 1.0
236
+ if weight_norm != 0 and update_norm != 0:
237
+ lamb_coeff = (weight_norm / update_norm).item()
238
+ if lamb_coeff > max_coeff:
239
+ lamb_coeff = max_coeff
240
+ if lamb_coeff < min_coeff:
241
+ lamb_coeff = min_coeff
242
+ if lamb_coeff != 1.0:
243
+ state['lamb_coeff_freeze'] = self.coeff_beta * state['lamb_coeff_freeze'] + (
244
+ 1 - self.coeff_beta) * lamb_coeff
245
+ self.lamb_coeffs.append(lamb_coeff)
246
+ with torch.no_grad():
247
+ p.add_(-group['lr'] * lamb_coeff * update)
248
+ else:
249
+ # compression stage, update each momentum locally, then
250
+ # communicate based on the compressed_allreduce below
251
+ if self.initialize:
252
+ exp_avg.mul_(beta1).add_(1 - beta1, grad)
253
+ exp_avg.mul_(self.state[p]['scaling_coeff'])
254
+ grad = None
255
+
256
+ # init fused momentum
257
+ if len(self.exp_avg_flat) == 0:
258
+ momentum_groups = []
259
+ tensor_size = 0
260
+ for group in self.param_groups:
261
+ for p in group['params']:
262
+ momentum_groups.append(self.state[p]['exp_avg'])
263
+ tensor_size += torch.numel(p.data)
264
+ corrected_tensor_size = tensor_size
265
+ if tensor_size % (self.size * self.divider) != 0:
266
+ difference = ((self.size * self.divider) - (tensor_size % (self.size * self.divider)))
267
+ corrected_tensor_size += difference
268
+ self.dummy_exp_avg[0] = torch.zeros(difference, device=momentum_groups[0].data.device)
269
+ momentum_groups.append(self.dummy_exp_avg[0])
270
+ self.corrected_tensor_sizes.append(corrected_tensor_size)
271
+ self.server_chunk_sizes.append(corrected_tensor_size // self.size)
272
+
273
+ self.exp_avg_flat.append(_flatten_dense_tensors([p.detach().clone() for p in momentum_groups]))
274
+ updated_params = _unflatten_dense_tensors(self.exp_avg_flat[0], momentum_groups)
275
+ for p, q in zip(momentum_groups, updated_params):
276
+ p.data = q.data
277
+
278
+ if self.initialize and len(self.worker_errors) == 0:
279
+ get_accelerator().empty_cache()
280
+ for i in range(len(self.exp_avg_flat)):
281
+ self.worker_errors.append(
282
+ torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device))
283
+ self.server_errors.append(torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device))
284
+ get_accelerator().empty_cache()
285
+
286
+ if self.lamb_freeze_key:
287
+ if self.size > 1:
288
+ for i in range(len(self.exp_avg_flat)):
289
+ if not self.initialize:
290
+ get_accelerator().empty_cache()
291
+ self.worker_errors.append(
292
+ torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device))
293
+ self.server_errors.append(
294
+ torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device))
295
+ get_accelerator().empty_cache()
296
+ if dist.get_rank() == 0:
297
+ print("Cupy Buffers Initialized Successfully.")
298
+
299
+ self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[0],
300
+ self.server_errors[0], self.deepspeed.local_rank)
301
+
302
+ if dist.get_rank() == 0:
303
+ print('Pop out errors', flush=True)
304
+ del self.worker_errors[:]
305
+ del self.server_errors[:]
306
+ else:
307
+ self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[i],
308
+ self.server_errors[i], self.deepspeed.local_rank)
309
+
310
+ if self.lamb_freeze_key and self.initialize:
311
+ for i, group in enumerate(self.param_groups):
312
+ bias_correction = 1 if group['bias_correction'] else 0
313
+
314
+ for j, p in enumerate(group['params']):
315
+ state = self.state[p]
316
+ exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[
317
+ 'exp_avg_sq_fresh']
318
+ beta1, beta2 = group['betas']
319
+ exp_avg.div_(self.state[p]['scaling_coeff'])
320
+ # Because 1-bit compression cannot represent exact zero, it is required to
321
+ # provide a momentum mask for those params that have constant exact zeros in their
322
+ # momentums, otherwise the compression error would keep accumulating.
323
+ # For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight
324
+ # always have exact zeros in its momentum for row 129 to 512, because it only
325
+ # learns up to seq length 128 while the model supports up to 512 seq length.
326
+ # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py about how
327
+ # to add this exp_avg_mask for BERT pre-training.)
328
+ if 'exp_avg_mask' in group:
329
+ if exp_avg.device != group['exp_avg_mask'].device:
330
+ group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device)
331
+ exp_avg.mul_(group['exp_avg_mask'])
332
+
333
+ grad_reconstruct = ((exp_avg - exp_avg_last_step[i][j] * beta1) / (1 - beta1))
334
+ exp_avg_sq_fresh.mul_(beta2).addcmul_(1 - beta2, grad_reconstruct, grad_reconstruct)
335
+ denom = exp_avg_sq.sqrt() + group['eps']
336
+ update_prelim = exp_avg / denom
337
+
338
+ if group['weight_decay'] > 0.0:
339
+ update = update_prelim + group['weight_decay'] * p.data
340
+ else:
341
+ update = update_prelim
342
+
343
+ lamb_coeff = 1.0
344
+ update_norm = update.pow(2).sum().sqrt()
345
+ denom_real = exp_avg_sq_fresh.sqrt() + group['eps']
346
+ factor = (denom / denom_real).max().item()
347
+ if group['weight_decay'] > 0.0:
348
+ update_ratio = min(1.0, (update_prelim.pow(2).sum().sqrt() / update_norm).item())
349
+ factor = factor * update_ratio + (1.0 - update_ratio)
350
+ if factor > self.factor_max:
351
+ factor = self.factor_max
352
+ if factor < self.factor_min:
353
+ factor = self.factor_min
354
+ if factor > state['last_factor'] * (1.0 + self.factor_threshold):
355
+ factor = state['last_factor'] * (1.0 + self.factor_threshold)
356
+ if factor < state['last_factor'] * (1.0 - self.factor_threshold):
357
+ factor = state['last_factor'] * (1.0 - self.factor_threshold)
358
+ state['last_factor'] = factor
359
+ lamb_coeff = state['lamb_coeff_freeze'] * factor
360
+ self.lamb_coeffs.append(lamb_coeff)
361
+ with torch.no_grad():
362
+ p.add_(-group['lr'] * lamb_coeff * update)
363
+ del exp_avg_last_step[:]
364
+ exp_avg_last_step = None
365
+
366
+ if not self.initialize:
367
+ self.lamb_freeze_key = False
368
+ self.initialize = True
369
+ print(f"Finished the initialization step at rank {dist.get_rank()}")
370
+ return loss
371
+
372
+ if self.lamb_freeze_key is False:
373
+ if state['step'] >= self.freeze_step:
374
+ print('OnebitLamb - starting compressed communication')
375
+ self.lamb_freeze_key = True
376
+ if self.using_pipeline:
377
+ self.deepspeed.pipeline_enable_backward_allreduce = False
378
+ else:
379
+ self.deepspeed.enable_backward_allreduce = False
380
+
381
+ return loss
382
+
383
+ def load_state_dict(self, state_dict):
384
+ """
385
+ Overrides load_state_dict() to add special handling when loading checkpoints
386
+ """
387
+ # Because at different stage exp_avg_mask may change (e.g.,
388
+ # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask
389
+ # in checkpoints but always use the one user provided in training script.
390
+ # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
391
+ # Thus here we keep the exp_avg_mask unchanged when loading checkpoint
392
+ for i, group in enumerate(self.param_groups):
393
+ if 'exp_avg_mask' in group:
394
+ state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
395
+ elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]:
396
+ state_dict['param_groups'][i].pop('exp_avg_mask')
397
+ super().load_state_dict(state_dict)
398
+ # need to reset the fused momentum since loading states will break the linking
399
+ del self.exp_avg_flat[:]
400
+ self.dummy_exp_avg.clear()
401
+ del self.corrected_tensor_sizes[:]
402
+ del self.server_chunk_sizes[:]
403
+ if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step:
404
+ if dist.get_rank() == 0:
405
+ print("Checkpoint loaded and OnebitLamb warmup stage starts/continues.")
406
+ if self.lamb_freeze_key is True:
407
+ self.lamb_freeze_key = False
408
+ if self.using_pipeline:
409
+ self.deepspeed.pipeline_enable_backward_allreduce = True
410
+ else:
411
+ self.deepspeed.enable_backward_allreduce = True
412
+ for group in self.param_groups:
413
+ for p in group['params']:
414
+ self.state[p]['lamb_coeff_freeze'] = 0.0
415
+ self.state[p]['last_factor'] = 1.0
416
+ if 'scaling_coeff' in self.state[p]:
417
+ self.state[p].pop('scaling_coeff')
418
+ else:
419
+ if dist.get_rank() == 0:
420
+ print("Checkpoint loaded and OnebitLamb compression stage starts/continues.")
421
+ if self.lamb_freeze_key is False:
422
+ self.lamb_freeze_key = True
423
+ if self.using_pipeline:
424
+ self.deepspeed.pipeline_enable_backward_allreduce = False
425
+ else:
426
+ self.deepspeed.enable_backward_allreduce = False
427
+ # We reset the compression errors when loading checkpoints for 3 reasons:
428
+ # 1) The worker and server error at each GPU are distinct, so in current implementation
429
+ # only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors.
430
+ # If we want to save them correctly we need O(num_gpu*model_size) memory in order to
431
+ # gather all the error, which is a very large memory requirement. It's possible to save
432
+ # them in a distributed way, but it will make the checkpoint saving/loading much more complicated.
433
+ # 2) Even if we are able to save the compression errors correctly, you need to have the
434
+ # exact same number of GPUs in order to load them correctly.
435
+ # 3) We verified on BERT pre-training that occasionally resetting the compression error
436
+ # at checkpoint loading does not affect the convergence.
437
+ # However, please avoid frequent checkpoint loading which could break the error
438
+ # compensation mechanism thus affect the convergence.
439
+ del self.worker_errors[:]
440
+ del self.server_errors[:]
441
+
442
+ def get_lamb_coeffs(self):
443
+ return self.lamb_coeffs
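Both optimizers consult an optional exp_avg_mask per parameter group (see the comments in step() above): parameters whose momentum contains structurally exact zeros, such as position-embedding rows beyond the trained sequence length in BERT seq-128 pre-training, need a mask so that compression error does not accumulate in those entries. Below is a hedged sketch of building such a mask and attaching it to a parameter group; the Embedding stand-in, its dimensions, and the 128-token trained length are assumptions for illustration and are not defined in this file.

    # Sketch: attach an exp_avg_mask to the group holding a position-embedding
    # weight whose rows beyond sequence length 128 never receive gradients.
    import torch

    def build_position_embedding_mask(pos_emb_weight: torch.Tensor,
                                      trained_seq_len: int = 128) -> torch.Tensor:
        # 1.0 for rows that are actually trained, 0.0 for rows whose momentum
        # stays at exact zero and must not accumulate compression error.
        mask = torch.zeros_like(pos_emb_weight)
        mask[:trained_seq_len, :] = 1.0
        return mask

    # Stand-in for something like bert.embeddings.position_embeddings (512 x 768).
    pos_emb_module = torch.nn.Embedding(512, 768)
    pos_emb = pos_emb_module.weight

    param_groups = [
        {
            "params": [pos_emb],
            "exp_avg_mask": build_position_embedding_mask(pos_emb),
        },
        # remaining model parameters would go into further groups without a mask
    ]

The groups can then be passed as model_parameters when initializing DeepSpeed; OnebitAdam and OnebitLamb multiply exp_avg by the group's mask after the compressed all-reduce, and load_state_dict() above deliberately keeps the mask supplied by the training script rather than the one stored in a checkpoint.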