applied-ai-018 committed on
Commit 6015caf · verified · 1 Parent(s): 7ef6853

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__init__.py +23 -0
  2. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/arguments.cpython-310.pyc +0 -0
  3. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/checkpointing.cpython-310.pyc +0 -0
  4. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/dist_signal_handler.cpython-310.pyc +0 -0
  5. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/global_vars.cpython-310.pyc +0 -0
  6. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/profiler.cpython-310.pyc +0 -0
  7. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/timers.cpython-310.pyc +0 -0
  8. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/training.cpython-310.pyc +0 -0
  9. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/checkpointing.py +797 -0
  10. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/README.md +1 -0
  11. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/__pycache__/enums.cpython-310.pyc +0 -0
  12. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/__pycache__/utils.cpython-310.pyc +0 -0
  13. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/enums.py +9 -0
  14. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/__init__.py +0 -0
  15. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/fused_bias_dropout.py +53 -0
  16. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/fused_layer_norm.py +89 -0
  17. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/model_parallel_config.py +164 -0
  18. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/parallel_state.py +766 -0
  19. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/requirements.txt +6 -0
  20. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__init__.py +70 -0
  21. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__pycache__/random.cpython-310.pyc +0 -0
  22. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__pycache__/utils.cpython-310.pyc +0 -0
  23. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/cross_entropy.py +143 -0
  24. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/layers.py +886 -0
  25. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/mappings.py +290 -0
  26. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/random.py +330 -0
  27. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/attention.py +267 -0
  28. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/core_attention.py +146 -0
  29. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/module.py +118 -0
  30. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/transformer_layer.py +122 -0
  31. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/utils.py +171 -0
  32. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/__init__.cpython-310.pyc +0 -0
  33. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/blendable_dataset.cpython-310.pyc +0 -0
  34. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/dataset_utils.cpython-310.pyc +0 -0
  35. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/indexed_dataset.cpython-310.pyc +0 -0
  36. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/vit_dataset.cpython-310.pyc +0 -0
  37. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/gpt_dataset.py +664 -0
  38. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/ict_dataset.py +156 -0
  39. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/orqa_wiki_dataset.py +193 -0
  40. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/realm_dataset_utils.py +200 -0
  41. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/test/test_preprocess_data.sh +10 -0
  42. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/dist_signal_handler.py +81 -0
  43. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/enums.py +34 -0
  44. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/__init__.py +121 -0
  45. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/__pycache__/__init__.cpython-310.pyc +0 -0
  46. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/compat.h +17 -0
  47. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax.cpp +83 -0
  48. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax.h +710 -0
  49. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax_cuda.cu +109 -0
  50. docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_softmax.cpp +61 -0
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__init__.py ADDED
@@ -0,0 +1,23 @@
+# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+import torch
+
+from .global_vars import get_args, get_retro_args
+from .global_vars import get_current_global_batch_size
+from .global_vars import get_num_microbatches
+from .global_vars import get_num_eval_microbatches
+from .global_vars import get_num_microbatches_by_mode
+from .global_vars import get_signal_handler
+from .global_vars import update_num_microbatches
+from .global_vars import get_tokenizer
+from .global_vars import get_tensorboard_writer
+from .global_vars import get_adlr_autoresume
+from .global_vars import get_timers
+from .initialize import initialize_megatron
+
+from .utils import (print_rank_0,
+                    is_last_rank,
+                    print_rank_last,
+                    is_rank_0,
+                    is_aml)
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/arguments.cpython-310.pyc ADDED
Binary file (55.5 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/checkpointing.cpython-310.pyc ADDED
Binary file (17.9 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/dist_signal_handler.cpython-310.pyc ADDED
Binary file (2.76 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/global_vars.cpython-310.pyc ADDED
Binary file (5.78 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/profiler.cpython-310.pyc ADDED
Binary file (2.98 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/timers.cpython-310.pyc ADDED
Binary file (8.46 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/training.cpython-310.pyc ADDED
Binary file (38.8 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/checkpointing.py ADDED
@@ -0,0 +1,797 @@
+# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
+# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+
+"""Input/output checkpointing."""
+
+import os
+import random
+import sys
+import numpy as np
+from deepspeed.accelerator import get_accelerator
+import torch
+
+from megatron import update_num_microbatches, get_tokenizer
+from megatron.core import mpu, tensor_parallel
+from .global_vars import get_args
+from tools import verify_checkpoint
+from .utils import (unwrap_model,
+                    print_rank_0,
+                    is_rank_0)
+
+from deepspeed.checkpoint import (
+    ORIGINAL_VOCAB_SIZE,
+    PADDED_VOCAB_SIZE,
+    UNIVERSAL_CHECKPOINT_INFO,
+    UNIVERSAL_CHECKPOINT_VERSION_KEY,
+    UNIVERSAL_CHECKPOINT_VERSION_VALUE,
+)
+
+_CHECKPOINT_VERSION = None
+
+
+def set_checkpoint_version(value):
+    global _CHECKPOINT_VERSION
+    if _CHECKPOINT_VERSION is not None:
+        assert _CHECKPOINT_VERSION == value, \
+            "checkpoint versions do not match"
+    _CHECKPOINT_VERSION = value
+
+
+def get_checkpoint_version():
+    global _CHECKPOINT_VERSION
+    return _CHECKPOINT_VERSION
+
+
+def check_checkpoint_args(checkpoint_args):
+    """Ensure fixed arguments for a model are the same for the input
+    arguments and the one retrieved from checkpoint."""
+    args = get_args()
+
+    def _compare(arg_name, old_arg_name=None, default=None):
+        if old_arg_name is not None:
+            ckpt_arg_name = old_arg_name
+        else:
+            ckpt_arg_name = arg_name
+        if default is not None:
+            checkpoint_value = getattr(checkpoint_args, ckpt_arg_name, default)
+        else:
+            checkpoint_value = getattr(checkpoint_args, ckpt_arg_name)
+        args_value = getattr(args, arg_name)
+        error_message = '{} value from checkpoint ({}) is not equal to the ' \
+                        'input argument value ({}).'.format(
+                            arg_name, checkpoint_value, args_value)
+        assert checkpoint_value == args_value, error_message
+
+    if not args.mos and not args.kd:
+        _compare('num_layers')
+        _compare('hidden_size')
+        _compare('num_attention_heads')
+        _compare('num_key_value_heads')
+        _compare('add_position_embedding', default=True)
+        if args.vocab_file:
+            _compare('max_position_embeddings')
+            if not args.universal_checkpoint:
+                _compare('make_vocab_size_divisible_by')
+                _compare('padded_vocab_size')
+            _compare('tokenizer_type')
+    if args.data_parallel_random_init:
+        _compare('data_parallel_random_init')
+    if get_checkpoint_version() < 3.0 and not args.universal_checkpoint:
+        _compare('tensor_model_parallel_size',
+                 old_arg_name='model_parallel_size')
+    if get_checkpoint_version() >= 3.0 and not args.universal_checkpoint:
+        _compare('tensor_model_parallel_size')
+        _compare('pipeline_model_parallel_size')
+
+
+def ensure_directory_exists(filename):
+    """Build filename's path if it does not already exist."""
+    dirname = os.path.dirname(filename)
+    os.makedirs(dirname, exist_ok=True)
+
+
+def get_checkpoint_name(checkpoints_path, iteration, release=False,
+                        pipeline_parallel=None,
+                        tensor_rank=None, pipeline_rank=None):
+    """Determine the directory name for this rank's checkpoint."""
+    if release:
+        directory = 'release'
+    else:
+        directory = 'iter_{:07d}'.format(iteration)
+
+    # Use both the tensor and pipeline MP rank.
+    if pipeline_parallel is None:
+        pipeline_parallel = (mpu.get_pipeline_model_parallel_world_size() > 1)
+    if tensor_rank is None:
+        tensor_rank = mpu.get_tensor_model_parallel_rank()
+    if pipeline_rank is None:
+        pipeline_rank = mpu.get_pipeline_model_parallel_rank()
+
+    # Use both the tensor and pipeline MP rank. If using the distributed
+    # optimizer, then the optimizer's path must additionally include the
+    # data parallel rank.
+    if not pipeline_parallel:
+        common_path = os.path.join(checkpoints_path, directory,
+                                   f'mp_rank_{tensor_rank:02d}')
+    else:
+        common_path = os.path.join(checkpoints_path, directory,
+                                   f'mp_rank_{tensor_rank:02d}_{pipeline_rank:03d}')
+
+    return os.path.join(common_path, "model_optim_rng.pt")
+
+
+def get_distributed_optimizer_checkpoint_name(model_checkpoint_name):
+    return os.path.join(os.path.dirname(model_checkpoint_name),
+                        "distrib_optim.pt")
+
+
+def find_checkpoint_rank_0(checkpoints_path, iteration, release=False):
+    """Finds the checkpoint for rank 0 without knowing if we are using
+    pipeline parallelism or not.
+
+    Since the checkpoint naming scheme changes if pipeline parallelism
+    is present, we need to look for both naming schemes if we don't
+    know if the checkpoint has pipeline parallelism.
+    """
+
+    # Look for checkpoint with no pipelining
+    filename = get_checkpoint_name(checkpoints_path, iteration, release,
+                                   pipeline_parallel=False,
+                                   tensor_rank=0, pipeline_rank=0)
+    if os.path.isfile(filename):
+        return filename
+
+    # Look for checkpoint with pipelining
+    filename = get_checkpoint_name(checkpoints_path, iteration, release,
+                                   pipeline_parallel=True,
+                                   tensor_rank=0, pipeline_rank=0)
+    if os.path.isfile(filename):
+        return filename
+
+    return None, None
+
+
+def get_checkpoint_tracker_filename(checkpoints_path):
+
+    """Tracker file records the latest checkpoint during
+    training to restart from."""
+    return os.path.join(checkpoints_path, 'latest_checkpointed_iteration.txt')
+
+
+def read_metadata(tracker_filename):
+    # Read the tracker file and either set the iteration or
+    # mark it as a release checkpoint.
+    iteration = 0
+    release = False
+    with open(tracker_filename, 'r') as f:
+        metastring = f.read().strip()
+        try:
+            iteration = int(metastring)
+        except ValueError:
+            release = metastring == 'release'
+            if not release:
+                print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(
+                    tracker_filename))
+                sys.exit()
+    assert iteration > 0 or release, 'error parsing metadata file {}'.format(
+        tracker_filename)
+
+    # Get the max iteration retrieved across the ranks.
+    if torch.distributed.is_initialized():
+        iters_cuda = get_accelerator().LongTensor([iteration])
+        torch.distributed.all_reduce(iters_cuda, op=torch.distributed.ReduceOp.MAX)
+        max_iter = iters_cuda[0].item()
+
+        # We should now have all the same iteration.
+        # If not, print a warning and choose the maximum
+        # iteration across all ranks.
+        if iteration != max_iter:
+            print('WARNING: on rank {} found iteration {} in the '
+                  'metadata while max iteration across the ranks '
+                  'is {}, replacing it with max iteration.'.format(
+                      torch.distributed.get_rank(), iteration, max_iter), flush=True)
+    else:
+        # When loading a checkpoint outside of training (for example,
+        # when editing it), we might not have torch distributed
+        # initialized, in this case, just assume we have the latest
+        max_iter = iteration
+    return max_iter, release
+
+
+def get_rng_state():
+    """ collect rng state across data parallel ranks """
+    args = get_args()
+    rng_state = {
+        'random_rng_state': random.getstate(),
+        'np_rng_state': np.random.get_state(),
+        'torch_rng_state': torch.get_rng_state(),
+        'cuda_rng_state': get_accelerator().get_rng_state(),
+        'rng_tracker_states': tensor_parallel.get_cuda_rng_tracker().get_states()}
+
+    rng_state_list = None
+    if torch.distributed.is_initialized() and \
+            mpu.get_data_parallel_world_size() > 1 and \
+            args.data_parallel_random_init:
+        rng_state_list = \
+            [None for i in range(mpu.get_data_parallel_world_size())]
+        torch.distributed.all_gather_object(
+            rng_state_list,
+            rng_state,
+            group=mpu.get_data_parallel_group())
+    else:
+        rng_state_list = [rng_state]
+
+    return rng_state_list
+
+
+def save_checkpoint(iteration, model, optimizer, opt_param_scheduler):
+    """Save a model checkpoint."""
+    args = get_args()
+
+    # Only rank zero of the data parallel writes to the disk.
+    if not args.deepspeed:
+        model = unwrap_model(model)
+
+    print_rank_0('saving checkpoint at iteration {:7d} to {}'.format(
+        iteration, args.save))
+
+    # Collect rng state across data parallel ranks.
+    rng_state = get_rng_state()
+
+    # Checkpoint name.
+    checkpoint_name = get_checkpoint_name(args.save, iteration)
+
+    # Save distributed optimizer's custom parameter state.
+    if args.use_distributed_optimizer:
+        optim_checkpoint_name = \
+            get_distributed_optimizer_checkpoint_name(checkpoint_name)
+        ensure_directory_exists(optim_checkpoint_name)
+        optimizer.save_parameter_state(optim_checkpoint_name)
+
+    # Collect args, model, RNG.
+    if not torch.distributed.is_initialized() \
+            or mpu.get_data_parallel_rank() == 0 or args.deepspeed:
+
+        # Arguments, iteration, and model.
+        state_dict = {}
+        state_dict['args'] = args
+        state_dict['checkpoint_version'] = 3.0
+        state_dict['iteration'] = iteration
+        state_dict['tokens'] = args.consumed_train_tokens
+        state_dict[UNIVERSAL_CHECKPOINT_INFO] = _universal_checkpoint_info(model)
+
+        # DeepSpeed saves the model/optimizer/scheduler
+        if not args.deepspeed:
+            if len(model) == 1:
+                state_dict['model'] = model[0].state_dict_for_save_checkpoint()
+            else:
+                for i in range(len(model)):
+                    mpu.set_virtual_pipeline_model_parallel_rank(i)
+                    state_dict['model%d' % i] = \
+                        model[i].state_dict_for_save_checkpoint()
+
+            # Optimizer stuff.
+            if not args.no_save_optim:
+                if optimizer is not None:
+                    state_dict['optimizer'] = optimizer.state_dict()
+                if opt_param_scheduler is not None:
+                    state_dict['opt_param_scheduler'] = \
+                        opt_param_scheduler.state_dict()
+
+        # RNG states.
+        if not args.no_save_rng:
+            state_dict["rng_state"] = rng_state
+
+        # Save.
+        if not args.deepspeed:
+            ensure_directory_exists(checkpoint_name)
+            torch.save(state_dict, checkpoint_name)
+
+    if args.deepspeed:
+        # megatron model uses state_dict_for_save_checkpoint instead of the standard state_dict
+        # state_dict is used by deepspeed for module saving so it needs to point to the right function
+        if args.no_pipeline_parallel:
+            original_state_dict = model[0].module.state_dict
+            def state_dict_for_save_checkpoint_deepspeed(destination=None, prefix='', keep_vars=False):
+                return model[0].module.state_dict_for_save_checkpoint(prefix=prefix, keep_vars=keep_vars)
+            model[0].module.state_dict = state_dict_for_save_checkpoint_deepspeed
+
+        # Saving is a collective communication
+        checkpoint_name = get_checkpoint_name(args.save, iteration)
+
+        # Trim off the filename and mp_rank_* directory.
+        for _ in range(3):
+            checkpoint_name = os.path.dirname(checkpoint_name)
+        model[0].save_checkpoint(checkpoint_name, client_state=state_dict)
+
+        if args.no_pipeline_parallel:
+            model[0].module.state_dict = original_state_dict
+
+    # Wait so everyone is done (necessary)
+    if torch.distributed.is_initialized():
+        torch.distributed.barrier()
+
+    print_rank_0('  successfully saved checkpoint at iteration {:7d} to {}' \
+                 .format(iteration, args.save))
+
+    # And update the latest iteration
+    if is_rank_0():
+        if args.verify_checkpoint:
+            ckpt_folder = os.path.join(args.save, f"global_step{iteration}")
+            prev_iter = iteration - args.save_interval
+            ckpt_ok = verify_checkpoint(ckpt_folder,
+                                        args.verify_checkpoint_model_type)
+            if not ckpt_ok:
+                # Fix latest file to previous valid ckpt
+                with open(os.path.join(args.save, 'latest'), 'w') as fd:
+                    fd.write(f"global_step{prev_iter}")
+                raise RuntimeError(f"verify_checkpoint failed!!! {ckpt_folder}")
+            else:
+                print_rank_0(f"successfully passed ckpt validation: {ckpt_folder}")
+        tracker_filename = get_checkpoint_tracker_filename(args.save)
+        with open(tracker_filename, 'w') as f:
+            f.write(str(iteration))
+
+    # Wait so everyone is done (not necessary)
+    if torch.distributed.is_initialized():
+        torch.distributed.barrier()
+
+
+def _transpose_first_dim(t, num_splits, num_splits_first, model):
+    input_shape = t.size()
+    # We use a self_attention module but the values extracted aren't
+    # specific to self attention so should work for cross attention as well
+    while hasattr(model, 'module'):
+        model = model.module
+    attention_module = model.language_model.encoder.layers[0].self_attention
+    # attention_module = model.language_model.encoder.layers[0].attention
+    hidden_size_per_attention_head = attention_module.hidden_size_per_attention_head
+    num_attention_heads_per_partition = attention_module.num_attention_heads_per_partition
+    if num_splits_first:
+        """[num_splits * np * hn, h]
+        -->(view) [num_splits, np, hn, h]
+        -->(transpose) [np, num_splits, hn, h]
+        -->(view) [np * num_splits * hn, h] """
+
+        intermediate_shape = \
+            (num_splits, num_attention_heads_per_partition,
+             hidden_size_per_attention_head) + input_shape[1:]
+
+        t = t.view(*intermediate_shape)
+        t = t.transpose(0, 1).contiguous()
+    else:
+        """[np * hn * num_splits, h]
+        -->(view) [np, hn, num_splits, h]
+        -->(transpose) [np, num_splits, hn, h]
+        -->(view) [np * num_splits * hn, h] """
+
+        intermediate_shape = \
+            (num_attention_heads_per_partition,
+             hidden_size_per_attention_head, num_splits) +\
+             input_shape[1:]
+
+        t = t.view(*intermediate_shape)
+        t = t.transpose(1, 2).contiguous()
+    t = t.view(*input_shape)
+
+    return t
+
+def fix_query_key_value_ordering(model, checkpoint_version):
+    """Fix up query/key/value matrix ordering if checkpoint
+    version is smaller than 2.0
+    """
+    if checkpoint_version < 2.0:
+        if isinstance(model, list):
+            assert len(model)==1
+            model = model[0]
+        for name, param in model.named_parameters():
+            if name.endswith(('.query_key_value.weight', '.query_key_value.bias')):
+                if checkpoint_version == 0:
+                    fixed_param = _transpose_first_dim(param.data, 3, True, model)
+                elif checkpoint_version == 1.0:
+                    fixed_param = _transpose_first_dim(param.data, 3, False, model)
+                else:
+                    print_rank_0(f"Invalid checkpoint version {checkpoint_version}.")
+                    sys.exit()
+                param.data.copy_(fixed_param)
+            if name.endswith(('.key_value.weight', '.key_value.bias')):
+                if checkpoint_version == 0:
+                    fixed_param = _transpose_first_dim(param.data, 2, True, model)
+                elif checkpoint_version == 1.0:
+                    fixed_param = _transpose_first_dim(param.data, 2, False, model)
+                else:
+                    print_rank_0(f"Invalid checkpoint version {checkpoint_version}.")
+                    sys.exit()
+                param.data.copy_(fixed_param)
+        print_rank_0(" successfully fixed query-key-values ordering for"
+                     " checkpoint version {}".format(checkpoint_version))
+
+
+def _load_base_checkpoint(load_dir, rank0=False):
+    """ Load the base state_dict from the given directory
+
+    If rank0 is true, just loads rank 0 checkpoint, ignoring arguments.
+    """
+
+    # Read the tracker file and set the iteration.
+    tracker_filename = get_checkpoint_tracker_filename(load_dir)
+
+    # If no tracker file, return nothing
+    if not os.path.isfile(tracker_filename):
+        if not rank0:
+            print_rank_0('WARNING: could not find the metadata file {} '.format(
+                tracker_filename))
+            print_rank_0(' will not load any checkpoints and will start from '
+                         'random')
+        return None, False
+
+    # Otherwise, read the tracker file and either set the iteration or
+    # mark it as a release checkpoint.
+    iteration, release = read_metadata(tracker_filename)
+
+    # Checkpoint.
+    if rank0:
+        checkpoint_name = find_checkpoint_rank_0(load_dir, iteration, release)
+    else:
+        checkpoint_name = get_checkpoint_name(load_dir, iteration, release)
+        if release:
+            print_rank_0(f' loading release checkpoint from {load_dir}')
+        else:
+            print_rank_0(f' loading checkpoint from {load_dir} at iteration {iteration}')
+
+    # Load the checkpoint.
+    try:
+        state_dict = torch.load(checkpoint_name, map_location='cpu')
+    except ModuleNotFoundError:
+        from megatron.fp16_deprecated import loss_scaler
+        # For backward compatibility.
+        if not rank0:
+            print_rank_0(' > deserializing using the old code structure ...')
+        sys.modules['fp16.loss_scaler'] = sys.modules[
+            'megatron.fp16_deprecated.loss_scaler']
+        sys.modules['megatron.fp16.loss_scaler'] = sys.modules[
+            'megatron.fp16_deprecated.loss_scaler']
+        state_dict = torch.load(checkpoint_name, map_location='cpu')
+        sys.modules.pop('fp16.loss_scaler', None)
+        sys.modules.pop('megatron.fp16.loss_scaler', None)
+    except BaseException as e:
+        print_rank_0('could not load the checkpoint')
+        print_rank_0(e)
+        sys.exit()
+
+    return state_dict, release
+
+
+def load_args_from_checkpoint(args, load_arg='load'):
+    """Set required arguments from the checkpoint specified in the
+    arguments.
+
+    Will overwrite arguments that have a non-None default value, but
+    will leave any arguments that default to None as set.
+
+    Returns the same args NameSpace with the new values added/updated.
+
+    If no checkpoint is specified in args, or if the checkpoint is
+    there but invalid, the arguments will not be modified
+
+    """
+    load_dir = getattr(args, load_arg)
+
+    if load_dir is None:
+        print_rank_0('No load directory specified, using provided arguments.')
+        return args
+
+    state_dict, release = _load_base_checkpoint(load_dir, rank0=True)
+
+    # Args.
+    if not state_dict:
+        print_rank_0('Checkpoint not found to provide arguments, using provided arguments.')
+        return args
+
+    if 'args' not in state_dict:
+        print_rank_0('Checkpoint provided does not have arguments saved, using provided arguments.')
+        return args
+
+    checkpoint_args = state_dict['args']
+    checkpoint_version = state_dict.get('checkpoint_version', 0)
+    args.iteration = state_dict['iteration']
+
+    # One-off conversion for foundation models
+    if hasattr(checkpoint_args, 'disable_bias_linear'):
+        setattr(checkpoint_args, 'add_bias_linear', not getattr(checkpoint_args, 'disable_bias_linear'))
+
+    def _set_arg(arg_name, old_arg_name=None, force=False):
+        if not force and getattr(args, arg_name, None) is not None:
+            return
+
+        if old_arg_name is not None:
+            checkpoint_value = getattr(checkpoint_args, old_arg_name, None)
+        else:
+            checkpoint_value = getattr(checkpoint_args, arg_name, None)
+
+        if checkpoint_value is not None:
+            print_rank_0(f"Setting {arg_name} to {checkpoint_value} from checkpoint")
+            setattr(args, arg_name, checkpoint_value)
+        else:
+            print_rank_0(f"Checkpoint did not provide arguments {arg_name}")
+
+    _set_arg('num_layers')
+    _set_arg('hidden_size')
+    _set_arg('ffn_hidden_size')
+    _set_arg('seq_length')
+    _set_arg('num_attention_heads')
+    _set_arg('num_key_value_heads')
+    _set_arg('kv_channels')
+    _set_arg('max_position_embeddings')
+    _set_arg('add_position_embedding', force=True)
+    _set_arg('use_rotary_position_embeddings', force=True)
+    _set_arg('rotary_percent', force=True)
+    _set_arg('add_bias_linear', force=True)
+    _set_arg('swiglu', force=True)
+    _set_arg('untie_embeddings_and_output_weights', force=True)
+    _set_arg('apply_layernorm_1p', force=True)
+    _set_arg('tokenizer_type')
+    _set_arg('padded_vocab_size')
+    if checkpoint_version < 3.0:
+        _set_arg('tensor_model_parallel_size',
+                 'model_parallel_size')
+    else:
+        _set_arg('tensor_model_parallel_size', force=True)
+        _set_arg('pipeline_model_parallel_size', force=True)
+        _set_arg('virtual_pipeline_model_parallel_size', force=True)
+        _set_arg('num_layers_per_virtual_pipeline_stage')
+    return args, checkpoint_args
+
+
+def load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', strict=True, load_only_weights=False):
+    """Load a model checkpoint and return the iteration.
+    strict (bool): whether to strictly enforce that the keys in
+        :attr:`state_dict` of the checkpoint match the names of
+        parameters and buffers in model.
+    """
+    args = get_args()
+    load_dir = getattr(args, load_arg)
+
+    if args.deepspeed:
+        if args.finetune:
+            loaded_dir, state_dict = model[0].load_checkpoint(load_dir,
+                load_module_strict=strict, load_optimizer_states=False,
+                load_lr_scheduler_states=False, load_module_only=True)
+        else:
+            loaded_dir, state_dict = model[0].load_checkpoint(load_dir,
+                load_module_strict=strict)
+        if loaded_dir is None:
+            print_rank_0('WARNING: could not find the metadata file {} '.format(
+                load_dir))
+            print_rank_0(' will not load any checkpoints and will start from '
+                         'random')
+            return 0
+        release = False
+    else:
+        model = unwrap_model(model)
+
+        state_dict, release = _load_base_checkpoint(load_dir, rank0=False)
+
+        # Checkpoint not loaded.
+        if state_dict is None:
+
+            # Conditionally exit at this point.
+            if args.exit_on_missing_checkpoint:
+                print_rank_0(">> '--exit-on-missing-checkpoint' set ... exiting. <<")
+                torch.distributed.barrier()
+                sys.exit()
+
+            # Iteration defaults to 0.
+            return 0
+        checkpoint_name = get_checkpoint_name(load_dir, state_dict['iteration'], release)
+
+    # Set checkpoint version.
+    set_checkpoint_version(state_dict.get('checkpoint_version', 0))
+
+    # Set iteration.
+    if args.finetune or release or args.reset_iteration or load_only_weights:
+        iteration = 0
+        # Make DeepSpeed engine aware of this reset of iteration
+        model[0].global_steps = 0
+    else:
+        try:
+            iteration = state_dict['iteration']
+            if 'tokens' in state_dict:
+                args.consumed_train_tokens = state_dict['tokens']
+        except KeyError:
+            try:  # Backward compatible with older checkpoints
+                iteration = state_dict['total_iters']
+            except KeyError:
+                print_rank_0('A metadata file exists but unable to load '
+                             'iteration from checkpoint {}, exiting'.format(
+                                 checkpoint_name))
+                sys.exit()
+
+    # Check arguments.
+    reset_train_valid_samples = args.reset_iteration
+    if not load_only_weights and not reset_train_valid_samples:
+        assert args.consumed_train_samples == 0
+        assert args.consumed_valid_samples == 0
+        if 'args' in state_dict and not args.finetune:
+            checkpoint_args = state_dict['args']
+            check_checkpoint_args(checkpoint_args)
+            args.consumed_train_samples = getattr(checkpoint_args,
+                                                  'consumed_train_samples', 0)
+            update_num_microbatches(consumed_samples=args.consumed_train_samples)
+            args.consumed_valid_samples = getattr(checkpoint_args,
+                                                  'consumed_valid_samples', 0)
+        else:
+            print_rank_0('could not find arguments in the checkpoint ...')
+
+    # Model.
+    if not args.deepspeed:
+        if len(model) == 1:
+            model[0].load_state_dict(state_dict['model'], strict=strict)
+        else:
+            for i in range(len(model)):
+                mpu.set_virtual_pipeline_model_parallel_rank(i)
+                model[i].load_state_dict(state_dict['model%d' % i], strict=strict)
+
+        # Fix up query/key/value matrix ordering if needed.
+        checkpoint_version = get_checkpoint_version()
+        print_rank_0(f' checkpoint version {checkpoint_version}')
+        fix_query_key_value_ordering(model, checkpoint_version)
+
+    # Optimizer.
+    if not args.deepspeed:
+        if not release and not args.finetune and not args.no_load_optim:
+            try:
+                # Load state dict.
+                if optimizer is not None:
+                    optimizer.load_state_dict(state_dict['optimizer'])
+
+                # Load distributed optimizer's custom parameter state.
+                if args.use_distributed_optimizer:
+                    tracker_filename = get_checkpoint_tracker_filename(load_dir)
+                    iteration, release = read_metadata(tracker_filename)
+                    model_checkpoint_name = \
+                        get_checkpoint_name(load_dir, iteration, release)
+                    optim_checkpoint_name = \
+                        get_distributed_optimizer_checkpoint_name(
+                            model_checkpoint_name)
+                    optimizer.load_parameter_state(optim_checkpoint_name)
+
+                # Load scheduler.
+                if opt_param_scheduler is not None:
+                    if 'lr_scheduler' in state_dict:  # backward compatibility
+                        opt_param_scheduler.load_state_dict(state_dict['lr_scheduler'])
+                    else:
+                        opt_param_scheduler.load_state_dict(state_dict['opt_param_scheduler'])
+            except KeyError:
+                print_rank_0('Unable to load optimizer from checkpoint {}. '
+                             'Specify --no-load-optim or --finetune to prevent '
+                             'attempting to load the optimizer state, '
+                             'exiting ...'.format(checkpoint_name))
+                sys.exit()
+    else:
+        if (args.fp16 or args.bf16) and optimizer is not None:
+            optimizer.reload_model_params()
+
+    # rng states.
+    if not release and not args.finetune and not args.no_load_rng:
+        try:
+            if 'rng_state' in state_dict:
+                # access rng_state for data parallel rank
+                if args.data_parallel_random_init:
+                    rng_state = state_dict['rng_state'][mpu.get_data_parallel_rank()]
+                else:
+                    rng_state = state_dict['rng_state'][0]
+                random.setstate(rng_state['random_rng_state'])
+                np.random.set_state(rng_state['np_rng_state'])
+                torch.set_rng_state(rng_state['torch_rng_state'])
+                get_accelerator().set_rng_state(rng_state['cuda_rng_state'])
+                # Check for empty states array
+                if not rng_state['rng_tracker_states']:
+                    raise KeyError
+                tensor_parallel.get_cuda_rng_tracker().set_states(
+                    rng_state['rng_tracker_states'])
+            else:  # backward compatibility
+                random.setstate(state_dict['random_rng_state'])
+                np.random.set_state(state_dict['np_rng_state'])
+                torch.set_rng_state(state_dict['torch_rng_state'])
+                get_accelerator().set_rng_state(state_dict['cuda_rng_state'])
+                # Check for empty states array
+                if not state_dict['rng_tracker_states']:
+                    raise KeyError
+                tensor_parallel.get_cuda_rng_tracker().set_states(
+                    state_dict['rng_tracker_states'])
+        except KeyError:
+            print_rank_0('Unable to load rng state from checkpoint {}. '
+                         'Specify --no-load-rng or --finetune to prevent '
+                         'attempting to load the rng state, '
+                         'exiting ...'.format(checkpoint_name))
+            sys.exit()
+
+    if args.universal_checkpoint:
+        # TLDR: unique rng is needed for dropout to be really random on TP ranks
+        #
+        # Each tp-rank stores its model-parallel-rng states info.
+        # This is required to e.g. have different dropout patterns on different tp ranks that operate on
+        # slices of attention_probs tensor.
+        #
+        # When loading from universal checkpoint, we use mp_rank_<mp>_model_states.pt checkpoint files
+        # to restore the model-parallel-rng (<mp> is {tp-rank, pp-rank} combination).
+        # However, if the loaded checkpoint mp configuration does not match the current mp configuration,
+        # we can not use it to restore model-parallel-rng info.
+        #
+        # In the case of mp configuration change, we reconfigure the model-parallel-rng states s.t. each
+        # tp-rank will have a unique state. In order to ensure that subsequent loads from universal will
+        # not cause the model-parallel-rng states to be repeated, we add the iteration number to the base seed.
+        ckp_args = state_dict['args']
+        if ((args.tensor_model_parallel_size != ckp_args.tensor_model_parallel_size)
+                or (args.pipeline_model_parallel_size != ckp_args.pipeline_model_parallel_size)):
+            print_rank_0(' loading universal checkpoint with modified mp configuration '
+                         '-> reconfigure tp seed')
+            tensor_parallel.model_parallel_reconfigure_tp_seed(args.seed + iteration)
+
+    # Some utilities want to load a checkpoint without distributed being initialized
+    if torch.distributed.is_initialized():
+        torch.distributed.barrier()
+
+    print_rank_0(f'  successfully loaded checkpoint from {args.load} '
+                 f'at iteration {iteration}')
+
+    # from .utils import dump_weights, dump_position_embed_weights
+    # dump_weights(f'{args.universal_checkpoint=}', iteration, model, optimizer)
+    # dump_position_embed_weights("init", 0, model)
+
+    return iteration
+
+
+def load_biencoder_checkpoint(model, only_query_model=False,
+                              only_context_model=False, custom_load_path=None):
+    """
+    selectively load retrieval models for indexing/retrieving
+    from saved checkpoints
+    """
+
+    args = get_args()
+
+    model = unwrap_model(model)
+
+    load_path = custom_load_path if custom_load_path is not None else args.load
+
+    tracker_filename = get_checkpoint_tracker_filename(load_path)
+    with open(tracker_filename, 'r') as f:
+        iteration = int(f.read().strip())
+
+    checkpoint_name = get_checkpoint_name(load_path, iteration,
+                                          args.use_distributed_optimizer,
+                                          release=False)
+
+    if mpu.get_data_parallel_rank() == 0:
+        print('global rank {} is loading checkpoint {}'.format(
+            torch.distributed.get_rank(), checkpoint_name))
+
+    state_dict = torch.load(checkpoint_name, map_location='cpu')
+    ret_state_dict = state_dict['model']
+
+    if only_query_model:
+        ret_state_dict.pop('context_model')
+    if only_context_model:
+        ret_state_dict.pop('query_model')
+
+    assert len(model) == 1
+    model[0].load_state_dict(ret_state_dict)
+    torch.distributed.barrier()
+
+    if mpu.get_data_parallel_rank() == 0:
+        print(' successfully loaded {}'.format(checkpoint_name))
+
+    return model
+
+
+def _universal_checkpoint_info(model):
+    args = get_args()
+    tokenizer = get_tokenizer()
+    info = dict()
+    info[UNIVERSAL_CHECKPOINT_VERSION_KEY] = UNIVERSAL_CHECKPOINT_VERSION_VALUE
+    info[ORIGINAL_VOCAB_SIZE] = tokenizer.vocab_size
+    info[PADDED_VOCAB_SIZE] = args.padded_vocab_size
+    info.update(model[0].universal_checkpoint_info())
+    return info
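To make the directory layout produced by get_checkpoint_name and get_checkpoint_tracker_filename above concrete, here is a small standalone sketch (plain Python, not part of the committed file; tensor and pipeline ranks are passed in explicitly instead of being read from mpu) that reproduces the same naming convention:

import os

def sketch_checkpoint_name(save_dir, iteration, tensor_rank, pipeline_rank,
                           pipeline_parallel=False, release=False):
    # Mirrors get_checkpoint_name: a 'release' or 'iter_0000100' directory,
    # then an mp_rank_* subdirectory, then the fixed file name.
    directory = 'release' if release else 'iter_{:07d}'.format(iteration)
    if not pipeline_parallel:
        rank_dir = f'mp_rank_{tensor_rank:02d}'
    else:
        rank_dir = f'mp_rank_{tensor_rank:02d}_{pipeline_rank:03d}'
    return os.path.join(save_dir, directory, rank_dir, 'model_optim_rng.pt')

# e.g. /ckpt/iter_0000100/mp_rank_00_001/model_optim_rng.pt
print(sketch_checkpoint_name('/ckpt', 100, 0, 1, pipeline_parallel=True))
# The tracker file sits next to the iteration directories:
print(os.path.join('/ckpt', 'latest_checkpointed_iteration.txt'))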
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/README.md ADDED
@@ -0,0 +1 @@
+Megatron Core is a library for efficient and scalable training of transformer based models.
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/__pycache__/enums.cpython-310.pyc ADDED
Binary file (432 Bytes).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.5 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/enums.py ADDED
@@ -0,0 +1,9 @@
+# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+
+import enum
+
+class ModelType(enum.Enum):
+    encoder_or_decoder = 1
+    encoder_and_decoder = 2
+    retro_encoder = 3
+    retro_decoder = 4
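For reference, a one-line usage sketch of the enum above (assuming megatron.core.enums is importable from this tree):

from megatron.core.enums import ModelType

model_type = ModelType.encoder_or_decoder
print(model_type.name, model_type.value)  # encoder_or_decoder 1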
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/__init__.py ADDED
File without changes
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/fused_bias_dropout.py ADDED
@@ -0,0 +1,53 @@
+# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+
+import torch
+from typing import Tuple, Optional
+
+def _bias_dropout_add_func(x, bias, residual, prob, training):
+    # type: (Tensor, Optional[Tensor], Tensor, float, bool) -> Tensor
+    # NOTE: Previously, the argument `bias` used to be passed as
+    # `bias.expand_as(residual)` when the `bias_dropout_func` is called from the
+    # transformer layer but broadcasting should automatically take care of that.
+    # Also, looking at broadcasting semantics, `expand_as` and broadcasting
+    # seem to be identical performance-wise (both just change the view).
+    if bias is not None:
+        x = x + bias
+    out = torch.nn.functional.dropout(x, p=prob, training=training)
+    out = residual + out
+    return out
+
+def get_bias_dropout_add(training, fused):
+
+    def unfused_bias_dropout_add(x_with_bias, residual, prob):
+        x, bias = x_with_bias  # unpack
+        return _bias_dropout_add_func(x, bias, residual, prob, training)
+
+    @torch.jit.script
+    def bias_dropout_add_fused_train(
+        x_with_bias: Tuple[torch.Tensor, Optional[torch.Tensor]],
+        residual: torch.Tensor,
+        prob: float
+    ) -> torch.Tensor:
+        x, bias = x_with_bias  # unpack
+        return _bias_dropout_add_func(x, bias, residual, prob, True)
+
+    @torch.jit.script
+    def bias_dropout_add_fused_inference(
+        x_with_bias: Tuple[torch.Tensor, Optional[torch.Tensor]],
+        residual: torch.Tensor,
+        prob: float
+    ) -> torch.Tensor:
+        x, bias = x_with_bias  # unpack
+        return _bias_dropout_add_func(x, bias, residual, prob, False)
+
+    if fused:
+        # jit scripting for a nn.module (with dropout) is not
+        # triggering the fusion kernel. For now, we use two
+        # different nn.functional routines to account for varying
+        # dropout semantics during training and inference phases.
+        if training:
+            return bias_dropout_add_fused_train
+        else:
+            return bias_dropout_add_fused_inference
+    else:
+        return unfused_bias_dropout_add
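A minimal usage sketch of get_bias_dropout_add as defined above (standalone illustration, not part of the committed file). It exercises the unfused path (fused=False) on small CPU tensors, assuming the module is importable from the path used in this tree; with training=False the dropout is a no-op, so the result is simply residual + x + bias:

import torch
from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add

x = torch.randn(4, 8)      # e.g. attention/MLP output
bias = torch.zeros(8)      # broadcast over the leading dimension
residual = torch.randn(4, 8)
prob = 0.1

bias_dropout_add = get_bias_dropout_add(training=False, fused=False)
out = bias_dropout_add((x, bias), residual, prob)
assert torch.allclose(out, residual + x + bias)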
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/fused_layer_norm.py ADDED
@@ -0,0 +1,89 @@
+# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+
+import numbers
+import torch
+from torch.nn.parameter import Parameter
+from torch.nn import init
+import importlib
+
+from megatron.core.utils import make_viewless_tensor
+
+try:
+    from apex.contrib.layer_norm.layer_norm import FastLayerNormFN
+    HAVE_PERSIST_LAYER_NORM = True
+except:
+    HAVE_PERSIST_LAYER_NORM = False
+
+try:
+    from apex.normalization.fused_layer_norm import FusedLayerNormAffineFunction
+    HAVE_FUSED_LAYER_NORM = True
+except:
+    HAVE_FUSED_LAYER_NORM = False
+
+
+class FusedLayerNorm(torch.nn.Module):
+
+    def __init__(self, hidden_size, eps=1e-5,
+                 persist_layer_norm=True,
+                 sequence_parallel=False,
+                 zero_centered_gamma=False):
+        super().__init__()
+
+        self.zero_centered_gamma = zero_centered_gamma
+
+        # List of hidden sizes supported in the persistent layer norm kernel
+        # If the hidden size is not supported, fall back to the non-persistent
+        # kernel.
+        persist_ln_hidden_sizes = [1024, 1536, 2048, 2304, 3072, 3840, 4096,
+            5120, 6144, 8192, 10240, 12288, 12800, 15360, 16384, 18432, 20480,
+            24576, 25600, 30720, 32768, 40960, 49152, 65536]
+        if hidden_size not in persist_ln_hidden_sizes or not HAVE_PERSIST_LAYER_NORM:
+            persist_layer_norm = False
+
+        if not persist_layer_norm and not HAVE_FUSED_LAYER_NORM:
+            # TODO: Add pytorch only layer norm
+            raise ValueError(f'Apex must currently be installed to use megatron core.')
+
+        if isinstance(hidden_size, numbers.Integral):
+            hidden_size = (hidden_size,)
+        self.hidden_size = torch.Size(hidden_size)
+        self.eps = eps
+        self.weight = Parameter(torch.Tensor(*hidden_size))
+        self.bias = Parameter(torch.Tensor(*hidden_size))
+        self.reset_parameters()
+        self.persist_layer_norm = persist_layer_norm
+        self.sequence_parallel = sequence_parallel
+
+        # set sequence parallelism flag on weight and bias parameters
+        setattr(self.weight, 'sequence_parallel', self.sequence_parallel)
+        setattr(self.bias, 'sequence_parallel', self.sequence_parallel)
+
+
+    def reset_parameters(self):
+
+        if self.zero_centered_gamma:
+            init.zeros_(self.weight)
+            init.zeros_(self.bias)
+        else:
+            init.ones_(self.weight)
+            init.zeros_(self.bias)
+
+    def forward(self, input):
+
+        weight = self.weight + 1 if self.zero_centered_gamma else self.weight
+
+        if self.persist_layer_norm:
+            output = FastLayerNormFN.apply(input, weight, self.bias, self.eps)
+
+            # Apex's fast layer norm function outputs a 'view' tensor (i.e., has
+            # a populated '_base' field). This will result in schedule.py's
+            # deallocate_output_tensor() throwing an error, so a viewless tensor is
+            # created to prevent this.
+            output = make_viewless_tensor(inp = output,
+                                          requires_grad = input.requires_grad,
+                                          keep_graph = True)
+
+        else:
+            output = FusedLayerNormAffineFunction.apply(input, weight, self.bias, self.hidden_size, self.eps)
+
+        return output
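A small construction sketch for the FusedLayerNorm module above (not part of the committed file). It assumes Apex is installed, since the constructor raises a ValueError when neither the persistent nor the fused Apex kernel is available; 4096 is in the persist_ln_hidden_sizes whitelist, so the persistent kernel path is selected when apex.contrib is importable:

import torch
from megatron.core.fusions.fused_layer_norm import FusedLayerNorm

ln = FusedLayerNorm(hidden_size=4096, eps=1e-5, sequence_parallel=False)
print(ln.weight.shape, ln.bias.shape)   # torch.Size([4096]) torch.Size([4096])
print(ln.weight.sequence_parallel)      # False; flag consumed by Megatron's TP code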
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/model_parallel_config.py ADDED
@@ -0,0 +1,164 @@
+# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+
+from dataclasses import dataclass
+from typing import Callable
+
+import torch
+
+@dataclass
+class ModelParallelConfig:
+    """Base configuration for Megatron Core
+
+    Model Parallelism
+    -----------------
+
+    tensor_model_parallel_size (int): Intra-layer model parallelism. Splits tensors across GPU ranks. Defaults to 1.
+
+    pipeline_model_parallel_size (int): Inter-layer model parallelism. Splits transformer layers across GPU
+        ranks. Defaults to 1.
+
+    virtual_pipeline_model_parallel_size (int): Interleaved pipeline parallelism is used to improve performance by
+        reducing the pipeline bubble. Considers a transformer block as a list of smaller transformer (virtual) blocks.
+        The number of virtual blocks per pipeline model parallel rank is the virtual model parallel size. See Efficient
+        Large-Scale Language Model Training on GPU Clusters Using Megatron-LM: https://arxiv.org/pdf/2104.04473.pdf for
+        more details. Defaults to None.
+
+    sequence_parallel (bool): Makes tensor parallelism more memory efficient for LLMs (20B+) by
+        parallelizing layer norms and dropout sequentially. See Reducing Activation Recomputation in Large Transformer
+        Models: https://arxiv.org/abs/2205.05198 for more details. Defaults to False.
+
+    Initialization
+    --------------
+
+    perform_initialization (bool, default=True): If true, weights are initialized. This option can be useful when you
+        know you are going to load values from a checkpoint.
+
+    use_cpu_initialization: (bool, default=False): When set to False, we initialize the weights directly on the GPU.
+        Transferring weights from CPU to GPU can take a significant amount of time for large models. Defaults to False.
+
+    Training
+    --------
+
+    fp16 (bool): If true, train with fp16 mixed precision training. Defaults to False.
+
+    bf16 (bool): If true, train with bf16 mixed precision training. Defaults to False.
+
+    params_dtype (torch.dtype): dtype used when initializing the weights. Defaults to torch.float32
+
+    timers (optional, default=None): TODO
+
+    Optimizations
+    -------------
+
+    gradient_accumulation_fusion (bool): If true, fuses weight gradient accumulation to GEMMs. Requires the custom CUDA
+        extension fused_weight_gradient_mlp_cuda module. To use gradient_accumulation_fusion you must install APEX with
+        --cpp_ext and --cuda_ext. For example: "pip install --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\"
+        ". Note that the extension requires CUDA>=11. Otherwise, you must turn off gradient accumulation fusion.
+        Defaults to False.
+
+    async_tensor_model_parallel_allreduce (bool, default=True): If true, enables asynchronous execution of
+        tensor-model-parallel all-reduce with weight gradient computation of a column-linear layer. Defaults to False.
+
+    Pipeline Parallelism
+    --------------------
+
+    pipeline_dtype (required): dtype used in p2p communication, usually params_dtype
+
+    grad_scale_func (optional, default=None): If using loss scaling, this function should take the loss and return the
+        scaled loss. If None, no function is called on the loss.
+
+    enable_autocast (bool): If true runs the forward step function inside torch.autocast context. Default is False.
+
+    autocast_dtype (torch.dtype): dtype to pass to torch.amp.autocast when enabled. Default is pipeline_dtype.
+
+    variable_seq_lengths (bool, default=False): Support for variable sequence lengths across microbatches. Setting this
+        communicates the size of tensors during pipeline parallelism communication, because of this extra overhead it
+        should only be set if the sequence length varies by microbatch within a global batch.
+
+    num_microbatches_with_partial_activation_checkpoints (int, default=None): If int, set the number of microbatches
+        where not all of the layers will be checkpointed and recomputed. The rest of the microbatches within the window
+        of maximum outstanding microbatches will recompute all layers (either full recompute or selective recompute). If
+        None, the checkpoint and recompute will be left up to the forward_step function.
+
+    overlap_p2p_comm (bool, optional, default=False): When True some of the peer to peer communication for pipeline
+        parallelism will overlap with computation. Must be False if batch_p2p_comm is true.
+
+    batch_p2p_comm (bool, default=True): Use batch_isend_irecv instead of individual isend/irecv calls. Must be False
+        if overlap_p2p_comm is True.
+
+    batch_p2p_sync (bool, default=True): When using batch_isend_irecv, do a cuda.device.synchronize afterward to work
+        around a bug in older version of PyTorch.
+
+    use_ring_exchange_p2p (bool, default = False): Use custom ring_exchange kernel instead of
+        torch.distributed.batch_isend_irecv(). Requires custom built torch with torch.distributed.ring_exchange.
+
+    deallocate_pipeline_outputs (optional, default=False): If True, output data is deallocated after the tensor is sent
+        to the next pipeline stage. Helps with saving memory, does nothing when pipeline parallel is not used.
+
+    no_sync_func (optional): Function that creates a context that suppresses asynchronous data-parallel
+        communication. If the model is an instance of torch.nn.DistributedDataParallel, the default is to use
+        torch.nn.DistributedDataParallel.no_sync.
+
+    grad_sync_func (optional): Function that launches asynchronous gradient reductions (e.g. distributed optimizer
+        gradient reduce-scatters). The function should take one argument: an iterable of parameters whose gradients are
+        to be synchronized.
+
+    param_sync_func (optional): Function that launches asynchronous parameter synchronizations (e.g. distributed
+        optimizer parameter all-gathers). The function should take one argument: an iterable of parameters to be
+        synchronized.
+
+    """
+
+    # Model parallelism
+    tensor_model_parallel_size: int = 1
+    pipeline_model_parallel_size: int = 1
+    virtual_pipeline_model_parallel_size: int = None
+    sequence_parallel: bool = False
+
+    # Initialization
+    perform_initialization: bool = True
+    use_cpu_initialization: bool = False
+
+    # Training
+    fp16: bool = False
+    bf16: bool = False
+    params_dtype: torch.dtype = torch.float32
+    timers: Callable = None
+
+    # Optimizations
+    gradient_accumulation_fusion: bool = False
+    async_tensor_model_parallel_allreduce: bool = False
+
+    # Pipeline Parallel
+    pipeline_dtype: torch.dtype = None
+    grad_scale_func: Callable = None
+    enable_autocast: bool = False
+    autocast_dtype: torch.dtype = None
+    variable_seq_lengths: bool = False
+    num_microbatches_with_partial_activation_checkpoints: int = None
+    overlap_p2p_comm: bool = False
+    batch_p2p_comm: bool = True
+    batch_p2p_sync: bool = True
+    use_ring_exchange_p2p: bool = False
+    deallocate_pipeline_outputs: bool = False
+    no_sync_func: Callable = None
+    grad_sync_func: Callable = None
+    param_sync_func: Callable = None
+
+    def __post_init__(self):
+        """ Python dataclass method that is used to modify attributes after initialization.
+        See https://docs.python.org/3/library/dataclasses.html#post-init-processing for more details.
+        """
+        if self.sequence_parallel:
+            if self.tensor_model_parallel_size <= 1:
+                raise ValueError("Can not use sequence parallelism without tensor parallelism")
+            if self.async_tensor_model_parallel_allreduce:
+                # sequence_parallelism already does this async
+                self.async_tensor_model_parallel_allreduce = False
+
+        if self.pipeline_model_parallel_size > 1:
+            if self.pipeline_dtype is None:
+                raise ValueError("When using pipeline parallelism, pipeline_dtype must be specified")
+
+        if self.autocast_dtype is None:
+            self.autocast_dtype = self.params_dtype
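A short sketch of how the ModelParallelConfig dataclass above behaves at construction time (illustration only, assuming megatron.core is importable from this tree). It shows the __post_init__ checks: pipeline_dtype is required once pipeline_model_parallel_size > 1, autocast_dtype falls back to params_dtype, and sequence_parallel requires tensor parallelism:

import torch
from megatron.core.model_parallel_config import ModelParallelConfig

# Valid: pipeline parallelism with an explicit p2p dtype.
cfg = ModelParallelConfig(tensor_model_parallel_size=2,
                          pipeline_model_parallel_size=4,
                          pipeline_dtype=torch.bfloat16,
                          bf16=True)
print(cfg.autocast_dtype)   # defaults to params_dtype (torch.float32) when unset

# Invalid: sequence parallelism without tensor parallelism raises in __post_init__.
try:
    ModelParallelConfig(sequence_parallel=True, tensor_model_parallel_size=1)
except ValueError as e:
    print(e)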
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/parallel_state.py ADDED
@@ -0,0 +1,766 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+"""Model and data parallel groups."""
+
+import torch
+from typing import Optional
+
+from .utils import GlobalMemoryBuffer
+
+# Intra-layer model parallel group that the current rank belongs to.
+_TENSOR_MODEL_PARALLEL_GROUP = None
+# Inter-layer model parallel group that the current rank belongs to.
+_PIPELINE_MODEL_PARALLEL_GROUP = None
+# Model parallel group (both intra- and pipeline) that the current rank belongs to.
+_MODEL_PARALLEL_GROUP = None
+# Embedding group.
+_EMBEDDING_GROUP = None
+# Position embedding group.
+_POSITION_EMBEDDING_GROUP = None
+# Data parallel group that the current rank belongs to.
+_DATA_PARALLEL_GROUP = None
+_DATA_PARALLEL_GROUP_GLOO = None
+# FP8 amax reduction group.
+_AMAX_REDUCTION_GROUP = None
+
+_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None
+_VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
+_PIPELINE_MODEL_PARALLEL_SPLIT_RANK = None
+
+# These values enable us to change the mpu sizes on the fly.
+_MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None
+_MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
+_MPU_TENSOR_MODEL_PARALLEL_RANK = None
+_MPU_PIPELINE_MODEL_PARALLEL_RANK = None
+
+# A list of ranks that have a copy of the embedding.
+_EMBEDDING_GLOBAL_RANKS = None
+
+# A list of ranks that have a copy of the position embedding.
+_POSITION_EMBEDDING_GLOBAL_RANKS = None
+
+# A list of global ranks for each pipeline group to ease calculation of the source
+# rank when broadcasting from the first or last pipeline stage.
+_PIPELINE_GLOBAL_RANKS = None
+
+# For DeepSpeed's sequence parallel
+_SEQUENCE_PARALLEL_GROUP = None
+_SEQUENCE_PARALLEL_WORLD_SIZE = None
+_SEQUENCE_PARALLEL_RANK = None
+
+# This group includes processes for both data and sequence parallelisms.
+# We use this group to reduce gradients and shard parameters and optimizer stages for ZeRO.
+_SEQUENCE_DATA_PARALLEL_GROUP = None
+_SEQUENCE_DATA_PARALLEL_WORLD_SIZE = None
+_SEQUENCE_DATA_PARALLEL_RANK = None
+
+# A list of global ranks for each data parallel group to ease calculation of the source
+# rank when broadcasting weights from src to all other data parallel ranks
+_DATA_PARALLEL_GLOBAL_RANKS = None
+
+# Memory buffers to avoid dynamic memory allocation
+_GLOBAL_MEMORY_BUFFER = None
+
+
+def initialize_model_parallel(
+    tensor_model_parallel_size: int = 1,
+    pipeline_model_parallel_size: int = 1,
+    sequence_parallel_size: int = 1,
+    virtual_pipeline_model_parallel_size: Optional[int] = None,
+    pipeline_model_parallel_split_rank: Optional[int] = None,
+    use_fp8: bool = False,
+    use_distributed_optimizer: bool = False,
+) -> None:
+    """Initialize model data parallel groups.
+
+    Arguments:
+        tensor_model_parallel_size (int, default = 1):
+            The number of GPUs to split individual tensors across.
+
+        pipeline_model_parallel_size (int, default = 1):
+            The number of tensor parallel GPU groups to split the
+            Transformer layers across. For example, if
+            tensor_model_parallel_size is 4 and
+            pipeline_model_parallel_size is 2, the model will be split
+            into 2 groups of 4 GPUs.
+
+        virtual_pipeline_model_parallel_size (int, optional):
+            The number of stages that each pipeline group will have,
+            interleaving as necessary. If None, no interleaving is
+            performed. For example, if tensor_model_parallel_size is 1,
91
+ pipeline_model_parallel_size is 4,
92
+ virtual_pipeline_model_parallel_size is 2, and there are
93
+ 16 transformer layers in the model, the model will be
94
+ split into 8 stages with two layers each and each GPU
95
+ would get 2 stages as such (layer number starting with 1):
96
+
97
+ GPU 0: [1, 2] [9, 10]
98
+ GPU 1: [3, 4] [11, 12]
99
+ GPU 2: [5, 6] [13, 14]
100
+ GPU 3: [7, 8] [15, 16]
101
+
102
+ pipeline_model_parallel_split_rank (int, optional):
103
+ For models with both an encoder and decoder, the rank in
104
+ pipeline to switch between encoder and decoder (i.e. the
105
+ first rank of the decoder). This allows the user to set
106
+ the pipeline parallel size of the encoder and decoder
107
+ independently. For example, if
108
+ pipeline_model_parallel_size is 8 and
109
+ pipeline_model_parallel_split_rank is 3, then ranks 0-2
110
+ will be the encoder and ranks 3-7 will be the decoder.
111
+
112
+ use_fp8 (bool, default = False):
113
+ Construct GPU groups needed for FP8 training, namely for
114
+ amax reduction across the product of the data-parallel and
115
+ tensor-parallel groups.
116
+
117
+ Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we
118
+ use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
119
+ the model pipeline. The present function will
120
+ create 8 tensor model-parallel groups, 4 pipeline model-parallel groups
121
+ and 8 data-parallel groups as:
122
+ 8 data_parallel groups:
123
+ [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15]
124
+ 8 tensor model-parallel groups:
125
+ [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15]
126
+ 4 pipeline model-parallel groups:
127
+ [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15]
128
+ Note that for efficiency, the caller should make sure adjacent ranks
129
+ are on the same DGX box. For example if we are using 2 DGX-1 boxes
130
+ with a total of 16 GPUs, rank 0 to 7 belong to the first box and
131
+ ranks 8 to 15 belong to the second box.
132
+
133
+ """
134
+ # Get world size and rank. Ensure some consistencies.
135
+ assert torch.distributed.is_initialized()
136
+ world_size: int = torch.distributed.get_world_size()
137
+
138
+ if world_size % (tensor_model_parallel_size * pipeline_model_parallel_size) != 0:
139
+ raise RuntimeError(
140
+ f"world_size ({world_size}) is not divisible by tensor_model_parallel_size "
141
+ f"({tensor_model_parallel_size}) x pipeline_model_parallel_size ({pipeline_model_parallel_size})"
142
+ )
143
+
144
+ enable_ds_sequence_parallel = sequence_parallel_size > 1
145
+ if enable_ds_sequence_parallel:
146
+ assert tensor_model_parallel_size == 1 and pipeline_model_parallel_size == 1, \
147
+ 'DeepSpeed\'s sequence parallel does not work with tensor parallel or pipeline parallel'
148
+
149
+ if world_size % sequence_parallel_size != 0:
150
+ raise RuntimeError(
151
+ f"world_size ({world_size}) is not divisible by sequence_parallel_size {sequence_parallel_size})"
152
+ )
153
+
154
+ data_parallel_size: int = world_size // (tensor_model_parallel_size * pipeline_model_parallel_size * sequence_parallel_size)
155
+ sequence_data_parallel_size: int = sequence_parallel_size * data_parallel_size
156
+
157
+ num_tensor_model_parallel_groups: int = world_size // tensor_model_parallel_size
158
+ num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size
159
+ num_data_parallel_groups: int = world_size // data_parallel_size
160
+ num_sequence_parallel_groups: int = world_size // sequence_parallel_size
161
+ num_sequence_data_parallel_groups: int = world_size // sequence_parallel_size // data_parallel_size
162
+
163
+ if virtual_pipeline_model_parallel_size is not None:
164
+ if not pipeline_model_parallel_size > 2:
165
+ raise RuntimeError("pipeline-model-parallel size should be greater than 2 with " "interleaved schedule")
166
+ global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
167
+ global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
168
+ _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = 0
169
+ _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = virtual_pipeline_model_parallel_size
170
+
171
+ if pipeline_model_parallel_split_rank is not None:
172
+ global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
173
+ _PIPELINE_MODEL_PARALLEL_SPLIT_RANK = pipeline_model_parallel_split_rank
174
+
175
+ rank = torch.distributed.get_rank()
176
+
177
+ # Build the data-parallel groups.
178
+ global _DATA_PARALLEL_GROUP
179
+ global _DATA_PARALLEL_GROUP_GLOO
180
+ global _DATA_PARALLEL_GLOBAL_RANKS
181
+ assert _DATA_PARALLEL_GROUP is None, 'data parallel group is already initialized'
182
+ all_data_parallel_group_ranks = []
183
+ for i in range(pipeline_model_parallel_size):
184
+ start_rank = i * num_pipeline_model_parallel_groups
185
+ end_rank = (i + 1) * num_pipeline_model_parallel_groups
186
+
187
+ if sequence_parallel_size > 1:
188
+ tp_or_sp_size = sequence_parallel_size
189
+ else:
190
+ tp_or_sp_size = tensor_model_parallel_size
191
+
192
+ for j in range(tp_or_sp_size):
193
+ ranks = range(start_rank + j, end_rank, tp_or_sp_size)
194
+ all_data_parallel_group_ranks.append(list(ranks))
195
+ group = torch.distributed.new_group(ranks)
196
+ if use_distributed_optimizer:
197
+ group_gloo = torch.distributed.new_group(ranks, backend="gloo")
198
+ else:
199
+ group_gloo = None
200
+ if rank in ranks:
201
+ _DATA_PARALLEL_GROUP = group
202
+ _DATA_PARALLEL_GROUP_GLOO = group_gloo
203
+ _DATA_PARALLEL_GLOBAL_RANKS = ranks
204
+
205
+ # Build the sequence parallel groups.
206
+ global _SEQUENCE_PARALLEL_GROUP
207
+ assert _SEQUENCE_PARALLEL_GROUP is None, \
208
+ 'sequence parallel group is already initialized'
209
+ for i in range(num_sequence_parallel_groups):
210
+ ranks = range(i * sequence_parallel_size,
211
+ (i + 1) * sequence_parallel_size)
212
+ group = torch.distributed.new_group(ranks)
213
+ if rank in ranks:
214
+ _SEQUENCE_PARALLEL_GROUP = group
215
+
216
+ # Build the sequence data parallel groups.
217
+ global _SEQUENCE_DATA_PARALLEL_GROUP
218
+ assert _SEQUENCE_DATA_PARALLEL_GROUP is None, \
219
+ 'sequence data parallel group is already initialized'
220
+ all_data_sequence_parallel_group_ranks = []
221
+ if enable_ds_sequence_parallel:
222
+ for i in range(num_sequence_data_parallel_groups):
223
+ ranks = range(i * sequence_data_parallel_size,
224
+ (i + 1) * sequence_data_parallel_size)
225
+ group = torch.distributed.new_group(ranks)
226
+ all_data_sequence_parallel_group_ranks.append(list(ranks))
227
+ if rank in ranks:
228
+ _SEQUENCE_DATA_PARALLEL_GROUP = group
229
+ else:
230
+ _SEQUENCE_DATA_PARALLEL_GROUP = _DATA_PARALLEL_GROUP
231
+
232
+ # Build the model-parallel groups.
233
+ global _MODEL_PARALLEL_GROUP
234
+ assert _MODEL_PARALLEL_GROUP is None, 'model parallel group is already initialized'
235
+ num_model_parallel_groups = sequence_data_parallel_size if enable_ds_sequence_parallel else data_parallel_size
236
+ model_parallel_group_ranks = all_data_sequence_parallel_group_ranks if enable_ds_sequence_parallel else all_data_parallel_group_ranks
237
+ for i in range(num_model_parallel_groups):
238
+ ranks = [parallel_group_ranks[i] for parallel_group_ranks in model_parallel_group_ranks]
239
+ group = torch.distributed.new_group(ranks)
240
+ if rank in ranks:
241
+ _MODEL_PARALLEL_GROUP = group
242
+
243
+ # Build the tensor model-parallel groups.
244
+ global _TENSOR_MODEL_PARALLEL_GROUP
245
+ assert _TENSOR_MODEL_PARALLEL_GROUP is None, 'tensor model parallel group is already initialized'
246
+ for i in range(num_tensor_model_parallel_groups):
247
+ ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
248
+ group = torch.distributed.new_group(ranks)
249
+ if rank in ranks:
250
+ _TENSOR_MODEL_PARALLEL_GROUP = group
251
+
252
+
253
+ # Build the pipeline model-parallel groups and embedding groups
254
+ # (first and last rank in each pipeline model-parallel group).
255
+ global _PIPELINE_MODEL_PARALLEL_GROUP
256
+ global _PIPELINE_GLOBAL_RANKS
257
+ assert _PIPELINE_MODEL_PARALLEL_GROUP is None, 'pipeline model parallel group is already initialized'
258
+ global _EMBEDDING_GROUP
259
+ global _EMBEDDING_GLOBAL_RANKS
260
+ assert _EMBEDDING_GROUP is None, 'embedding group is already initialized'
261
+ global _POSITION_EMBEDDING_GROUP
262
+ global _POSITION_EMBEDDING_GLOBAL_RANKS
263
+ assert _POSITION_EMBEDDING_GROUP is None, 'position embedding group is already initialized'
264
+ for i in range(num_pipeline_model_parallel_groups):
265
+ ranks = range(i, world_size, num_pipeline_model_parallel_groups)
266
+ group = torch.distributed.new_group(ranks)
267
+ if rank in ranks:
268
+ _PIPELINE_MODEL_PARALLEL_GROUP = group
269
+ _PIPELINE_GLOBAL_RANKS = ranks
270
+ # Setup embedding group (to exchange gradients between
271
+ # first and last stages).
272
+ if len(ranks) > 1:
273
+ embedding_ranks = [ranks[0], ranks[-1]]
274
+ position_embedding_ranks = [ranks[0]]
275
+ if pipeline_model_parallel_split_rank is not None:
276
+ if ranks[pipeline_model_parallel_split_rank] not in embedding_ranks:
277
+ embedding_ranks = [ranks[0], ranks[pipeline_model_parallel_split_rank], ranks[-1]]
278
+ if ranks[pipeline_model_parallel_split_rank] not in position_embedding_ranks:
279
+ position_embedding_ranks = [ranks[0], ranks[pipeline_model_parallel_split_rank]]
280
+ else:
281
+ embedding_ranks = ranks
282
+ position_embedding_ranks = ranks
283
+
284
+ group = torch.distributed.new_group(embedding_ranks)
285
+ if rank in embedding_ranks:
286
+ _EMBEDDING_GROUP = group
287
+ if rank in ranks:
288
+ _EMBEDDING_GLOBAL_RANKS = embedding_ranks
289
+
290
+ group = torch.distributed.new_group(position_embedding_ranks)
291
+ if rank in position_embedding_ranks:
292
+ _POSITION_EMBEDDING_GROUP = group
293
+ if rank in ranks:
294
+ _POSITION_EMBEDDING_GLOBAL_RANKS = position_embedding_ranks
295
+
296
+ # Build the FP8 groups.
297
+ global _AMAX_REDUCTION_GROUP
298
+ assert _AMAX_REDUCTION_GROUP is None, \
299
+ 'FP8 amax reduction group is already initialized'
300
+ if use_fp8:
301
+ amax_group_size: int = tensor_model_parallel_size * data_parallel_size
302
+ num_amax_groups: int = world_size // amax_group_size
303
+ for i in range(num_amax_groups):
304
+ start_rank = i * amax_group_size
305
+ end_rank = (i + 1) * amax_group_size
306
+ ranks = range(start_rank, end_rank)
307
+ group = torch.distributed.new_group(ranks)
308
+ if rank in ranks:
309
+ _AMAX_REDUCTION_GROUP = group
310
+
311
+ # Initialize global memory buffer
312
+ # This isn't really "parallel state" but there isn't another good place to
313
+ # put this. If we end up with a more generic initialization of megatron-core
314
+ # we could stick it there
315
+ _set_global_memory_buffer()
316
+
317
+
318
+ def is_unitialized():
319
+ """Useful for code segments that may be accessed with or without mpu initialization"""
320
+ return _DATA_PARALLEL_GROUP is None
321
+
322
+
323
+ def model_parallel_is_initialized():
324
+ """Check if model and data parallel groups are initialized."""
325
+ if _TENSOR_MODEL_PARALLEL_GROUP is None or _PIPELINE_MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None:
326
+ return False
327
+ return True
328
+
329
+ def sequence_parallel_is_initialized():
330
+ """Check if sequence and data parallel groups are initialized."""
331
+ if _SEQUENCE_PARALLEL_GROUP is None or \
332
+ _DATA_PARALLEL_GROUP is None:
333
+ return False
334
+ return True
335
+
336
+ def sequence_data_parallel_is_initialized():
337
+ """Check if sequence data parallel groups are initialized."""
338
+ if _SEQUENCE_DATA_PARALLEL_GROUP is None:
339
+ return False
340
+ return True
341
+
342
+ def get_model_parallel_group():
343
+ """Get the model parallel group the caller rank belongs to."""
344
+ assert _MODEL_PARALLEL_GROUP is not None, 'model parallel group is not initialized'
345
+ return _MODEL_PARALLEL_GROUP
346
+
347
+
348
+ def get_tensor_model_parallel_group(check_initialized=True):
349
+ """Get the tensor model parallel group the caller rank belongs to."""
350
+ if check_initialized:
351
+ assert _TENSOR_MODEL_PARALLEL_GROUP is not None, 'tensor model parallel group is not initialized'
352
+ return _TENSOR_MODEL_PARALLEL_GROUP
353
+
354
+
355
+ def get_pipeline_model_parallel_group():
356
+ """Get the pipeline model parallel group the caller rank belongs to."""
357
+ assert _PIPELINE_MODEL_PARALLEL_GROUP is not None, 'pipeline_model parallel group is not initialized'
358
+ return _PIPELINE_MODEL_PARALLEL_GROUP
359
+
360
+ def get_sequence_parallel_group():
361
+ """Get the sequence parallel group the caller rank belongs to."""
362
+ assert _SEQUENCE_PARALLEL_GROUP is not None, \
363
+ 'sequence parallel group is not initialized'
364
+ return _SEQUENCE_PARALLEL_GROUP
365
+
366
+
367
+ def get_sequence_data_parallel_group():
368
+ """Get the sequence parallel group the caller rank belongs to."""
369
+ assert _SEQUENCE_DATA_PARALLEL_GROUP is not None, \
370
+ 'sequence data parallel group is not initialized'
371
+ return _SEQUENCE_DATA_PARALLEL_GROUP
372
+
373
+
374
+ def get_data_parallel_group():
375
+ """Get the data parallel group the caller rank belongs to."""
376
+ assert _DATA_PARALLEL_GROUP is not None, 'data parallel group is not initialized'
377
+ return _DATA_PARALLEL_GROUP
378
+
379
+
380
+ def get_data_parallel_group_gloo():
381
+ """Get the data parallel group-gloo the caller rank belongs to."""
382
+ assert _DATA_PARALLEL_GROUP_GLOO is not None, \
383
+ 'data parallel group-gloo is not initialized'
384
+ return _DATA_PARALLEL_GROUP_GLOO
385
+
386
+
387
+ def get_embedding_group():
388
+ """Get the embedding group the caller rank belongs to."""
389
+ assert _EMBEDDING_GROUP is not None, 'embedding group is not initialized'
390
+ return _EMBEDDING_GROUP
391
+
392
+
393
+ def get_position_embedding_group():
394
+ """Get the position embedding group the caller rank belongs to."""
395
+ assert _POSITION_EMBEDDING_GROUP is not None, 'position embedding group is not initialized'
396
+ return _POSITION_EMBEDDING_GROUP
397
+
398
+
399
+ def get_amax_reduction_group():
400
+ """Get the FP8 amax reduction group the caller rank belongs to."""
401
+ assert _AMAX_REDUCTION_GROUP is not None, \
402
+ 'FP8 amax reduction group is not initialized'
403
+ return _AMAX_REDUCTION_GROUP
404
+
405
+
406
+ def set_tensor_model_parallel_world_size(world_size):
407
+ """Set the tensor model parallel size"""
408
+ global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
409
+ _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = world_size
410
+
411
+ def set_sequence_parallel_world_size(world_size):
412
+ """Set the sequence parallel size"""
413
+ global _SEQUENCE_PARALLEL_WORLD_SIZE
414
+ _SEQUENCE_PARALLEL_WORLD_SIZE = world_size
415
+
416
+ def set_sequence_data_parallel_world_size(world_size):
417
+ """Set the sequence parallel size"""
418
+ global _SEQUENCE_DATA_PARALLEL_WORLD_SIZE
419
+ _SEQUENCE_DATA_PARALLEL_WORLD_SIZE = world_size
420
+
421
+ def set_pipeline_model_parallel_world_size(world_size):
422
+ """Set the pipeline model parallel size"""
423
+ global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
424
+ _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size
425
+
426
+ def set_virtual_pipeline_model_parallel_world_size(world_size):
427
+ """Set the pipeline model parallel size"""
428
+ global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
429
+ _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size
430
+
431
+ def set_virtual_pipeline_model_parallel_world_size(world_size):
432
+ """Set the virtual pipeline model parallel size"""
433
+ global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
434
+ _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size
435
+
436
+
437
+ def get_tensor_model_parallel_world_size():
438
+ """Return world size for the tensor model parallel group."""
439
+ global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
440
+ if _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE is not None:
441
+ return _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
442
+ return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())
443
+
444
+ def get_model_parallel_world_size():
445
+ assert get_pipeline_model_parallel_world_size() == 1, "legacy get_model_parallel_world_size is only supported if PP is disabled"
446
+ return get_tensor_model_parallel_world_size()
447
+
448
+ def get_sequence_parallel_world_size():
449
+ """Return world size for the sequence parallel group."""
450
+ global _SEQUENCE_PARALLEL_WORLD_SIZE
451
+ if _SEQUENCE_PARALLEL_WORLD_SIZE is not None:
452
+ return _SEQUENCE_PARALLEL_WORLD_SIZE
453
+ return torch.distributed.get_world_size(group=get_sequence_parallel_group())
454
+
455
+ def get_sequence_data_parallel_world_size():
456
+ """Return world size for the sequence parallel group."""
457
+ global _SEQUENCE_DATA_PARALLEL_WORLD_SIZE
458
+ if _SEQUENCE_DATA_PARALLEL_WORLD_SIZE is not None:
459
+ return _SEQUENCE_DATA_PARALLEL_WORLD_SIZE
460
+ return torch.distributed.get_world_size(group=get_sequence_data_parallel_group())
461
+
462
+ def get_pipeline_model_parallel_world_size():
463
+ """Return world size for the pipeline model parallel group."""
464
+ global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
465
+ if _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE is not None:
466
+ return _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
467
+ return torch.distributed.get_world_size(group=get_pipeline_model_parallel_group())
468
+
469
+
470
+ def set_tensor_model_parallel_rank(rank):
471
+ """Set tensor model parallel rank."""
472
+ global _MPU_TENSOR_MODEL_PARALLEL_RANK
473
+ _MPU_TENSOR_MODEL_PARALLEL_RANK = rank
474
+
475
+
476
+ def get_model_parallel_rank():
477
+ assert get_pipeline_model_parallel_world_size() == 1, "legacy get_model_parallel_rank is only supported if PP is disabled"
478
+ return get_tensor_model_parallel_rank()
479
+
480
+
481
+ def set_sequence_parallel_rank(rank):
482
+ """Set sequence parallel rank."""
483
+ global _SEQUENCE_PARALLEL_RANK
484
+ _SEQUENCE_PARALLEL_RANK = rank
485
+
486
+
487
+ def set_sequence_data_parallel_rank(rank):
488
+ """Set sequence parallel rank."""
489
+ global _SEQUENCE_DATA_PARALLEL_RANK
490
+ _SEQUENCE_DATA_PARALLEL_RANK = rank
491
+
492
+
493
+ def set_pipeline_model_parallel_rank(rank):
494
+ """Set pipeline model parallel rank."""
495
+ global _MPU_PIPELINE_MODEL_PARALLEL_RANK
496
+ _MPU_PIPELINE_MODEL_PARALLEL_RANK = rank
497
+
498
+
499
+ def set_pipeline_model_parallel_split_rank(rank):
500
+ """Set pipeline model parallel split rank."""
501
+ global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
502
+ _PIPELINE_MODEL_PARALLEL_SPLIT_RANK = rank
503
+
504
+
505
+ def get_tensor_model_parallel_rank():
506
+ """Return my rank for the tensor model parallel group."""
507
+ global _MPU_TENSOR_MODEL_PARALLEL_RANK
508
+ if _MPU_TENSOR_MODEL_PARALLEL_RANK is not None:
509
+ return _MPU_TENSOR_MODEL_PARALLEL_RANK
510
+ return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
511
+
512
+
513
+ def get_pipeline_model_parallel_rank():
514
+ """Return my rank for the pipeline model parallel group."""
515
+ global _MPU_PIPELINE_MODEL_PARALLEL_RANK
516
+ if _MPU_PIPELINE_MODEL_PARALLEL_RANK is not None:
517
+ return _MPU_PIPELINE_MODEL_PARALLEL_RANK
518
+ return torch.distributed.get_rank(group=get_pipeline_model_parallel_group())
519
+
520
+
521
+ def get_pipeline_model_parallel_split_rank():
522
+ """Return pipeline model parallel split rank."""
523
+ global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
524
+ return _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
525
+
526
+
527
+ def get_sequence_parallel_rank():
528
+ """Return my rank for the sequence parallel group."""
529
+ global _SEQUENCE_PARALLEL_RANK
530
+ if _SEQUENCE_PARALLEL_RANK is not None:
531
+ return _SEQUENCE_PARALLEL_RANK
532
+ return torch.distributed.get_rank(group=get_sequence_parallel_group())
533
+
534
+
535
+ def get_sequence_data_parallel_rank():
536
+ """Return my rank for the sequence data parallel group."""
537
+ global _SEQUENCE_DATA_PARALLEL_RANK
538
+ if _SEQUENCE_DATA_PARALLEL_RANK is not None:
539
+ return _SEQUENCE_DATA_PARALLEL_RANK
540
+ return torch.distributed.get_rank(group=get_sequence_data_parallel_group())
541
+
542
+
543
+ def is_pipeline_first_stage(ignore_virtual=False):
544
+ """Return True if in the first pipeline model-parallel stage, False otherwise."""
545
+ if not ignore_virtual:
546
+ if (
547
+ get_virtual_pipeline_model_parallel_world_size() is not None
548
+ and get_virtual_pipeline_model_parallel_rank() != 0
549
+ ):
550
+ return False
551
+ return get_pipeline_model_parallel_rank() == 0
552
+
553
+
554
+ def is_pipeline_last_stage(ignore_virtual=False):
555
+ """Return True if in the last pipeline model-parallel stage, False otherwise."""
556
+ if not ignore_virtual:
557
+ virtual_pipeline_model_parallel_world_size = get_virtual_pipeline_model_parallel_world_size()
558
+ if virtual_pipeline_model_parallel_world_size is not None and get_virtual_pipeline_model_parallel_rank() != (
559
+ virtual_pipeline_model_parallel_world_size - 1
560
+ ):
561
+ return False
562
+ return get_pipeline_model_parallel_rank() == (get_pipeline_model_parallel_world_size() - 1)
563
+
564
+
565
+ def is_rank_in_embedding_group(ignore_virtual=False):
566
+ """Return true if current rank is in embedding group, False otherwise."""
567
+ rank = torch.distributed.get_rank()
568
+ global _EMBEDDING_GLOBAL_RANKS
569
+ if ignore_virtual:
570
+ return rank in _EMBEDDING_GLOBAL_RANKS
571
+ if rank in _EMBEDDING_GLOBAL_RANKS:
572
+ if rank == _EMBEDDING_GLOBAL_RANKS[0]:
573
+ return is_pipeline_first_stage(ignore_virtual=False)
574
+ elif rank == _EMBEDDING_GLOBAL_RANKS[-1]:
575
+ return is_pipeline_last_stage(ignore_virtual=False)
576
+ else:
577
+ return True
578
+ return False
579
+
580
+
581
+ def is_rank_in_position_embedding_group():
582
+ """Return true if current rank is in position embedding group, False otherwise."""
583
+ rank = torch.distributed.get_rank()
584
+ global _POSITION_EMBEDDING_GLOBAL_RANKS
585
+ return rank in _POSITION_EMBEDDING_GLOBAL_RANKS
586
+
587
+
588
+ def is_pipeline_stage_before_split(rank=None):
589
+ """Return True if pipeline stage executes encoder block for a model
590
+ with both encoder and decoder."""
591
+ if get_pipeline_model_parallel_world_size() == 1:
592
+ return True
593
+ if rank is None:
594
+ rank = get_pipeline_model_parallel_rank()
595
+ global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
596
+ if _PIPELINE_MODEL_PARALLEL_SPLIT_RANK is None:
597
+ return True
598
+ if rank < _PIPELINE_MODEL_PARALLEL_SPLIT_RANK:
599
+ return True
600
+ return False
601
+
602
+
603
+ def is_pipeline_stage_after_split(rank=None):
604
+ """Return True if pipeline stage executes decoder block for a model
605
+ with both encoder and decoder."""
606
+ if get_pipeline_model_parallel_world_size() == 1:
607
+ return True
608
+ if rank is None:
609
+ rank = get_pipeline_model_parallel_rank()
610
+ global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
611
+ if _PIPELINE_MODEL_PARALLEL_SPLIT_RANK is None:
612
+ return True
613
+ if rank >= _PIPELINE_MODEL_PARALLEL_SPLIT_RANK:
614
+ return True
615
+ return False
616
+
617
+
618
+ def is_pipeline_stage_at_split():
619
+ """Return true if pipeline stage executes decoder block and next
620
+ stage executes encoder block for a model with both encoder and
621
+ decoder."""
622
+ rank = get_pipeline_model_parallel_rank()
623
+ return is_pipeline_stage_before_split(rank) and is_pipeline_stage_after_split(rank + 1)
624
+
625
+
626
+ def get_virtual_pipeline_model_parallel_rank():
627
+ """Return the virtual pipeline-parallel rank."""
628
+ global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
629
+ return _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
630
+
631
+
632
+ def set_virtual_pipeline_model_parallel_rank(rank):
633
+ """Set the virtual pipeline-parallel rank."""
634
+ global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
635
+ _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = rank
636
+
637
+
638
+ def get_virtual_pipeline_model_parallel_world_size():
639
+ """Return the virtual pipeline-parallel world size."""
640
+ global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
641
+ return _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
642
+
643
+
644
+ def set_virtual_pipeline_model_parallel_world_size(world_size):
645
+ """Set the virtual pipeline-parallel world size"""
646
+ global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
647
+ _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size
648
+
649
+
650
+ def get_tensor_model_parallel_src_rank():
651
+ """Calculate the global rank corresponding to the first local rank
652
+ in the tensor model parallel group."""
653
+ global_rank = torch.distributed.get_rank()
654
+ local_world_size = get_tensor_model_parallel_world_size()
655
+ return (global_rank // local_world_size) * local_world_size
656
+
657
+
658
+ def get_sequence_parallel_src_rank():
659
+ """Calculate the global rank corresponding to the first local rank
660
+ in the sequence parallel group."""
661
+ global_rank = torch.distributed.get_rank()
662
+ local_world_size = get_sequence_parallel_world_size()
663
+ return (global_rank // local_world_size) * local_world_size
664
+
665
+
666
+ def get_data_parallel_src_rank():
667
+ """Calculate the global rank corresponding to the first local rank
668
+ in the data parallel group."""
669
+ assert _DATA_PARALLEL_GLOBAL_RANKS is not None, "Data parallel group is not initialized"
670
+ return _DATA_PARALLEL_GLOBAL_RANKS[0]
671
+
672
+
673
+ def get_pipeline_model_parallel_first_rank():
674
+ """Return the global rank of the first process in the pipeline for the
675
+ current tensor parallel group"""
676
+ assert _PIPELINE_GLOBAL_RANKS is not None, "Pipeline parallel group is not initialized"
677
+ return _PIPELINE_GLOBAL_RANKS[0]
678
+
679
+
680
+ def get_pipeline_model_parallel_last_rank():
681
+ """Return the global rank of the last process in the pipeline for the
682
+ current tensor parallel group"""
683
+ assert _PIPELINE_GLOBAL_RANKS is not None, "Pipeline parallel group is not initialized"
684
+ last_rank_local = get_pipeline_model_parallel_world_size() - 1
685
+ return _PIPELINE_GLOBAL_RANKS[last_rank_local]
686
+
687
+
688
+ def get_pipeline_model_parallel_next_rank():
689
+ """Return the global rank that follows the caller in the pipeline"""
690
+ assert _PIPELINE_GLOBAL_RANKS is not None, "Pipeline parallel group is not initialized"
691
+ rank_in_pipeline = get_pipeline_model_parallel_rank()
692
+ world_size = get_pipeline_model_parallel_world_size()
693
+ return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline + 1) % world_size]
694
+
695
+
696
+ def get_pipeline_model_parallel_prev_rank():
697
+ """Return the global rank that preceeds the caller in the pipeline"""
698
+ assert _PIPELINE_GLOBAL_RANKS is not None, "Pipeline parallel group is not initialized"
699
+ rank_in_pipeline = get_pipeline_model_parallel_rank()
700
+ world_size = get_pipeline_model_parallel_world_size()
701
+ return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline - 1) % world_size]
702
+
703
+
704
+ def get_data_parallel_world_size():
705
+ """Return world size for the data parallel group."""
706
+ return torch.distributed.get_world_size(group=get_data_parallel_group())
707
+
708
+
709
+ def get_data_parallel_rank():
710
+ """Return my rank for the data parallel group."""
711
+ return torch.distributed.get_rank(group=get_data_parallel_group())
712
+
713
+
714
+ def _set_global_memory_buffer():
715
+ """Initialize global buffer"""
716
+ global _GLOBAL_MEMORY_BUFFER
717
+ assert _GLOBAL_MEMORY_BUFFER is None, 'global memory buffer is already initialized'
718
+ _GLOBAL_MEMORY_BUFFER = GlobalMemoryBuffer()
719
+
720
+
721
+ def get_global_memory_buffer():
722
+ """Return the global GlobalMemoryBuffer object"""
723
+ assert _GLOBAL_MEMORY_BUFFER is not None, 'global memory buffer is not initialized'
724
+ return _GLOBAL_MEMORY_BUFFER
725
+
726
+
727
+ def destroy_global_memory_buffer():
728
+ """Sets the global memory buffer to None"""
729
+ global _GLOBAL_MEMORY_BUFFER
730
+ _GLOBAL_MEMORY_BUFFER = None
731
+
732
+
733
+ def destroy_model_parallel():
734
+ """Set the groups to none."""
735
+ global _MODEL_PARALLEL_GROUP
736
+ _MODEL_PARALLEL_GROUP = None
737
+ global _TENSOR_MODEL_PARALLEL_GROUP
738
+ _TENSOR_MODEL_PARALLEL_GROUP = None
739
+ global _PIPELINE_MODEL_PARALLEL_GROUP
740
+ _PIPELINE_MODEL_PARALLEL_GROUP = None
741
+ global _DATA_PARALLEL_GROUP
742
+ _DATA_PARALLEL_GROUP = None
743
+ global _SEQUENCE_PARALLEL_GROUP
744
+ _SEQUENCE_PARALLEL_GROUP = None
745
+ global _SEQUENCE_DATA_PARALLEL_GROUP
746
+ _SEQUENCE_DATA_PARALLEL_GROUP = None
747
+ global _EMBEDDING_GROUP
748
+ _EMBEDDING_GROUP = None
749
+ global _POSITION_EMBEDDING_GROUP
750
+ _POSITION_EMBEDDING_GROUP = None
751
+ global _AMAX_REDUCTION_GROUP
752
+ _AMAX_REDUCTION_GROUP = None
753
+ global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
754
+ _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None
755
+ global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
756
+ _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
757
+ global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
758
+ _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None
759
+ global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
760
+ _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None
761
+ global _MPU_TENSOR_MODEL_PARALLEL_RANK
762
+ _MPU_TENSOR_MODEL_PARALLEL_RANK = None
763
+ global _MPU_PIPELINE_MODEL_PARALLEL_RANK
764
+ _MPU_PIPELINE_MODEL_PARALLEL_RANK = None
765
+ global _GLOBAL_MEMORY_BUFFER
766
+ _GLOBAL_MEMORY_BUFFER = None
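
For orientation, here is a minimal, hedged sketch of how the group-management API in parallel_state.py above is typically driven. It is not part of the commit; it assumes the job was launched with a compatible world size (for example via torchrun), the backend string depends on the accelerator in use, and the parallel sizes are illustrative only.

```python
import torch
from megatron.core import parallel_state

# Assumes a multi-process launch; the backend ("nccl", "hccl", "gloo", ...) is
# platform dependent and chosen here only for illustration.
torch.distributed.init_process_group(backend="nccl")

# world_size must be divisible by tensor_size * pipeline_size (checked above).
parallel_state.initialize_model_parallel(
    tensor_model_parallel_size=2,
    pipeline_model_parallel_size=2,
)

tp_rank = parallel_state.get_tensor_model_parallel_rank()
pp_rank = parallel_state.get_pipeline_model_parallel_rank()
dp_size = parallel_state.get_data_parallel_world_size()
print(f"tp_rank={tp_rank} pp_rank={pp_rank} dp_world_size={dp_size}",
      "(first pipeline stage)" if parallel_state.is_pipeline_first_stage() else "")

# Tear everything down, e.g. between unit tests.
parallel_state.destroy_model_parallel()
parallel_state.destroy_global_memory_buffer()
```
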
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/requirements.txt ADDED
@@ -0,0 +1,6 @@
1
+ pybind11
2
+ torch
3
+ regex
4
+ einops
5
+ datasets
6
+ sentencepiece
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ from .cross_entropy import vocab_parallel_cross_entropy
2
+ from .data import broadcast_data
3
+
4
+ from .layers import (
5
+ ColumnParallelLinear,
6
+ RowParallelLinear,
7
+ VocabParallelEmbedding,
8
+ set_tensor_model_parallel_attributes,
9
+ set_defaults_if_not_set_tensor_model_parallel_attributes,
10
+ copy_tensor_model_parallel_attributes,
11
+ param_is_not_tensor_parallel_duplicate,
12
+ linear_with_grad_accumulation_and_async_allreduce
13
+
14
+ )
15
+
16
+ from .mappings import (
17
+ copy_to_tensor_model_parallel_region,
18
+ gather_from_tensor_model_parallel_region,
19
+ gather_from_sequence_parallel_region,
20
+ scatter_to_tensor_model_parallel_region,
21
+ scatter_to_sequence_parallel_region,
22
+ )
23
+
24
+ from .random import (
25
+ checkpoint,
26
+ get_cuda_rng_tracker,
27
+ model_parallel_cuda_manual_seed,
28
+ model_parallel_reconfigure_tp_seed,
29
+ init_checkpointed_activations_memory_buffer,
30
+ reset_checkpointed_activations_memory_buffer,
31
+ )
32
+
33
+ from .utils import (
34
+ split_tensor_along_last_dim,
35
+ split_tensor_into_1d_equal_chunks,
36
+ gather_split_1d_tensor,
37
+ )
38
+
39
+ __all__ = [
40
+ # cross_entropy.py
41
+ "vocab_parallel_cross_entropy",
42
+ # data.py
43
+ "broadcast_data",
44
+ #layers.py
45
+ "ColumnParallelLinear",
46
+ "RowParallelLinear",
47
+ "VocabParallelEmbedding",
48
+ "set_tensor_model_parallel_attributes",
49
+ "set_defaults_if_not_set_tensor_model_parallel_attributes",
50
+ "copy_tensor_model_parallel_attributes",
51
+ "param_is_not_tensor_parallel_duplicate",
52
+ "linear_with_grad_accumulation_and_async_allreduce",
53
+ # mappings.py
54
+ "copy_to_tensor_model_parallel_region",
55
+ "gather_from_tensor_model_parallel_region",
56
+ "gather_from_sequence_parallel_region",
57
+ # "reduce_from_tensor_model_parallel_region",
58
+ "scatter_to_tensor_model_parallel_region",
59
+ "scatter_to_sequence_parallel_region",
60
+ # random.py
61
+ "checkpoint",
62
+ "get_cuda_rng_tracker",
63
+ "model_parallel_cuda_manual_seed",
64
+ "init_checkpointed_activations_memory_buffer",
65
+ "reset_checkpointed_activations_memory_buffer",
66
+ # utils.py
67
+ "split_tensor_along_last_dim",
68
+ "split_tensor_into_1d_equal_chunks",
69
+ "gather_split_1d_tensor",
70
+ ]
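
A small, hedged example of consuming this package's public surface; the names are taken from the `__all__` list above and no new functionality is implied.

```python
# Everything re-exported by megatron/core/tensor_parallel/__init__.py is reachable
# directly from the package, so callers do not import the submodules explicitly.
from megatron.core.tensor_parallel import (
    ColumnParallelLinear,
    RowParallelLinear,
    vocab_parallel_cross_entropy,
    model_parallel_cuda_manual_seed,
)

# Typical ordering: seed the tensor-parallel RNG tracker before constructing
# parallel layers so that weight shards are initialized consistently across ranks.
model_parallel_cuda_manual_seed(1234)  # illustrative seed value
```
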
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__pycache__/random.cpython-310.pyc ADDED
Binary file (10 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.94 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/cross_entropy.py ADDED
@@ -0,0 +1,143 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ import torch
4
+
5
+ from megatron.core.parallel_state import (
6
+ get_tensor_model_parallel_group,
7
+ get_tensor_model_parallel_rank,
8
+ get_tensor_model_parallel_world_size
9
+ )
10
+
11
+ from .utils import VocabUtility
12
+
13
+
14
+ class _VocabParallelCrossEntropy(torch.autograd.Function):
15
+
16
+ @staticmethod
17
+ def forward(ctx, vocab_parallel_logits, target, label_smoothing=0.0):
18
+
19
+ # Maximum value along vocab dimension across all GPUs.
20
+ logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
21
+ torch.distributed.all_reduce(logits_max,
22
+ op=torch.distributed.ReduceOp.MAX,
23
+ group=get_tensor_model_parallel_group())
24
+ # Subtract the maximum value.
25
+ vocab_parallel_logits = vocab_parallel_logits - logits_max.unsqueeze(dim=-1)
26
+
27
+ # Get the partition's vocab indices
28
+ get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
29
+ partition_vocab_size = vocab_parallel_logits.size()[-1]
30
+ rank = get_tensor_model_parallel_rank()
31
+ world_size = get_tensor_model_parallel_world_size()
32
+ vocab_start_index, vocab_end_index = get_vocab_range(
33
+ partition_vocab_size, rank, world_size)
34
+
35
+ # Create a mask of valid vocab ids (1 means it needs to be masked).
36
+ target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
37
+ masked_target = target.clone() - vocab_start_index
38
+ masked_target[target_mask] = 0
39
+
40
+ # Get predicted-logits = logits[target].
41
+ # For Simplicity, we convert logits to a 2-D tensor with size
42
+ # [*, partition-vocab-size] and target to a 1-D tensor of size [*].
43
+ logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
44
+ masked_target_1d = masked_target.view(-1)
45
+ arange_1d = torch.arange(start=0, end=logits_2d.size()[0],
46
+ device=logits_2d.device)
47
+ predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
48
+ predicted_logits_1d = predicted_logits_1d.clone().contiguous()
49
+ predicted_logits = predicted_logits_1d.view_as(target)
50
+ predicted_logits[target_mask] = 0.0
51
+ # All reduce is needed to get the chunks from other GPUs.
52
+ torch.distributed.all_reduce(predicted_logits,
53
+ op=torch.distributed.ReduceOp.SUM,
54
+ group=get_tensor_model_parallel_group())
55
+
56
+ # Sum of exponential of logits along vocab dimension across all GPUs.
57
+ exp_logits = vocab_parallel_logits
58
+ torch.exp(vocab_parallel_logits, out=exp_logits)
59
+ sum_exp_logits = exp_logits.sum(dim=-1)
60
+ torch.distributed.all_reduce(sum_exp_logits,
61
+ op=torch.distributed.ReduceOp.SUM,
62
+ group=get_tensor_model_parallel_group())
63
+
64
+ # Loss = log(sum(exp(logits))) - predicted-logit.
65
+ loss = torch.log(sum_exp_logits) - predicted_logits
66
+
67
+ # Normalize and optionally smooth logits
68
+ exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
69
+
70
+ vocab_size = exp_logits.size(-1)
71
+ if label_smoothing > 0:
72
+ """
73
+ We'd like to assign 1 / (K - 1) probability mass to every index that is not the ground truth.
74
+ = (1 - alpha) * y_gt + alpha * mean(y_{i for i != gt})
75
+ = (1 - alpha) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i
76
+ = ((K - 1) * (1 - alpha) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i
77
+ = (K * (1 - alpha) - 1) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i} y_i
78
+ = (1 - (alpha * K) / (K - 1)) * y_gt + ( (alpha * K) / (K - 1) ) * \sum_{i} y_i / K
79
+ From: https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/losses/smoothed_cross_entropy.py
80
+ """
81
+ assert 1.0 > label_smoothing > 0.0
82
+ smoothing = label_smoothing * vocab_size / (vocab_size - 1)
83
+
84
+ # Exp logits at this point are normalized probabilities. So we can just take the log to get log-probs.
85
+ log_probs = torch.log(exp_logits)
86
+ mean_log_probs = log_probs.mean(dim=-1)
87
+ loss = (1.0 - smoothing) * loss - smoothing * mean_log_probs
88
+
89
+ ctx.label_smoothing, ctx.vocab_size = label_smoothing, vocab_size
90
+ ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
91
+
92
+ # Store softmax, target-mask and masked-target for backward pass.
93
+ ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
94
+
95
+ return loss
96
+
97
+ @staticmethod
98
+ def backward(ctx, grad_output):
99
+
100
+ # Retrieve tensors from the forward pass.
101
+ softmax, target_mask, masked_target_1d = ctx.saved_tensors
102
+ label_smoothing, vocab_size = ctx.label_smoothing, ctx.vocab_size
103
+
104
+ # All the inputs have softmax as their gradient.
105
+ grad_input = softmax
106
+ # For simplicity, work with the 2D gradient.
107
+ partition_vocab_size = softmax.size()[-1]
108
+ grad_2d = grad_input.view(-1, partition_vocab_size)
109
+
110
+ # Add the gradient from matching classes.
111
+ arange_1d = torch.arange(start=0, end=grad_2d.size()[0],
112
+ device=grad_2d.device)
113
+
114
+ softmax_update = 1.0 - target_mask.view(-1).float()
115
+
116
+ if label_smoothing > 0:
117
+ smoothing = label_smoothing * vocab_size / (vocab_size - 1)
118
+ grad_2d[arange_1d, masked_target_1d] -= (1.0 - smoothing) * softmax_update
119
+ average_grad = 1 / vocab_size
120
+ grad_2d[arange_1d, :] -= smoothing * average_grad
121
+ else:
122
+ grad_2d[arange_1d, masked_target_1d] -= softmax_update
123
+
124
+ # Finally elementwise multiplication with the output gradients.
125
+ grad_input.mul_(grad_output.unsqueeze(dim=-1))
126
+
127
+ return grad_input, None, None
128
+
129
+
130
+ def vocab_parallel_cross_entropy(vocab_parallel_logits, target, label_smoothing=0.0):
131
+ """
132
+ Performs cross entropy loss when logits are split across tensor parallel ranks
133
+
134
+ Arguments:
135
+ vocab_parallel_logits: logits split across tensor parallel ranks
136
+ dimension is [sequence_length, micro_batch_size, partition_vocab_size]
137
+
138
+ target: correct vocab ids of dimension [sequence_length, micro_batch_size]
139
+
140
+ label_smoothing: smoothing factor, must be in range [0.0, 1.0)
141
+ default is no smoothing (=0.0)
142
+ """
143
+ return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target, label_smoothing)
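
To make the expected shapes concrete, here is a hedged usage sketch, not part of the commit. It assumes the tensor-parallel groups have already been created (see parallel_state.initialize_model_parallel above); all sizes are arbitrary example values and, in practice, the tensors would live on the accelerator device of the current rank.

```python
import torch
from megatron.core.tensor_parallel import vocab_parallel_cross_entropy

# Illustrative sizes only.
seq_len, micro_batch, partition_vocab_size, tp_world_size = 128, 4, 8192, 2
global_vocab_size = partition_vocab_size * tp_world_size

# Each tensor-parallel rank holds its own vocabulary shard of the logits.
vocab_parallel_logits = torch.randn(
    seq_len, micro_batch, partition_vocab_size, requires_grad=True)
# Targets are global vocab ids; out-of-shard ids are masked internally.
target = torch.randint(0, global_vocab_size, (seq_len, micro_batch))

# Returns an un-reduced per-token loss of shape [seq_len, micro_batch];
# reduce it however the surrounding training loop requires.
loss = vocab_parallel_cross_entropy(vocab_parallel_logits, target, label_smoothing=0.1)
loss.mean().backward()
```
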
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/layers.py ADDED
@@ -0,0 +1,886 @@
1
+ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
2
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
3
+
4
+ # Parts of the code here are adapted from PyTorch
5
+ # repo: https://github.com/pytorch/pytorch
6
+
7
+ import math
8
+ import os
9
+ from typing import Optional, Callable
10
+ import warnings
11
+ from packaging import version
12
+
13
+ import torch
14
+ import torch.nn.functional as F
15
+ import torch.nn.init as init
16
+ from torch.nn.parameter import Parameter
17
+
18
+ from torch.cuda.amp import custom_fwd, custom_bwd
19
+
20
+ from megatron.core.model_parallel_config import ModelParallelConfig
21
+
22
+ from megatron.core.parallel_state import (
23
+ get_tensor_model_parallel_rank,
24
+ get_tensor_model_parallel_world_size,
25
+ get_tensor_model_parallel_group,
26
+ get_global_memory_buffer,
27
+ is_pipeline_first_stage,
28
+ )
29
+ from megatron import get_args
30
+ from megatron.global_vars import get_num_microbatches
31
+ from .mappings import (
32
+ copy_to_tensor_model_parallel_region,
33
+ gather_from_tensor_model_parallel_region,
34
+ gather_from_sequence_parallel_region,
35
+ reduce_from_tensor_model_parallel_region,
36
+ scatter_to_tensor_model_parallel_region,
37
+ reduce_scatter_to_sequence_parallel_region,
38
+ )
39
+
40
+ from .random import get_cuda_rng_tracker
41
+ from .utils import (
42
+ divide,
43
+ split_tensor_along_last_dim,
44
+ VocabUtility,
45
+ )
46
+
47
+ import deepspeed.runtime.activation_checkpointing.checkpointing as ds_checkpointing
48
+ from deepspeed.accelerator import get_accelerator
49
+
50
+ _grad_accum_fusion_available = True
51
+ try:
52
+ import fused_weight_gradient_mlp_cuda
53
+ except ImportError:
54
+ _grad_accum_fusion_available = False
55
+
56
+ try:
57
+ import habana_frameworks.torch.hpex.experimental.transformer_engine as te
58
+ except ImportError:
59
+ if get_accelerator().device_name() == 'hpu' and get_args().transformer_impl == "transformer_engine":
60
+ raise RuntimeError(
61
+ "Device name is hpu and transformer implementation is transformer_engine"
62
+ "but couldn't import habana_frameworks.torch.hpex.experimental.transformer_engine"
63
+ )
64
+
65
+ _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS = {'tensor_model_parallel': False,
66
+ 'partition_dim': -1,
67
+ 'partition_stride': 1}
68
+
69
+ def param_is_not_tensor_parallel_duplicate(param):
70
+ return (hasattr(param, 'tensor_model_parallel') and
71
+ param.tensor_model_parallel) or (
72
+ get_tensor_model_parallel_rank() == 0)
73
+
74
+
75
+ def set_tensor_model_parallel_attributes(tensor, is_parallel, dim, stride):
76
+ # Make sure the attributes are not set.
77
+ for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
78
+ assert not hasattr(tensor, attribute)
79
+ # Set the attributes.
80
+ setattr(tensor, 'tensor_model_parallel', is_parallel)
81
+ setattr(tensor, 'partition_dim', dim)
82
+ setattr(tensor, 'partition_stride', stride)
83
+
84
+
85
+ def set_defaults_if_not_set_tensor_model_parallel_attributes(tensor):
86
+ def maybe_set(attribute, value):
87
+ if not hasattr(tensor, attribute):
88
+ setattr(tensor, attribute, value)
89
+ for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
90
+ maybe_set(attribute, _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS[attribute])
91
+
92
+
93
+ def copy_tensor_model_parallel_attributes(destination_tensor, source_tensor):
94
+ def maybe_copy(attribute):
95
+ if hasattr(source_tensor, attribute):
96
+ setattr(destination_tensor, attribute,
97
+ getattr(source_tensor, attribute))
98
+ for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
99
+ maybe_copy(attribute)
100
+
101
+
102
+ def _initialize_affine_weight_gpu(weight, init_method,
103
+ partition_dim, stride=1):
104
+ """Initialize affine weight for model parallel on GPU."""
105
+
106
+ set_tensor_model_parallel_attributes(tensor=weight,
107
+ is_parallel=True,
108
+ dim=partition_dim,
109
+ stride=stride)
110
+
111
+ with get_cuda_rng_tracker().fork():
112
+ init_method(weight)
113
+
114
+
115
+ def _initialize_affine_weight_cpu(weight, output_size, input_size,
116
+ per_partition_size, partition_dim,
117
+ init_method, stride=1,
118
+ return_master_weight=False,
119
+ *, params_dtype=torch.float32):
120
+ """Initialize affine weight for model parallel.
121
+
122
+ Build the master weight on all processes and scatter
123
+ the relevant chunk."""
124
+
125
+ set_tensor_model_parallel_attributes(tensor=weight,
126
+ is_parallel=True,
127
+ dim=partition_dim,
128
+ stride=stride)
129
+
130
+ # Initialize master weight
131
+ master_weight = torch.empty(output_size, input_size,
132
+ dtype=torch.float,
133
+ requires_grad=False)
134
+ init_method(master_weight)
135
+ master_weight = master_weight.to(dtype=params_dtype)
136
+
137
+ # Split and copy
138
+ per_partition_per_stride_size = divide(per_partition_size, stride)
139
+ weight_list = torch.split(master_weight, per_partition_per_stride_size,
140
+ dim=partition_dim)
141
+ rank = get_tensor_model_parallel_rank()
142
+ world_size = get_tensor_model_parallel_world_size()
143
+ my_weight_list = weight_list[rank::world_size]
144
+
145
+ with torch.no_grad():
146
+ torch.cat(my_weight_list, dim=partition_dim, out=weight)
147
+ if return_master_weight:
148
+ return master_weight
149
+ return None
150
+
151
+
152
+ # This class encapsulates the behavior related to two mechanisms: hpu graph and amax measuring interval
153
+ class FP8ModuleRunner():
154
+ def __init__(self, module, measure_interval: int=1, cache_fp8_weight_fwd=False):
155
+ self.module = module
156
+ self.measure_interval = measure_interval
157
+ self.cache_fp8_weight_fwd = cache_fp8_weight_fwd
158
+ self.run_cnt = 0
159
+
160
+ def _is_first_microbatch(self):
161
+ if not self.cache_fp8_weight_fwd:
162
+ return None
163
+
164
+ return self.run_cnt % get_num_microbatches() in [1,2]
165
+
166
+ def __call__(self, input, weight, bias=None):
167
+ self.run_cnt += 1
168
+ measure = self.measure_interval == 1 or self.run_cnt % self.measure_interval == 1
169
+ te.fp8.set_measurement_mode(manual=True, manual_value=measure)
170
+
171
+ is_first_microbatch = self._is_first_microbatch()
172
+
173
+ return self.module(input, weight, bias, is_first_microbatch=is_first_microbatch)
174
+
175
+
176
+ class VocabParallelEmbedding(torch.nn.Module):
177
+ """Embedding parallelized in the vocabulary dimension.
178
+
179
+ This is mainly adapted from torch.nn.Embedding and all the default
180
+ values are kept.
181
+ Arguments:
182
+ num_embeddings: vocabulary size.
183
+ embedding_dim: size of hidden state.
184
+
185
+ Keyword Arguments:
186
+ config: A megatron.core.ModelParallelConfig object
187
+ """
188
+
189
+ def __init__(self, num_embeddings: int, embedding_dim: int, *,
190
+ init_method: Callable,
191
+ config: ModelParallelConfig):
192
+ super(VocabParallelEmbedding, self).__init__()
193
+ # Keep the input dimensions.
194
+ self.num_embeddings = num_embeddings
195
+ self.embedding_dim = embedding_dim
196
+ # Set the defaults for compatibility.
197
+ self.padding_idx = None
198
+ self.max_norm = None
199
+ self.norm_type = 2.
200
+ self.scale_grad_by_freq = False
201
+ self.sparse = False
202
+ self._weight = None
203
+ self.tensor_model_parallel_size = get_tensor_model_parallel_world_size()
204
+ # Divide the weight matrix along the vocabulary dimension.
205
+ self.vocab_start_index, self.vocab_end_index = \
206
+ VocabUtility.vocab_range_from_global_vocab_size(
207
+ self.num_embeddings, get_tensor_model_parallel_rank(),
208
+ self.tensor_model_parallel_size)
209
+ self.num_embeddings_per_partition = self.vocab_end_index - \
210
+ self.vocab_start_index
211
+
212
+ # Allocate weights and initialize.
213
+ args = get_args()
214
+ # only the first stage embedding runs this class' forward. The head's embedding does its own
215
+ # thing, so don't waste memory allocating LN weights.
216
+ self.layer_norm = None
217
+ if is_pipeline_first_stage() and args.embed_layernorm:
218
+ from megatron.model import LayerNorm
219
+ self.layer_norm = LayerNorm(embedding_dim, sequence_parallel=config.sequence_parallel)
220
+
221
+ # Allocate weights and initialize.
222
+ if config.use_cpu_initialization:
223
+ self.weight = Parameter(torch.empty(
224
+ self.num_embeddings_per_partition, self.embedding_dim,
225
+ dtype=config.params_dtype))
226
+ if config.perform_initialization:
227
+ _initialize_affine_weight_cpu(
228
+ self.weight, self.num_embeddings, self.embedding_dim,
229
+ self.num_embeddings_per_partition, 0, init_method,
230
+ params_dtype=config.params_dtype)
231
+ else:
232
+ self.weight = Parameter(torch.empty(
233
+ self.num_embeddings_per_partition, self.embedding_dim,
234
+ device=get_accelerator().current_device_name(), dtype=config.params_dtype))
235
+ if config.perform_initialization:
236
+ _initialize_affine_weight_gpu(self.weight, init_method,
237
+ partition_dim=0, stride=1)
238
+
239
+ def forward(self, input_):
240
+ if self.tensor_model_parallel_size > 1:
241
+ # Build the mask.
242
+ input_mask = (input_ < self.vocab_start_index) | \
243
+ (input_ >= self.vocab_end_index)
244
+ # Mask the input.
245
+ masked_input = input_.clone() - self.vocab_start_index
246
+ masked_input[input_mask] = 0
247
+ else:
248
+ masked_input = input_
249
+ # Get the embeddings.
250
+ output_parallel = F.embedding(masked_input, self.weight,
251
+ self.padding_idx, self.max_norm,
252
+ self.norm_type, self.scale_grad_by_freq,
253
+ self.sparse)
254
+ # Mask the output embedding.
255
+ if self.tensor_model_parallel_size > 1:
256
+ output_parallel[input_mask, :] = 0.0
257
+ # Reduce across all the model parallel GPUs.
258
+ output = reduce_from_tensor_model_parallel_region(output_parallel)
259
+
260
+ if self.layer_norm is not None:
261
+ output = self.layer_norm(output)
262
+
263
+ return output
264
+
265
+
266
+ class SequenceParallelPositionEmbedding(torch.nn.Module):
267
+ """Embedding parallelized in the sequence dimension.
268
+
269
+ Arguments:
270
+ sequence_length: max sequence length.
271
+ embedding_dim: size of hidden state.
272
+ """
273
+
274
+ def __init__(self, sequence_length, embedding_dim):
275
+ super(SequenceParallelPositionEmbedding, self).__init__()
276
+ sequence_parallel_size = get_tensor_model_parallel_world_size()
277
+ assert sequence_length % sequence_parallel_size == 0
278
+ local_sequence_length = sequence_length // sequence_parallel_size
279
+ self.offset = local_sequence_length * get_tensor_model_parallel_rank()
280
+ self.local_embeddings = torch.nn.Embedding(
281
+ local_sequence_length, embedding_dim)
282
+
283
+ def forward(self, position_ids):
284
+ return self.local_embeddings(position_ids - self.offset)
285
+
286
+
287
+ class LinearWithGradAccumulationAndAsyncCommunication(torch.autograd.Function):
288
+ """See linear_with_grad_accumulation_and_async_allreduce"""
289
+
290
+ @staticmethod
291
+ @custom_fwd
292
+ def forward(ctx, input, weight, bias, gradient_accumulation_fusion,
293
+ async_grad_allreduce, sequence_parallel):
294
+ ctx.save_for_backward(input, weight)
295
+ ctx.use_bias = bias is not None
296
+ ctx.gradient_accumulation_fusion = gradient_accumulation_fusion
297
+ ctx.async_grad_allreduce = async_grad_allreduce
298
+ ctx.sequence_parallel = sequence_parallel
299
+
300
+ if sequence_parallel:
301
+ world_size = get_tensor_model_parallel_world_size()
302
+ dim_size = list(input.size())
303
+ dim_size[0] = dim_size[0] * world_size
304
+
305
+ all_gather_buffer = \
306
+ get_global_memory_buffer().get_tensor(dim_size, input.dtype, "mpu")
307
+
308
+ if version.parse(torch.__version__) >= version.parse('1.13'):
309
+ torch.distributed.all_gather_into_tensor(
310
+ all_gather_buffer,
311
+ input,
312
+ group=get_tensor_model_parallel_group())
313
+ else:
314
+ torch.distributed._all_gather_base(
315
+ all_gather_buffer,
316
+ input,
317
+ group=get_tensor_model_parallel_group())
318
+
319
+ total_input = all_gather_buffer
320
+ else:
321
+ total_input = input
322
+
323
+ # output = torch.matmul(total_input, weight.t())
324
+ # if bias is not None:
325
+ # output = output + bias
326
+ output = F.linear(total_input, weight, bias)
327
+ return output
328
+
329
+ @staticmethod
330
+ @custom_bwd
331
+ def backward(ctx, grad_output):
332
+ input, weight = ctx.saved_tensors
333
+ use_bias = ctx.use_bias
334
+
335
+ if ctx.sequence_parallel:
336
+ world_size = get_tensor_model_parallel_world_size()
337
+ dim_size = list(input.size())
338
+ dim_size[0] = dim_size[0] * world_size
339
+
340
+ all_gather_buffer = \
341
+ get_global_memory_buffer().get_tensor(dim_size, input.dtype, "mpu")
342
+
343
+ if version.parse(torch.__version__) >= version.parse('1.13'):
344
+ handle = torch.distributed.all_gather_into_tensor(
345
+ all_gather_buffer,
346
+ input,
347
+ group=get_tensor_model_parallel_group(), async_op=True)
348
+ else:
349
+ handle = torch.distributed._all_gather_base(
350
+ all_gather_buffer,
351
+ input,
352
+ group=get_tensor_model_parallel_group(), async_op=True)
353
+
354
+ # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the
355
+ # gather is scheduled before the input gradient computation
356
+ total_input = all_gather_buffer
357
+ else:
358
+ total_input = input
359
+ grad_input = grad_output.matmul(weight)
360
+
361
+ if ctx.sequence_parallel:
362
+ handle.wait()
363
+
364
+ # Doing gather + slicing during the NeMo forward pass can make this tensor
365
+ # not be contiguous. PyTorch only checks if the tensor is contiguous, and only
366
+ # clones it if it's not contiguous:
367
+ # https://github.com/pytorch/pytorch/blob/c47cf9bc7f9e02f649ab4ed53fe4d35732c92ab6/torch/_refs/__init__.py#L2761
368
+ grad_output = grad_output.contiguous()
369
+ # Convert the tensor shapes to 2D for execution compatibility
370
+ if len(grad_output.shape) == 3:
371
+ grad_output = grad_output.view(grad_output.shape[0] * grad_output.shape[1],
372
+ grad_output.shape[2])
373
+ total_input = total_input.view(total_input.shape[0] * total_input.shape[1],
374
+ total_input.shape[2])
375
+ else:
376
+ # Somehow when DeepSpeed MoE is used, grad_output could have 4 dimensions.
377
+ # TODO: May need further investigation
378
+ total_input = total_input.contiguous()
379
+ grad_output = grad_output.view(-1, grad_output.shape[-1])
380
+ total_input = total_input.view(-1, total_input.shape[-1])
381
+
382
+ if ctx.async_grad_allreduce:
383
+ # Asynchronous all-reduce
384
+ handle = torch.distributed.all_reduce(
385
+ grad_input, group=get_tensor_model_parallel_group(), async_op=True)
386
+ # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the
387
+ # all-reduce is scheduled before the weight gradient computation
388
+
389
+ if ctx.sequence_parallel:
390
+ assert not ctx.async_grad_allreduce
391
+ dim_size = list(input.size())
392
+ sub_grad_input = torch.empty(dim_size, dtype=input.dtype,
393
+ device=get_accelerator().current_device_name(),
394
+ requires_grad=False)
395
+ # reduce_scatter
396
+ handle = torch.distributed._reduce_scatter_base(sub_grad_input, grad_input,
397
+ group=get_tensor_model_parallel_group(),
398
+ async_op=True)
399
+ # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the
400
+ # reduce scatter is scheduled before the weight gradient computation
401
+
402
+ # TODO: temporarily commented out
403
+ # if ctx.gradient_accumulation_fusion:
404
+ # if weight.main_grad.dtype == torch.float32:
405
+ # fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(total_input, grad_output, weight.main_grad)
406
+ # elif weight.main_grad.dtype in (torch.float16, torch.bfloat16):
407
+ # fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(total_input, grad_output, weight.main_grad)
408
+ # else:
409
+ # raise RuntimeError("Unsupported gradient type for gradient accumulation fusion")
410
+ # grad_weight = None
411
+ # else:
412
+ # grad_weight = grad_output.t().matmul(total_input)
413
+ grad_weight = grad_output.t().matmul(total_input)
414
+ grad_bias = grad_output.sum(dim=0) if use_bias else None
415
+
416
+ if ctx.sequence_parallel:
417
+ handle.wait()
418
+ return sub_grad_input, grad_weight, grad_bias, None, None, None
419
+
420
+ if ctx.async_grad_allreduce:
421
+ handle.wait()
422
+
423
+ return grad_input, grad_weight, grad_bias, None, None, None
424
+
425
+ def linear_with_grad_accumulation_and_async_allreduce(
426
+ input: torch.Tensor,
427
+ weight: torch.Tensor,
428
+ bias: Optional[torch.Tensor],
429
+ gradient_accumulation_fusion: bool,
430
+ async_grad_allreduce: bool,
431
+ sequence_parallel: bool,
432
+ ) -> torch.Tensor:
433
+ """Linear layer execution with asynchronous communication and
434
+ gradient accumulation fusion in backprop.
435
+
436
+ This has the option to accumulate the result of backprop
437
+ calculation into an existing gradient buffer, preventing the need
438
+ to do an additional addition kernel after the gradient
439
+ calculation.
440
+
441
+ Additionally, the tensor parallel all reduce of the input
442
+ gradients can be done asynchronously with the calculation of
443
+ the weight gradients.
444
+
445
+ In the case of sequence parallelism, the reduce scatter of the
446
+ input gradients is done asynchronously with the calculation of the
447
+ weight gradients.
448
+
449
+ Use of this module requires that the environment variable
450
+ CUDA_DEVICE_MAX_CONNECTIONS=1. There are a few collective
451
+ operations, noted in the code, that should be scheduled before
452
+ compute kernels to overlap the communication with the computation,
453
+ which is necessary for a speedup but not for correctness so that
454
+ ordering isn't imposed by the scheduler. Setting
455
+ CUDA_DEVICE_MAX_CONNECTIONS=1 forces the kernels to be scheduled
456
+ in the order they are called.
457
+
458
+ Arguments:
459
+
460
+ input (torch.Tensor required): input like torch.nn.functional.linear
461
+
462
+ weight (torch.Tensor required): weight like torch.nn.functional.linear
463
+
464
+ bias (torch.Tensor optional): bias like torch.nn.functional.linear
465
+
466
+ gradient_accumulation_fusion (bool required): Perform the gradient
467
+ accumulation fusion, requires the custom CUDA extension
468
+ fused_weight_gradient_mlp_cuda module. To use
469
+ gradient_accumulation_fusion you must install APEX with
470
+ --cpp_ext and --cuda_ext. For example: "pip install
471
+ --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext .\"
472
+ " Note that the extension requires CUDA>=11. Otherwise, you
473
+ must turn off gradient accumulation fusion."
474
+
475
+ async_grad_allreduce (bool required): Do the allreduce of input
476
+ gradients asynchronously with the computation of weight
477
+ gradients. If sequence_parallel is True, this must be
478
+ False, as no all reduce is performed.
479
+
480
+ sequence_parallel (bool required): Indicates that sequence
481
+ parallelism is used and thus in the forward pass the input is
482
+ all-gathered, and in the backward pass the input gradients are
483
+ reduce-scattered.
484
+ """
485
+ if not sequence_parallel:
486
+ return F.linear(input, weight, bias)
487
+ args = [
488
+ input,
489
+ weight,
490
+ bias,
491
+ gradient_accumulation_fusion,
492
+ async_grad_allreduce,
493
+ sequence_parallel,
494
+ ]
495
+
496
+ if not linear_with_grad_accumulation_and_async_allreduce.warned:
497
+ if get_accelerator().device_name() == "cuda" and os.environ.get('CUDA_DEVICE_MAX_CONNECTIONS') != "1":
498
+ if sequence_parallel:
499
+ warnings.warn(
500
+ "When using sequence parallelism it is recommended to set the "
501
+ "environment variable CUDA_DEVICE_MAX_CONNECTIONS to 1 for "
502
+ "maximum speedup")
503
+ linear_with_grad_accumulation_and_async_allreduce.warned = True
504
+
505
+ if async_grad_allreduce:
506
+ warnings.warn(
507
+ "When using async grad allreduce it is recommended to set the "
508
+ "environment variable CUDA_DEVICE_MAX_CONNECTIONS to 1 for "
509
+ "maximum speedup")
510
+ linear_with_grad_accumulation_and_async_allreduce.warned = True
511
+
512
+ return LinearWithGradAccumulationAndAsyncCommunication.apply(*args)
513
+
514
+ linear_with_grad_accumulation_and_async_allreduce.warned = False
515
+
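A minimal usage sketch, assuming the function above is importable from this module: with sequence_parallel=False this fork falls straight through to F.linear, so the toy call below needs no process-group initialization.

import torch

x = torch.randn(4, 2, 16)          # [sequence, batch, hidden], toy sizes
w = torch.randn(32, 16)            # [output_features, input_features]
y = linear_with_grad_accumulation_and_async_allreduce(
    input=x, weight=w, bias=None,
    gradient_accumulation_fusion=False,
    async_grad_allreduce=False,
    sequence_parallel=False)
assert y.shape == (4, 2, 32)       # plain F.linear result in this configuration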
516
+ class ColumnParallelLinear(torch.nn.Module):
517
+ """Linear layer with column parallelism.
518
+
519
+ The linear layer is defined as Y = XA + b. A is parallelized along
520
+ its second dimension as A = [A_1, ..., A_p].
521
+
522
+ Arguments:
523
+ input_size: first dimension of matrix A.
524
+ output_size: second dimension of matrix A.
525
+
526
+ Keyword Arguments
527
+ bias: If true, add bias
528
+ gather_output: If true, call all-gather on output and make Y available
529
+ to all GPUs, otherwise, every GPU will have its output
530
+ which is Y_i = XA_i
531
+ init_method: method to initialize weights. Note that bias is always set
532
+ to zero.
533
+ stride: For the strided linear layers.
534
+ keep_master_weight_for_test: This was added for testing and should be
535
+ set to False. It returns the master weights
536
+ used for initialization.
537
+ skip_bias_add: If True, do not add the bias term, instead
538
+ return it to be added by the caller. This
539
+ enables performance optimizations where bias can
540
+ be fused with other elementwise operations.
541
+
542
+ skip_weight_param_allocation: If True, weight parameter is not allocated and must be passed
543
+ as a keyword argument `weight` during the forward pass. Note
544
+ that this does not affect bias, which will be allocated if
545
+ bias is True. Defaults to False.
546
+
547
+ config: ModelParallelConfig object
548
+
549
+ """
550
+
551
+ def __init__(self, input_size, output_size, *,
552
+ config: ModelParallelConfig,
553
+ init_method: Callable,
554
+ bias=True, gather_output=False, stride=1,
555
+ keep_master_weight_for_test=False,
556
+ skip_bias_add=False,
557
+ skip_weight_param_allocation: bool=False,
558
+ moe=False, enable_expert_tensor_parallelism=False):
559
+ torch.nn.Module.__init__(self)
560
+
561
+ # Keep input parameters
562
+ self.input_size = input_size
563
+ self.output_size = output_size
564
+ self.gather_output = gather_output
565
+ # Divide the weight matrix along the last dimension.
566
+ if moe and (not enable_expert_tensor_parallelism):
567
+ world_size = 1
568
+ self.is_expert_without_slicing = True
569
+ else:
570
+ world_size = get_tensor_model_parallel_world_size()
571
+ self.is_expert_without_slicing = False
572
+ self.output_size_per_partition = divide(output_size, world_size)
573
+ self.skip_bias_add = skip_bias_add
574
+ self.config = config
575
+
576
+ args = get_args()
577
+
578
+ # Parameters.
579
+ # Note: torch.nn.functional.linear performs XA^T + b and as a result
580
+ # we allocate the transpose.
581
+ # Initialize weight.
582
+ if not skip_weight_param_allocation:
583
+ if config.use_cpu_initialization:
584
+ self.weight = Parameter(torch.empty(self.output_size_per_partition,
585
+ self.input_size,
586
+ dtype=config.params_dtype))
587
+ if config.perform_initialization:
588
+ self.master_weight = _initialize_affine_weight_cpu(
589
+ self.weight, self.output_size, self.input_size,
590
+ self.output_size_per_partition, 0, init_method,
591
+ stride=stride, return_master_weight=keep_master_weight_for_test)
592
+ else:
593
+ self.weight = Parameter(torch.empty(
594
+ self.output_size_per_partition, self.input_size,
595
+ device=get_accelerator().current_device_name(), dtype=config.params_dtype))
596
+ if config.perform_initialization:
597
+ _initialize_affine_weight_gpu(self.weight, init_method,
598
+ partition_dim=0, stride=stride)
599
+ else:
600
+ self.weight = None
601
+
602
+ if bias:
603
+ if config.use_cpu_initialization:
604
+ self.bias = Parameter(torch.empty(
605
+ self.output_size_per_partition, dtype=config.params_dtype))
606
+ else:
607
+ self.bias = Parameter(torch.empty(
608
+ self.output_size_per_partition,
609
+ device=get_accelerator().current_device_name(),
610
+ dtype=config.params_dtype))
611
+ set_tensor_model_parallel_attributes(self.bias, True, 0, stride)
612
+ if config.perform_initialization:
613
+ # Always initialize bias to zero.
614
+ with torch.no_grad():
615
+ self.bias.zero_()
616
+ else:
617
+ self.register_parameter('bias', None)
618
+
619
+ self.async_tensor_model_parallel_allreduce = (
620
+ config.async_tensor_model_parallel_allreduce and
621
+ world_size > 1)
622
+
623
+ self.sequence_parallel = config.sequence_parallel
624
+ if self.sequence_parallel and world_size <= 1:
625
+ warnings.warn(
626
+ f"`sequence_parallel` is set to `True`, but tensor model parallel size is {world_size}. "
627
+ f"Disabling sequence parallel."
628
+ )
629
+ self.sequence_parallel = False
630
+
631
+ if config.gradient_accumulation_fusion and not _grad_accum_fusion_available:
632
+ raise RuntimeError(
633
+ "ColumnParallelLinear was called with gradient_accumulation_fusion set "
634
+ "to True but the custom CUDA extension fused_weight_gradient_mlp_cuda "
635
+ "module is not found. To use gradient_accumulation_fusion you must "
636
+ "install APEX with --cpp_ext and --cuda_ext. For example: "
637
+ "pip install --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext .\" "
638
+ "Note that the extension requires CUDA>=11. Otherwise, you must turn off "
639
+ "gradient accumulation fusion."
640
+ )
641
+ self.gradient_accumulation_fusion = config.gradient_accumulation_fusion
642
+
643
+ if self.async_tensor_model_parallel_allreduce and self.sequence_parallel:
644
+ raise RuntimeError(
645
+ "`async_tensor_model_parallel_allreduce` and `sequence_parallel` "
646
+ "cannot be enabled at the same time."
647
+ )
648
+
649
+ self.output_parallel_linear = F.linear
650
+ if self.training and args.transformer_impl == "transformer_engine" \
651
+ and get_accelerator().device_name() == "hpu":
652
+ linear_fp8 = te.Linear(
653
+ self.input_size,
654
+ self.output_size_per_partition,
655
+ skip_weight_param_allocation=True,
656
+ bias=bias,
657
+ minimize_memory=not args.cache_fp8_weight)
658
+ self.output_parallel_linear = FP8ModuleRunner(linear_fp8, args.fp8_interval, args.cache_fp8_weight_fwd)
659
+
660
+ def forward(self,
661
+ input_: torch.Tensor,
662
+ weight: Optional[torch.Tensor] = None):
663
+ """Forward of ColumnParallelLinear
664
+
665
+ Args:
666
+ input_: 3D tensor whose order of dimensions is [sequence, batch, hidden]
667
+
668
+ weight (optional): weight tensor to use, compulsory when
669
+ skip_weight_param_allocation is True.
670
+
671
+ Returns:
672
+ - output
673
+ - bias
674
+
675
+ """
676
+ args = get_args()
677
+
678
+ if weight is None:
679
+ if self.weight is None:
680
+ raise RuntimeError("weight was not supplied to ColumnParallelLinear forward pass "
681
+ "and skip_weight_param_allocation is True.")
682
+ weight = self.weight
683
+ else:
684
+ # Check the weight passed in is the correct shape
685
+ expected_shape = (self.output_size_per_partition, self.input_size)
686
+ if weight.shape != expected_shape:
687
+ raise RuntimeError(f"supplied weight's shape is {tuple(weight.shape)}, "
688
+ f"not {expected_shape} as expected")
689
+
690
+ bias = self.bias if not self.skip_bias_add else None
691
+
692
+ if self.async_tensor_model_parallel_allreduce or \
693
+ self.sequence_parallel or \
694
+ self.is_expert_without_slicing: # non-expert only tensor parallelism
695
+ input_parallel = input_
696
+ else:
697
+ input_parallel = copy_to_tensor_model_parallel_region(input_)
698
+ # Matrix multiply.
699
+ if args.transformer_impl == "transformer_engine" and get_accelerator().device_name() == 'hpu':
700
+ gather_input = lambda x: x
701
+ if self.sequence_parallel:
702
+ gather_input = gather_from_sequence_parallel_region
703
+ output_parallel = self.output_parallel_linear(gather_input(input_parallel), self.weight, self.bias)
704
+ else:
705
+ output_parallel = linear_with_grad_accumulation_and_async_allreduce(
706
+ input=input_parallel,
707
+ weight=weight,
708
+ bias=bias,
709
+ gradient_accumulation_fusion=self.gradient_accumulation_fusion,
710
+ async_grad_allreduce=self.async_tensor_model_parallel_allreduce,
711
+ sequence_parallel=self.sequence_parallel
712
+ )
713
+ if self.gather_output and not self.is_expert_without_slicing:
714
+ # All-gather across the partitions.
715
+ assert not self.sequence_parallel
716
+ output = gather_from_tensor_model_parallel_region(output_parallel)
717
+ else:
718
+ output = output_parallel
719
+ output_bias = self.bias if self.skip_bias_add else None
720
+ return output, output_bias
721
+
722
+
723
+ class RowParallelLinear(torch.nn.Module):
724
+ """Linear layer with row parallelism.
725
+
726
+ The linear layer is defined as Y = XA + b. A is parallelized along
727
+ its first dimension and X along its second dimension as:
728
+ - -
729
+ | A_1 |
730
+ | . |
731
+ A = | . | X = [X_1, ..., X_p]
732
+ | . |
733
+ | A_p |
734
+ - -
735
+ Arguments:
736
+ input_size: first dimension of matrix A.
737
+ output_size: second dimension of matrix A.
738
+
739
+ Keyword Arguments:
740
+ bias: If true, add bias. Note that bias is not parallelized.
741
+ input_is_parallel: If true, we assume that the input is already
742
+ split across the GPUs and we do not split
743
+ again.
744
+ init_method: method to initialize weights. Note that bias is always set
745
+ to zero.
746
+ stride: For the strided linear layers.
747
+ keep_master_weight_for_test: This was added for testing and should be
748
+ set to False. It returns the master weights
749
+ used for initialization.
750
+ skip_bias_add: If True, do not add the bias term, instead
751
+ return it to be added by the caller. This
752
+ enables performance optimizations where bias can
753
+ be fused with other elementwise operations.
754
+ config: ModelParallelConfig object
755
+
756
+ """
757
+
758
+ def __init__(self, input_size: int, output_size: int, *,
759
+ config: ModelParallelConfig,
760
+ init_method: Callable,
761
+ bias: bool = True,
762
+ input_is_parallel: bool = False,
763
+ stride: int = 1,
764
+ keep_master_weight_for_test: bool = False,
765
+ skip_bias_add: bool = False,
766
+ moe=False, enable_expert_tensor_parallelism=False):
767
+ torch.nn.Module.__init__(self)
768
+
769
+ # Keep input parameters
770
+ self.input_size = input_size
771
+ self.output_size = output_size
772
+ self.input_is_parallel = input_is_parallel
773
+ # Divide the weight matrix along the last dimension.
774
+ if moe and (not enable_expert_tensor_parallelism):
775
+ world_size = 1
776
+ else:
777
+ world_size = get_tensor_model_parallel_world_size()
778
+ self.is_expert_without_slicing = moe and world_size==1
779
+ self.input_size_per_partition = divide(input_size, world_size)
780
+ self.skip_bias_add = skip_bias_add
781
+ self.config = config
782
+ self.gradient_accumulation_fusion = config.gradient_accumulation_fusion
783
+ self.sequence_parallel = config.sequence_parallel
784
+ if self.sequence_parallel and not self.input_is_parallel:
785
+ raise RuntimeError("To enable `sequence_parallel`, `input_is_parallel` must be `True`")
786
+
787
+ args = get_args()
788
+
789
+ # Parameters.
790
+ # Note: torch.nn.functional.linear performs XA^T + b and as a result
791
+ # we allocate the transpose.
792
+ # Initialize weight.
793
+ if config.use_cpu_initialization:
794
+ self.weight = Parameter(torch.empty(self.output_size,
795
+ self.input_size_per_partition,
796
+ dtype=config.params_dtype))
797
+ if config.perform_initialization:
798
+ self.master_weight = _initialize_affine_weight_cpu(
799
+ self.weight, self.output_size, self.input_size,
800
+ self.input_size_per_partition, 1, init_method,
801
+ stride=stride, return_master_weight=keep_master_weight_for_test,
802
+ params_dtype=config.params_dtype)
803
+ else:
804
+ self.weight = Parameter(torch.empty(
805
+ self.output_size, self.input_size_per_partition,
806
+ device=get_accelerator().current_device_name(), dtype=config.params_dtype))
807
+ if config.perform_initialization:
808
+ _initialize_affine_weight_gpu(self.weight, init_method,
809
+ partition_dim=1, stride=stride)
810
+ if bias:
811
+ if config.use_cpu_initialization:
812
+ self.bias = Parameter(torch.empty(self.output_size,
813
+ dtype=config.params_dtype))
814
+ else:
815
+ self.bias = Parameter(torch.empty(
816
+ self.output_size, device=get_accelerator().current_device_name(),
817
+ dtype=config.params_dtype))
818
+ setattr(self.bias, 'sequence_parallel', self.sequence_parallel)
819
+
820
+ if config.perform_initialization:
821
+ # Always initialize bias to zero.
822
+ with torch.no_grad():
823
+ self.bias.zero_()
824
+ else:
825
+ self.register_parameter('bias', None)
826
+
827
+ if args.transformer_impl == "local" and args.normalization != "rmsnorm":
828
+ self.tmp = torch.zeros((self.output_size, self.input_size_per_partition), device=get_accelerator().current_device_name(), dtype=torch.int8)
829
+
830
+ self.output_parallel_linear = F.linear
831
+ if self.training and args.transformer_impl == "transformer_engine" and get_accelerator().device_name() == 'hpu':
832
+ linear_fp8 = te.Linear(
833
+ self.input_size_per_partition,
834
+ self.output_size,
835
+ skip_weight_param_allocation=True,
836
+ bias=bias,
837
+ minimize_memory=not args.cache_fp8_weight)
838
+ self.output_parallel_linear = FP8ModuleRunner(linear_fp8, args.fp8_interval, args.cache_fp8_weight_fwd)
839
+
840
+
841
+ def forward(self, input_):
842
+ """Forward of RowParallelLinear
843
+
844
+ Args:
845
+ input_: 3D tensor whose order of dimensions is [sequence, batch, hidden]
846
+
847
+ Returns:
848
+ - output
849
+ - bias
850
+ """
851
+ # Set up backprop all-reduce.
852
+ if self.input_is_parallel or self.is_expert_without_slicing:
853
+ input_parallel = input_
854
+ else:
855
+ assert not self.sequence_parallel
856
+ input_parallel = scatter_to_tensor_model_parallel_region(input_)
857
+ # Matrix multiply.
858
+ if get_args().transformer_impl == "transformer_engine" and get_accelerator().device_name() == 'hpu':
859
+ gather_input = lambda x: x
860
+ if self.sequence_parallel:
861
+ gather_input = gather_from_sequence_parallel_region
862
+ output_parallel = self.output_parallel_linear(gather_input(input_parallel), self.weight, self.bias)
863
+ else:
864
+ output_parallel = linear_with_grad_accumulation_and_async_allreduce(
865
+ input=input_parallel,
866
+ weight=self.weight,
867
+ bias=None,
868
+ gradient_accumulation_fusion=self.gradient_accumulation_fusion,
869
+ async_grad_allreduce=False,
870
+ sequence_parallel=False,
871
+ )
872
+
873
+ # All-reduce across all the partitions.
874
+ if self.sequence_parallel:
875
+ output_ = reduce_scatter_to_sequence_parallel_region(output_parallel)
876
+ elif self.is_expert_without_slicing: # non-expert only tensor-parallelism
877
+ output_ = output_parallel
878
+ else:
879
+ output_ = reduce_from_tensor_model_parallel_region(output_parallel)
880
+ if not self.skip_bias_add:
881
+ output = output_ + self.bias if self.bias is not None else output_
882
+ output_bias = None
883
+ else:
884
+ output = output_
885
+ output_bias = self.bias
886
+ return output, output_bias
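A single-process sketch of the algebra behind the usual ColumnParallelLinear -> RowParallelLinear pairing: splitting the first weight by columns and the second by rows lets each rank compute X @ A_i @ B_i locally, and one all-reduce (a plain sum here) recovers X @ A @ B. Sizes and the tp value are toy assumptions.

import torch

torch.manual_seed(0)
tp = 2
X = torch.randn(3, 8)              # [tokens, hidden]
A = torch.randn(8, 16)             # column-parallel weight, split along dim=1
B = torch.randn(16, 8)             # row-parallel weight, split along dim=0

A_shards = A.chunk(tp, dim=1)
B_shards = B.chunk(tp, dim=0)
partial = sum(X @ A_shards[r] @ B_shards[r] for r in range(tp))   # "all-reduce"
assert torch.allclose(partial, X @ A @ B, atol=1e-5)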
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/mappings.py ADDED
@@ -0,0 +1,290 @@
1
+ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
2
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
3
+
4
+ import torch
5
+
6
+ from megatron.core.parallel_state import (
7
+ get_tensor_model_parallel_rank,
8
+ get_tensor_model_parallel_world_size,
9
+ get_tensor_model_parallel_group,
10
+ )
11
+ from .utils import split_tensor_along_last_dim
12
+ from deepspeed.accelerator import get_accelerator
13
+
14
+
15
+ def get_async_op():
16
+ async_op = False
17
+ if get_accelerator().device_name() == "hpu":
18
+ async_op = True
19
+ return async_op
20
+
21
+
22
+ def _reduce(input_):
23
+ """All-reduce the input tensor across model parallel group."""
24
+
25
+ # Bypass the function if we are using only 1 GPU.
26
+ if get_tensor_model_parallel_world_size()==1:
27
+ return input_
28
+
29
+ # All-reduce.
30
+ torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group(), async_op=get_async_op())
31
+
32
+ return input_
33
+
34
+
35
+ def _split_along_last_dim(input_):
36
+ """Split the tensor along its last dimension and keep the
37
+ corresponding slice."""
38
+
39
+ world_size = get_tensor_model_parallel_world_size()
40
+ # Bypass the function if we are using only 1 GPU.
41
+ if world_size == 1:
42
+ return input_
43
+
44
+ # Split along last dimension.
45
+ input_list = split_tensor_along_last_dim(input_, world_size)
46
+
47
+ # Note: torch.split does not create contiguous tensors by default.
48
+ rank = get_tensor_model_parallel_rank()
49
+ output = input_list[rank].contiguous()
50
+
51
+ return output
52
+
53
+
54
+ def _split_along_first_dim(input_):
55
+ """Split the tensor along its first dimension and keep the
56
+ corresponding slice."""
57
+
58
+ world_size = get_tensor_model_parallel_world_size()
59
+ # Bypass the function if we are using only 1 GPU.
60
+ if world_size == 1:
61
+ return input_
62
+
63
+ # Split along first dimension.
64
+ dim_size = input_.size()[0]
65
+ assert dim_size % world_size == 0, \
66
+ "First dimension of the tensor should be divisible by tensor parallel size"
67
+ local_dim_size = dim_size // world_size
68
+ rank = get_tensor_model_parallel_rank()
69
+ dim_offset = rank * local_dim_size
70
+
71
+ output = input_[dim_offset:dim_offset+local_dim_size].contiguous()
72
+
73
+ return output
74
+
75
+
76
+ def _gather_along_last_dim(input_):
77
+ """Gather tensors and concatinate along the last dimension."""
78
+
79
+ world_size = get_tensor_model_parallel_world_size()
80
+ # Bypass the function if we are using only 1 GPU.
81
+ if world_size == 1:
82
+ return input_
83
+
84
+ # Size and dimension.
85
+ last_dim = input_.dim() - 1
86
+ rank = get_tensor_model_parallel_rank()
87
+
88
+ tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
89
+ tensor_list[rank] = input_
90
+ torch.distributed.all_gather(tensor_list, input_, group=get_tensor_model_parallel_group(), async_op=get_async_op())
91
+
92
+ # Note: torch.cat already creates a contiguous tensor.
93
+ output = torch.cat(tensor_list, dim=last_dim).contiguous()
94
+
95
+ return output
96
+
97
+
98
+ def _gather_along_first_dim(input_):
99
+ """Gather tensors and concatinate along the first dimension."""
100
+
101
+ world_size = get_tensor_model_parallel_world_size()
102
+ # Bypass the function if we are using only 1 GPU.
103
+ if world_size == 1:
104
+ return input_
105
+
106
+ dim_size = list(input_.size())
107
+ dim_size[0] = dim_size[0] * world_size
108
+
109
+ output = torch.empty(dim_size, dtype=input_.dtype,
110
+ device=get_accelerator().current_device_name())
111
+ torch.distributed._all_gather_base(output, input_.contiguous(),
112
+ group=get_tensor_model_parallel_group(),
113
+ async_op=get_async_op())
114
+
115
+ return output
116
+
117
+ def _reduce_scatter_along_first_dim(input_):
118
+ """Reduce-scatter the input tensor across model parallel group."""
119
+ world_size = get_tensor_model_parallel_world_size()
120
+ # Bypass the function if we are using only 1 GPU.
121
+ if world_size == 1:
122
+ return input_
123
+
124
+ dim_size = list(input_.size())
125
+ assert dim_size[0] % world_size == 0, \
126
+ "First dimension of the tensor should be divisible by tensor parallel size"
127
+
128
+ dim_size[0] = dim_size[0] // world_size
129
+
130
+ output = torch.empty(dim_size, dtype=input_.dtype,
131
+ device=get_accelerator().current_device_name())
132
+ torch.distributed._reduce_scatter_base(output, input_.contiguous(),
133
+ group=get_tensor_model_parallel_group(),
134
+ async_op=get_async_op())
135
+ return output
136
+
137
+
138
+ class _CopyToModelParallelRegion(torch.autograd.Function):
139
+ """Pass the input to the model parallel region."""
140
+
141
+ @staticmethod
142
+ def symbolic(graph, input_):
143
+ return input_
144
+
145
+ @staticmethod
146
+ def forward(ctx, input_):
147
+ return input_
148
+
149
+ @staticmethod
150
+ def backward(ctx, grad_output):
151
+ return _reduce(grad_output)
152
+
153
+
154
+ class _ReduceFromModelParallelRegion(torch.autograd.Function):
155
+ """All-reduce the input from the model parallel region."""
156
+
157
+ @staticmethod
158
+ def symbolic(graph, input_):
159
+ return _reduce(input_)
160
+
161
+ @staticmethod
162
+ def forward(ctx, input_):
163
+ return _reduce(input_)
164
+
165
+ @staticmethod
166
+ def backward(ctx, grad_output):
167
+ return grad_output
168
+
169
+
170
+ class _ScatterToModelParallelRegion(torch.autograd.Function):
171
+ """Split the input and keep only the corresponding chuck to the rank."""
172
+
173
+ @staticmethod
174
+ def symbolic(graph, input_):
175
+ return _split_along_last_dim(input_)
176
+
177
+ @staticmethod
178
+ def forward(ctx, input_):
179
+ return _split_along_last_dim(input_)
180
+
181
+ @staticmethod
182
+ def backward(ctx, grad_output):
183
+ return _gather_along_last_dim(grad_output)
184
+
185
+
186
+ class _GatherFromModelParallelRegion(torch.autograd.Function):
187
+ """Gather the input from model parallel region and concatinate."""
188
+
189
+ @staticmethod
190
+ def symbolic(graph, input_):
191
+ return _gather_along_last_dim(input_)
192
+
193
+ @staticmethod
194
+ def forward(ctx, input_):
195
+ return _gather_along_last_dim(input_)
196
+
197
+ @staticmethod
198
+ def backward(ctx, grad_output):
199
+ return _split_along_last_dim(grad_output)
200
+
201
+
202
+ class _ScatterToSequenceParallelRegion(torch.autograd.Function):
203
+ """Split the input and keep only the corresponding chuck to the rank."""
204
+
205
+ @staticmethod
206
+ def symbolic(graph, input_):
207
+ return _split_along_first_dim(input_)
208
+
209
+ @staticmethod
210
+ def forward(ctx, input_):
211
+ return _split_along_first_dim(input_)
212
+
213
+ @staticmethod
214
+ def backward(ctx, grad_output):
215
+ return _gather_along_first_dim(grad_output)
216
+
217
+
218
+ class _GatherFromSequenceParallelRegion(torch.autograd.Function):
219
+ """Gather the input from sequence parallel region and concatinate."""
220
+
221
+ @staticmethod
222
+ def symbolic(graph, input_, tensor_parallel_output_grad=True):
223
+ return _gather_along_first_dim(input_)
224
+
225
+ @staticmethod
226
+ def forward(ctx, input_, tensor_parallel_output_grad=True):
227
+ ctx.tensor_parallel_output_grad = tensor_parallel_output_grad
228
+ return _gather_along_first_dim(input_)
229
+
230
+ @staticmethod
231
+ def backward(ctx, grad_output):
232
+ tensor_parallel_output_grad = ctx.tensor_parallel_output_grad
233
+
234
+ # If the computation graph after the gather operation is
235
+ # in the tensor parallel mode, output gradients need to be reduce-
236
+ # scattered, whereas if the computation is duplicated,
237
+ # output gradients need to be scattered.
238
+ if tensor_parallel_output_grad:
239
+ return _reduce_scatter_along_first_dim(grad_output), None
240
+ else:
241
+ return _split_along_first_dim(grad_output), None
242
+
243
+
244
+ class _ReduceScatterToSequenceParallelRegion(torch.autograd.Function):
245
+ """Reduce scatter the input from the model parallel region."""
246
+
247
+ @staticmethod
248
+ def symbolic(graph, input_):
249
+ return _reduce_scatter_along_first_dim(input_)
250
+
251
+ @staticmethod
252
+ def forward(ctx, input_):
253
+ return _reduce_scatter_along_first_dim(input_)
254
+
255
+ @staticmethod
256
+ def backward(ctx, grad_output):
257
+ return _gather_along_first_dim(grad_output)
258
+
259
+
260
+ # -----------------
261
+ # Helper functions.
262
+ # -----------------
263
+
264
+ def copy_to_tensor_model_parallel_region(input_):
265
+ return _CopyToModelParallelRegion.apply(input_)
266
+
267
+
268
+ def reduce_from_tensor_model_parallel_region(input_):
269
+ return _ReduceFromModelParallelRegion.apply(input_)
270
+
271
+
272
+ def scatter_to_tensor_model_parallel_region(input_):
273
+ return _ScatterToModelParallelRegion.apply(input_)
274
+
275
+
276
+ def gather_from_tensor_model_parallel_region(input_):
277
+ return _GatherFromModelParallelRegion.apply(input_)
278
+
279
+
280
+ def scatter_to_sequence_parallel_region(input_):
281
+ return _ScatterToSequenceParallelRegion.apply(input_)
282
+
283
+
284
+ def gather_from_sequence_parallel_region(input_, tensor_parallel_output_grad=True):
285
+ return _GatherFromSequenceParallelRegion.apply(input_, tensor_parallel_output_grad)
286
+
287
+
288
+ def reduce_scatter_to_sequence_parallel_region(input_):
289
+ return _ReduceScatterToSequenceParallelRegion.apply(input_)
290
+
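A toy, single-process sketch of the round-trip property the mappings above rely on: splitting along the last dimension and concatenating the shards back (what _split_along_last_dim and _gather_along_last_dim do across ranks) restores the original tensor.

import torch

tp = 4
x = torch.randn(2, 3, 8)                    # last dim divisible by tp
shards = torch.chunk(x, tp, dim=-1)         # per-rank slices
restored = torch.cat(shards, dim=-1)        # gather and concatenate
assert torch.equal(restored, x)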
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/random.py ADDED
@@ -0,0 +1,330 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ # Parts of the code here are adapted from PyTorch
4
+ # repo: https://github.com/pytorch/pytorch
5
+
6
+ import contextlib
7
+
8
+ import torch
9
+ from torch import _C
10
+ from deepspeed.accelerator import get_accelerator
11
+ from torch.utils.checkpoint import detach_variable
12
+
13
+ from megatron import get_args
14
+ from megatron.memory import allocate_mem_buff
15
+ from megatron.core.parallel_state import (
16
+ get_data_parallel_rank,
17
+ get_tensor_model_parallel_group,
18
+ get_tensor_model_parallel_rank,
19
+ get_tensor_model_parallel_world_size,
20
+ )
21
+
22
+ from .utils import (
23
+ split_tensor_into_1d_equal_chunks,
24
+ gather_split_1d_tensor,
25
+ )
26
+
27
+ from megatron.core.utils import safely_set_viewless_tensor_data
28
+
29
+ import deepspeed
30
+
31
+ # Default name for the model parallel rng tracker.
32
+ _MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
33
+
34
+ # Whether to apply model parallelism to checkpointed hidden states.
35
+ _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = None
36
+
37
+
38
+ def init_checkpointed_activations_memory_buffer():
39
+ """Initializ the memory buffer for the checkpointed activations."""
40
+ args = get_args()
41
+
42
+ per_layer = args.micro_batch_size * args.max_position_embeddings * \
43
+ args.hidden_size // args.tensor_model_parallel_size
44
+ assert args.num_layers % args.checkpoint_num_layers == 0, \
45
+ 'number of layers is not divisible by checkpoint-num-layers'
46
+ num_checkpointer_layers = args.num_layers // args.checkpoint_num_layers
47
+ numel = per_layer * num_checkpointer_layers
48
+ dtype = torch.half
49
+ if not args.fp16:
50
+ dtype = torch.float
51
+
52
+ global _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER
53
+ assert _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is None, \
54
+ 'checkpointed activations memory buffer is already allocated.'
55
+ _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = allocate_mem_buff(
56
+ 'checkpointed activations', numel, dtype, track_usage=False)
57
+
58
+
59
+ def reset_checkpointed_activations_memory_buffer():
60
+ """Reset the memory used for checkpointing."""
61
+ if _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is not None:
62
+ _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER.reset()
63
+
64
+ def _set_cuda_rng_state(new_state, device=-1):
65
+ """Sets the random number generator state of the current GPU.
66
+
67
+ Argumentss:
68
+ new_state (torch.ByteTensor): The desired state
69
+ This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
70
+ with a single change: the input state is not cloned. Cloning caused
71
+ major performance issues for +4 GPU cases.
72
+ """
73
+ if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
74
+ # older PyTorch
75
+ def cb():
76
+ with get_accelerator().device(device):
77
+ _C._cuda_setRNGState(new_state)
78
+ else:
79
+ # newer PyTorch
80
+ if device == -1:
81
+ device = torch.device(get_accelerator().device_name())
82
+ elif isinstance(device, str):
83
+ device = torch.device(device)
84
+ elif isinstance(device, int):
85
+ device = torch.device(get_accelerator().device_name(), device)
86
+
87
+ def cb():
88
+ idx = device.index
89
+ if idx is None:
90
+ idx = get_accelerator().current_device()
91
+ default_generator = get_accelerator().default_generator(idx)
92
+ default_generator.set_state(new_state)
93
+
94
+ get_accelerator().lazy_call(cb)
95
+
96
+
97
+
98
+ class CudaRNGStatesTracker:
99
+ """Tracker for the cuda RNG states.
100
+
101
+ Using the `add` method, a cuda rng state is initialized based on
102
+ the input `seed` and is assigned to `name`. Later, by forking the
103
+ rng state, we can perform operations and return to our starting
104
+ cuda state.
105
+ """
106
+
107
+ def __init__(self):
108
+ # Map from a string name to the cuda rng state.
109
+ self.states_ = {}
110
+ # Seeds are just for book keeping and ensure no seed is set twice.
111
+ self.seeds_ = set()
112
+
113
+ def reset(self):
114
+ """Set to the initial state (no tracker)."""
115
+ self.states_ = {}
116
+ self.seeds_ = set()
117
+
118
+ def get_states(self):
119
+ """Get rng states. Copy the dictionary so we have direct
120
+ pointers to the states, not just a pointer to the dictionary."""
121
+ states = {}
122
+ for name in self.states_:
123
+ states[name] = self.states_[name]
124
+ return states
125
+
126
+ def set_states(self, states):
127
+ """Set the rng states. For efficiency purposes, we do not check
128
+ the size of seed for compatibility."""
129
+ self.states_ = states
130
+
131
+ def add(self, name, seed):
132
+ """Track the rng state."""
133
+ # Check seed is not already used.
134
+ if seed in self.seeds_:
135
+ raise Exception('seed {} already exists'.format(seed))
136
+ self.seeds_.add(seed)
137
+ # Check that state is not already defined.
138
+ if name in self.states_:
139
+ raise Exception('cuda rng state {} already exists'.format(name))
140
+ # Get the current rng state.
141
+ orig_rng_state = get_accelerator().get_rng_state()
142
+ # Set the new state and store it.
143
+ get_accelerator().manual_seed(seed)
144
+ self.states_[name] = get_accelerator().get_rng_state()
145
+ # Reset rng state to what it was.
146
+ _set_cuda_rng_state(orig_rng_state)
147
+
148
+ @contextlib.contextmanager
149
+ def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
150
+ """Fork the cuda rng state, perform operations, and exit with
151
+ the original state."""
152
+ # Check if we have added the state
153
+ if name not in self.states_:
154
+ print(name, self.states_)
155
+ raise Exception('cuda rng state {} is not added'.format(name))
156
+ # Store current rng state.
157
+ orig_cuda_rng_state = get_accelerator().get_rng_state()
158
+ # Set rng state to the desired one
159
+ _set_cuda_rng_state(self.states_[name])
160
+ # Do the stuff we wanted to do.
161
+ try:
162
+ yield
163
+ finally:
164
+ # Update the current rng state for later use.
165
+ self.states_[name] = get_accelerator().get_rng_state()
166
+ # And set the state to the original state we started with.
167
+ _set_cuda_rng_state(orig_cuda_rng_state)
168
+
169
+
170
+ # RNG tracker object.
171
+ _CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
172
+
173
+ def get_cuda_rng_tracker():
174
+ """Get cuda rng tracker."""
175
+ if deepspeed.checkpointing.is_configured():
176
+ return deepspeed.checkpointing.get_cuda_rng_tracker()
177
+
178
+ return _CUDA_RNG_STATE_TRACKER
179
+
180
+
181
+ def model_parallel_cuda_manual_seed(seed):
182
+ """Initialize model parallel cuda seed.
183
+
184
+ This function should be called after the model parallel is
185
+ initialized. Also, no torch.cuda.manual_seed should be called
186
+ after this function. Basically, this is a replacement for that
187
+ function.
188
+ Two sets of RNG states are tracked:
189
+ default state: This is for data parallelism and is the same among a
190
+ set of model parallel GPUs but different across
191
+ different model paralle groups. This is used for
192
+ example for dropout in the non-tensor-model-parallel regions.
193
+ tensor-model-parallel state: This state is different among a set of model
194
+ parallel GPUs, but the same across data parallel
195
+ groups. This is used for example for dropout in
196
+ model parallel regions.
197
+ """
198
+ if deepspeed.checkpointing.is_configured():
199
+ return deepspeed.checkpointing.model_parallel_cuda_manual_seed(seed)
200
+
201
+ # 2718 is just for fun and any POSITIVE value will work.
202
+ offset = seed + 2718
203
+ tensor_model_parallel_seed = offset + get_tensor_model_parallel_rank()
204
+ # Data parallel gets the original seed.
205
+ data_parallel_seed = seed
206
+
207
+ if torch.distributed.get_rank() == 0:
208
+ print('> initializing model parallel cuda seeds on global rank {}, '
209
+ 'model parallel rank {}, and data parallel rank {} with '
210
+ 'model parallel seed: {} and data parallel seed: {}'.format(
211
+ torch.distributed.get_rank(), get_tensor_model_parallel_rank(),
212
+ get_data_parallel_rank(), tensor_model_parallel_seed,
213
+ data_parallel_seed), flush=True)
214
+ _CUDA_RNG_STATE_TRACKER.reset()
215
+ # Set the default state.
216
+ get_accelerator().manual_seed(data_parallel_seed)
217
+ # and model parallel state.
218
+ _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME,
219
+ tensor_model_parallel_seed)
220
+
221
+
222
+ def model_parallel_reconfigure_tp_seed(seed):
223
+ if deepspeed.checkpointing.is_configured():
224
+ return deepspeed.checkpointing.model_parallel_reconfigure_tp_seed(seed)
225
+
226
+ model_parallel_seed = seed + 2718 + get_tensor_model_parallel_rank()
227
+ with _CUDA_RNG_STATE_TRACKER.fork():
228
+ get_accelerator().manual_seed(model_parallel_seed)
229
+
230
+
231
+ class CheckpointFunction(torch.autograd.Function):
232
+ """This function is adapted from torch.utils.checkpoint with
233
+ two main changes:
234
+ 1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
235
+ 2) the states in the model parallel tracker are also properly
236
+ tracked/set/reset.
237
+ """
238
+ @staticmethod
239
+ def forward(ctx, run_function, distribute_saved_activations, *args):
240
+ ctx.run_function = run_function
241
+ ctx.distribute_saved_activations \
242
+ = distribute_saved_activations
243
+
244
+ # Copy the rng states.
245
+ ctx.fwd_cpu_rng_state = torch.get_rng_state()
246
+ ctx.fwd_cuda_rng_state = get_accelerator().get_rng_state()
247
+ ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
248
+
249
+ with torch.no_grad():
250
+ outputs = run_function(*args)
251
+
252
+ # Divide hidden states across model parallel group and only keep
253
+ # the chunk corresponding to the current rank.
254
+ if distribute_saved_activations:
255
+ ctx.input_0_shape = args[0].data.shape
256
+ safely_set_viewless_tensor_data(
257
+ args[0],
258
+ split_tensor_into_1d_equal_chunks(args[0].data, new_buffer=True))
259
+
260
+ # HACK: currently when DeepSpeed is used, we always set
261
+ # distribute_saved_activations to false, and use the following older
262
+ # activation checkpointing mechanisms
263
+ if _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is not None:
264
+ ctx.input_0_shape = args[0].data.shape
265
+ args[0].data = split_tensor_into_1d_equal_chunks(args[0].data)
266
+ args[0].data = _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER.add(
267
+ args[0].data)
268
+
269
+ # Store everything.
270
+ ctx.save_for_backward(*args)
271
+
272
+ return outputs
273
+
274
+ @staticmethod
275
+ def backward(ctx, *args):
276
+ if not torch.autograd._is_checkpoint_valid():
277
+ raise RuntimeError("Checkpointing is not compatible with .grad(), "
278
+ "please use .backward() if possible")
279
+ inputs = ctx.saved_tensors
280
+ if ctx.distribute_saved_activations:
281
+ safely_set_viewless_tensor_data(
282
+ inputs[0],
283
+ gather_split_1d_tensor(inputs[0].data).view(ctx.input_0_shape))
284
+ # HACK: currently when DeepSpeed is used, we always set
285
+ # distribute_saved_activations to false, and use the following older
286
+ # activation checkpointing mechanisms
287
+ if _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is not None:
288
+ inputs[0].data = gather_split_1d_tensor(inputs[0].data)
289
+ inputs[0].data = inputs[0].data.view(ctx.input_0_shape)
290
+
291
+ # Store the current states.
292
+ bwd_cpu_rng_state = torch.get_rng_state()
293
+ bwd_cuda_rng_state = get_accelerator().get_rng_state()
294
+ bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
295
+
296
+ # Set the states to what it used to be before the forward pass.
297
+ torch.set_rng_state(ctx.fwd_cpu_rng_state)
298
+ _set_cuda_rng_state(ctx.fwd_cuda_rng_state)
299
+ get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
300
+
301
+ # Compute the forward pass.
302
+ detached_inputs = detach_variable(inputs)
303
+ with torch.enable_grad():
304
+ outputs = ctx.run_function(*detached_inputs)
305
+
306
+ # Set the states back to what it was at the start of this function.
307
+ torch.set_rng_state(bwd_cpu_rng_state)
308
+ _set_cuda_rng_state(bwd_cuda_rng_state)
309
+ get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
310
+
311
+ if isinstance(outputs, torch.Tensor):
312
+ outputs = (outputs,)
313
+ elif len(outputs) == 2 and isinstance(outputs[1], torch.Tensor) and \
314
+ torch.equal(outputs[1], torch.tensor(0).to(get_accelerator().device_name())):
315
+ # a hacky solution to overcome issue when running old script examples/pretrain_gpt_distributed.sh
316
+ outputs = (outputs[0],)
317
+ torch.autograd.backward(outputs, args)
318
+ grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else inp
319
+ for inp in detached_inputs)
320
+ return (None, None) + grads
321
+
322
+
323
+ def checkpoint(function, distribute_saved_activations, *args):
324
+ """Checkpoint a model or part of the model.
325
+ This has been directly copied from torch.utils.checkpoint."""
326
+ if deepspeed.checkpointing.is_configured():
327
+ return deepspeed.checkpointing.checkpoint(function, *args)
328
+
329
+ return CheckpointFunction.apply(function,
330
+ distribute_saved_activations, *args)
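A CPU-only analogue (toy code, not the accelerator-backed implementation above) of what CudaRNGStatesTracker.fork() provides: work inside the fork draws from a dedicated, tracked RNG stream, while the surrounding default stream is left exactly where it was.

import contextlib
import torch

_states = {"model-parallel-rng": None}

def add(name, seed):
    orig = torch.get_rng_state()
    torch.manual_seed(seed)
    _states[name] = torch.get_rng_state()
    torch.set_rng_state(orig)

@contextlib.contextmanager
def fork(name="model-parallel-rng"):
    orig = torch.get_rng_state()
    torch.set_rng_state(_states[name])
    try:
        yield
    finally:
        _states[name] = torch.get_rng_state()  # remember where the fork left off
        torch.set_rng_state(orig)              # restore the default stream

torch.manual_seed(123)
add("model-parallel-rng", 2718 + 123)
before = torch.rand(1)
with fork():
    _ = torch.rand(4)          # e.g. a tensor-parallel dropout mask
after = torch.rand(1)
# The default stream is unaffected by what happened inside the fork:
torch.manual_seed(123)
ref1, ref2 = torch.rand(1), torch.rand(1)
assert torch.equal(before, ref1) and torch.equal(after, ref2)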
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/attention.py ADDED
@@ -0,0 +1,267 @@
1
+ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ from abc import ABC, abstractmethod
4
+ from .enums import AttnMaskType
5
+ from .transformer_config import TransformerConfig
6
+ import torch
7
+
8
+ from megatron.core import parallel_state, tensor_parallel
9
+ from megatron.core.transformer.core_attention import CoreAttention
10
+ from megatron.core.utils import divide
11
+
12
+ from megatron.core.transformer.module import MegatronModule
13
+ from megatron.core.transformer.enums import AttnType, AttnMaskType
14
+ from megatron.core.transformer.transformer_config import TransformerConfig
15
+ from megatron.core.transformer.custom_layers.transformer_engine import \
16
+ TECoreAttention, TEColumnParallelLinear, TERowParallelLinear
17
+
18
+ class Attention(MegatronModule, ABC):
19
+ """Attention layer abstract class.
20
+
21
+ This layer only contains common modules required for the "self attn" and
22
+ "cross attn" specializations.
23
+ """
24
+
25
+ def __init__(
26
+ self,
27
+ config: TransformerConfig,
28
+ layer_number: int = 1,
29
+ attn_mask_type=AttnMaskType.padding,
30
+ ):
31
+ super().__init__(config=config)
32
+
33
+ self.config = config
34
+ self.layer_number = layer_number
35
+ self.attn_mask_type = attn_mask_type
36
+
37
+ self.projection_size = self.config.kv_channels * self.config.num_attention_heads
38
+
39
+ # Per attention head and per partition values.
40
+ world_size = parallel_state.get_tensor_model_parallel_world_size()
41
+ self.hidden_size_per_attention_head = divide(self.projection_size, self.config.num_attention_heads)
42
+ self.num_attention_heads_per_partition = divide(self.config.num_attention_heads, world_size)
43
+
44
+ self.core_attention = TECoreAttention(
45
+ config=self.config,
46
+ layer_number=self.layer_number,
47
+ attn_mask_type=self.attn_mask_type
48
+ )
49
+
50
+ self.checkpoint_core_attention = self.config.recompute_granularity == 'selective'
51
+
52
+ # Output.
53
+ self.linear_proj = TERowParallelLinear(
54
+ self.projection_size,
55
+ self.config.hidden_size,
56
+ config=self.config,
57
+ init_method=self.config.output_layer_init_method,
58
+ bias=self.config.add_bias_linear,
59
+ skip_bias_add=True,
60
+ )
61
+
62
+ def _checkpointed_attention_forward(self, query, key, value, attention_mask):
63
+ """Forward method with selective activation checkpointing."""
64
+
65
+ def custom_forward(*inputs):
66
+ query = inputs[0]
67
+ key = inputs[1]
68
+ value = inputs[2]
69
+ attention_mask = inputs[3]
70
+ output_ = self.core_attention(query, key, value, attention_mask)
71
+ return output_
72
+
73
+ hidden_states = tensor_parallel.checkpoint(
74
+ custom_forward, False, query, key, value, attention_mask
75
+ )
76
+
77
+ return hidden_states
78
+
79
+ def _allocate_memory(self, inference_max_sequence_len, batch_size):
80
+ return torch.empty(
81
+ inference_max_sequence_len,
82
+ batch_size,
83
+ self.num_attention_heads_per_partition,
84
+ self.hidden_size_per_attention_head,
85
+ dtype=self.params_dtype,
86
+ device=torch.cuda.current_device(),
87
+ )
88
+
89
+ @abstractmethod
90
+ def get_query_key_value_tensors(self, hidden_states, key_value_states):
91
+ """
92
+ This method needs to be implemented based on whether the derived class
93
+ is "self-attn" or "cross-attn".
94
+ """
95
+
96
+ def forward(self, hidden_states, attention_mask, key_value_states=None, inference_params=None):
97
+ # hidden_states: [sq, b, h]
98
+
99
+ # =================================================
100
+ # Pre-allocate memory for key-values for inference.
101
+ # =================================================
102
+ # @jcasper how should we do inference_params?
103
+ # can do 1. args, 2. add inference params to TransformerConfig
104
+ # 3. create another config object 4. something else?
105
+ if inference_params:
106
+ if self.layer_number not in inference_params.key_value_memory_dict:
107
+ inf_max_seq_len = inference_params.max_sequence_len
108
+ inf_max_batch_size = inference_params.max_batch_size
109
+ inference_key_memory = self._allocate_memory(inf_max_seq_len, inf_max_batch_size)
110
+ inference_value_memory = self._allocate_memory(inf_max_seq_len, inf_max_batch_size)
111
+ inference_params.key_value_memory_dict[self.layer_number] = (
112
+ inference_key_memory,
113
+ inference_value_memory,
114
+ )
115
+ else:
116
+ inference_key_memory, inference_value_memory = inference_params.key_value_memory_dict[
117
+ self.layer_number
118
+ ]
119
+
120
+ # =====================
121
+ # Query, Key, and Value
122
+ # =====================
123
+ # Get the query, key and value tensors based on the type of attention -
124
+ # self or cross attn.
125
+ query, key, value = self.get_query_key_value_tensors(hidden_states, key_value_states)
126
+
127
+ # ==================================
128
+ # Adjust key and value for inference
129
+ # ==================================
130
+
131
+ if inference_params:
132
+ batch_start = inference_params.batch_size_offset
133
+ batch_end = batch_start + key.size(1)
134
+ assert batch_end <= inference_key_memory.size(1)
135
+ sequence_start = inference_params.sequence_len_offset
136
+ sequence_end = sequence_start + key.size(0)
137
+ assert sequence_end <= inference_key_memory.size(0)
138
+ # Copy key and values.
139
+ inference_key_memory[sequence_start:sequence_end, batch_start:batch_end, ...] = key
140
+ inference_value_memory[sequence_start:sequence_end, batch_start:batch_end, ...] = value
141
+ key = inference_key_memory[:sequence_end, batch_start:batch_end, ...]
142
+ value = inference_value_memory[:sequence_end, batch_start:batch_end, ...]
143
+
144
+ # ==================================
145
+ # core attention computation
146
+ # ==================================
147
+
148
+ if self.checkpoint_core_attention:
149
+ core_attn_out = self._checkpointed_attention_forward(query, key, value, attention_mask)
150
+ else:
151
+ core_attn_out = self.core_attention(query, key, value, attention_mask)
152
+
153
+ # =================
154
+ # Output. [sq, b, h]
155
+ # =================
156
+
157
+ output, bias = self.linear_proj(core_attn_out)
158
+
159
+ return output, bias
160
+
161
+ class SelfAttention(Attention):
162
+ """Self-attention layer class
163
+
164
+ Self-attention layer takes input with size [s, b, h]
165
+ and returns output of the same size.
166
+ """
167
+ def __init__(self,
168
+ config: TransformerConfig,
169
+ layer_number: int = 1,
170
+ attn_mask_type=AttnMaskType.padding):
171
+ super().__init__(
172
+ config=config,
173
+ layer_number=layer_number,
174
+ attn_mask_type=attn_mask_type
175
+ )
176
+
177
+ self.linear_qkv = TEColumnParallelLinear(
178
+ self.config.hidden_size,
179
+ 3 * self.projection_size,
180
+ config=self.config,
181
+ init_method=self.config.init_method,
182
+ bias=self.config.add_bias_linear,
183
+ skip_bias_add=False
184
+ )
185
+
186
+ def get_query_key_value_tensors(self, hidden_states, key_value_states=None):
187
+ """
188
+ Derives `query`, `key` and `value` tensors from `hidden_states`.
189
+ """
190
+ # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
191
+ mixed_qkv, _ = self.linear_qkv(hidden_states)
192
+
193
+ # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
194
+ new_tensor_shape = mixed_qkv.size()[:-1] + (
195
+ self.num_attention_heads_per_partition,
196
+ 3 * self.hidden_size_per_attention_head,
197
+ )
198
+ mixed_qkv = mixed_qkv.view(*new_tensor_shape)
199
+
200
+ # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
201
+ (query, key, value) = tensor_parallel.split_tensor_along_last_dim(mixed_qkv, 3)
202
+
203
+ return query, key, value
204
+
205
+ class CrossAttention(Attention):
206
+ """Cross-attention layer class
207
+
208
+ Cross-attention layer takes input with size [s, b, h] and context with size
209
+ [s, b, h] and returns output of the same size.
210
+ """
211
+ def __init__(self,
212
+ config: TransformerConfig,
213
+ layer_number: int = 1,
214
+ attn_mask_type=AttnMaskType.padding):
215
+ super().__init__(
216
+ config=config,
217
+ layer_number=layer_number,
218
+ attn_mask_type=attn_mask_type
219
+ )
220
+
221
+ self.linear_q = TEColumnParallelLinear(
222
+ self.config.hidden_size,
223
+ self.projection_size,
224
+ config=self.config,
225
+ init_method=self.config.init_method,
226
+ bias=self.config.add_bias_linear,
227
+ skip_bias_add=False
228
+ )
229
+
230
+ self.linear_kv = TEColumnParallelLinear(
231
+ self.config.hidden_size,
232
+ 2 * self.projection_size,
233
+ config=self.config,
234
+ init_method=self.config.init_method,
235
+ bias=self.config.add_bias_linear,
236
+ skip_bias_add=False
237
+ )
238
+
239
+ def get_query_key_value_tensors(self, hidden_states, key_value_states):
240
+ """
241
+ Derives `query` tensor from `hidden_states`, and `key`/`value` tensors
242
+ from `key_value_states`.
243
+ """
244
+ # Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
245
+ mixed_kv, _ = self.linear_kv(key_value_states)
246
+
247
+ # [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn]
248
+ new_tensor_shape = mixed_kv.size()[:-1] + (
249
+ self.num_attention_heads_per_partition,
250
+ 2 * self.hidden_size_per_attention_head,
251
+ )
252
+ mixed_kv = mixed_kv.view(*new_tensor_shape)
253
+
254
+ # [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn]
255
+ (key, value) = tensor_parallel.split_tensor_along_last_dim(mixed_kv, 2)
256
+
257
+ # Attention head [sq, b, h] --> [sq, b, hp]
258
+ query, _ = self.linear_q(hidden_states)
259
+
260
+ # [sq, b, hp] --> [sq, b, np, hn]
261
+ new_tensor_shape = query.size()[:-1] + (
262
+ self.num_attention_heads_per_partition,
263
+ self.hidden_size_per_attention_head,
264
+ )
265
+ query = query.view(*new_tensor_shape)
266
+
267
+ return query, key, value
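A toy sketch of the reshape and split performed by SelfAttention.get_query_key_value_tensors above, using torch.split in place of split_tensor_along_last_dim; sq, b, np and hn are made-up sizes.

import torch

sq, b, np_, hn = 5, 2, 4, 8
mixed_qkv = torch.randn(sq, b, np_ * 3 * hn)              # fused QKV projection output
mixed_qkv = mixed_qkv.view(sq, b, np_, 3 * hn)            # [sq, b, np, 3*hn]
query, key, value = torch.split(mixed_qkv, hn, dim=-1)    # 3 x [sq, b, np, hn]
assert query.shape == key.shape == value.shape == (sq, b, np_, hn)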
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/core_attention.py ADDED
@@ -0,0 +1,146 @@
1
+ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
2
+
3
+
4
+ import math
5
+
6
+ import torch
7
+ from torch import Tensor
8
+
9
+ from megatron.core import parallel_state, tensor_parallel
10
+ from megatron.core.utils import divide
11
+ from megatron.core.transformer.module import MegatronModule
12
+ from megatron.core.transformer.transformer_config import TransformerConfig
13
+ from megatron.core.transformer.enums import AttnMaskType
14
+ from megatron.core.transformer.utils import attention_mask_func
15
+ from megatron.core.fusions.fused_softmax import FusedScaleMaskSoftmax
16
+
17
+
18
+ class CoreAttention(MegatronModule):
19
+ """
20
+ Region where selective activation recomputation is applied.
21
+ This region is memory-intensive but less compute-intensive, which
22
+ makes activation checkpointing more efficient for LLMs (20B+).
23
+ See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details.
24
+
25
+ We use the following notation:
26
+ h: hidden size
27
+ n: number of attention heads
28
+ p: number of tensor model parallel partitions
29
+ b: batch size
30
+ s: sequence length
31
+ """
32
+
33
+ def __init__(self, config: TransformerConfig, layer_number: int = 1, attn_mask_type=AttnMaskType.padding):
34
+ super().__init__(config=config)
35
+
36
+ self.config: TransformerConfig = config
37
+
38
+ self.layer_number = max(1, layer_number)
39
+ self.attn_mask_type = attn_mask_type
40
+
41
+ projection_size = self.config.kv_channels * config.num_attention_heads
42
+
43
+ # Per attention head and per partition values.
44
+ world_size = parallel_state.get_tensor_model_parallel_world_size()
45
+ self.hidden_size_per_partition = divide(projection_size, world_size)
46
+ self.hidden_size_per_attention_head = divide(projection_size, config.num_attention_heads)
47
+ self.num_attention_heads_per_partition = divide(config.num_attention_heads, world_size)
48
+
49
+ coeff = None
50
+ self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
51
+ if self.config.apply_query_key_layer_scaling:
52
+ coeff = self.layer_number
53
+ self.norm_factor *= coeff
54
+
55
+ self.scale_mask_softmax = FusedScaleMaskSoftmax(
56
+ input_in_fp16=self.config.fp16,
57
+ input_in_bf16=self.config.bf16,
58
+ attn_mask_type=self.attn_mask_type,
59
+ scaled_masked_softmax_fusion=self.config.masked_softmax_fusion,
60
+ mask_func=attention_mask_func,
61
+ softmax_in_fp32=self.config.attention_softmax_in_fp32,
62
+ scale=coeff,
63
+ )
64
+
65
+ # Dropout. Note that for a single iteration, this layer will generate
66
+ # different outputs on different numbers of parallel partitions but
67
+ # on average it should not be partition dependent.
68
+ self.attention_dropout = torch.nn.Dropout(self.config.attention_dropout)
69
+
70
+ def forward(self, query_layer: Tensor, key_layer: Tensor, value_layer: Tensor, attention_mask: Tensor):
71
+
72
+ # ===================================
73
+ # Raw attention scores. [b, n/p, s, s]
74
+ # ===================================
75
+
76
+ # [b, np, sq, sk]
77
+ output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))
78
+
79
+ # [sq, b, np, hn] -> [sq, b * np, hn]
80
+ query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
81
+ # [sk, b, np, hn] -> [sk, b * np, hn]
82
+ key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)
83
+
84
+ # preallocating input tensor: [b * np, sq, sk]
85
+ matmul_input_buffer = parallel_state.get_global_memory_buffer().get_tensor(
86
+ (output_size[0] * output_size[1], output_size[2], output_size[3]), query_layer.dtype, "mpu"
87
+ )
88
+
89
+ # Raw attention scores. [b * np, sq, sk]
90
+ matmul_result = torch.baddbmm(
91
+ matmul_input_buffer,
92
+ query_layer.transpose(0, 1), # [b * np, sq, hn]
93
+ key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
94
+ beta=0.0,
95
+ alpha=(1.0 / self.norm_factor),
96
+ )
97
+
98
+ # change view to [b, np, sq, sk]
99
+ attention_scores = matmul_result.view(*output_size)
100
+
101
+ # ===========================
102
+ # Attention probs and dropout
103
+ # ===========================
104
+
105
+ # attention scores and attention mask [b, np, sq, sk]
106
+ attention_probs: Tensor = self.scale_mask_softmax(attention_scores, attention_mask)
107
+
108
+ # This is actually dropping out entire tokens to attend to, which might
109
+ # seem a bit unusual, but is taken from the original Transformer paper.
110
+
111
+ if not self.config.sequence_parallel:
112
+ with tensor_parallel.get_cuda_rng_tracker().fork():
113
+ attention_probs = self.attention_dropout(attention_probs)
114
+ else:
115
+ attention_probs = self.attention_dropout(attention_probs)
116
+
117
+ # =========================
118
+ # Context layer. [sq, b, hp]
119
+ # =========================
120
+
121
+ # value_layer -> context layer.
122
+ # [sk, b, np, hn] --> [b, np, sq, hn]
123
+
124
+ # context layer shape: [b, np, sq, hn]
125
+ output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))
126
+
127
+ # change view [sk, b * np, hn]
128
+ value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)
129
+
130
+ # change view [b * np, sq, sk]
131
+ attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
132
+
133
+ # matmul: [b * np, sq, hn]
134
+ context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
135
+
136
+ # change view [b, np, sq, hn]
137
+ context_layer = context_layer.view(*output_size)
138
+
139
+ # [b, np, sq, hn] --> [sq, b, np, hn]
140
+ context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
141
+
142
+ # [sq, b, np, hn] --> [sq, b, hp]
143
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
144
+ context_layer = context_layer.view(*new_context_layer_shape)
145
+
146
+ return context_layer
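For reference, the same score/softmax/context computation in plain PyTorch, without the fused softmax, masking, dropout, or tensor parallelism; all sizes are toy assumptions:

import math
import torch

sq, sk, b, heads, hn = 5, 5, 2, 3, 8
q = torch.randn(sq, b, heads, hn)
k = torch.randn(sk, b, heads, hn)
v = torch.randn(sk, b, heads, hn)

# [s, b, np, hn] -> [b * np, s, hn]
q2 = q.view(sq, b * heads, hn).transpose(0, 1)
k2 = k.view(sk, b * heads, hn).transpose(0, 1)
v2 = v.view(sk, b * heads, hn).transpose(0, 1)

scores = torch.bmm(q2, k2.transpose(1, 2)) / math.sqrt(hn)    # [b*np, sq, sk]
probs = torch.softmax(scores, dim=-1)
context = torch.bmm(probs, v2)                                # [b*np, sq, hn]
context = context.view(b, heads, sq, hn).permute(2, 0, 1, 3)  # [sq, b, np, hn]
context = context.reshape(sq, b, heads * hn)                  # [sq, b, hp]
assert context.shape == (sq, b, heads * hn)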
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/module.py ADDED
@@ -0,0 +1,118 @@
1
+ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ """Megatron Module"""
4
+
5
+ import torch
6
+ from torch.autograd import Variable
7
+ from torch.nn.parameter import Parameter
8
+
9
+ from megatron.core import parallel_state, tensor_parallel
10
+ from megatron.core.transformer.transformer_config import TransformerConfig
11
+
12
+
13
+ _FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
14
+ _HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
15
+ _BF16_TYPES = (torch.BFloat16Tensor, torch.cuda.BFloat16Tensor)
16
+
17
+
18
+ def param_is_not_shared(param):
19
+ return not hasattr(param, 'shared') or not param.shared
20
+
21
+
22
+ class MegatronModule(torch.nn.Module):
23
+ """Megatron specific extensions of torch Module with support
24
+ for pipelining."""
25
+
26
+ # def __init__(self, config: TransformerConfig, share_word_embeddings=True):
27
+ def __init__(self, config: TransformerConfig):
28
+ super().__init__()
29
+ self.config = config
30
+
31
+ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
32
+ """Use this function to override the state dict for
33
+ saving checkpoints."""
34
+ return self.state_dict(prefix=prefix, keep_vars=keep_vars)
35
+
36
+
37
+ def conversion_helper(val, conversion):
38
+ """Apply conversion to val. Recursively apply conversion if `val`
39
+ #is a nested tuple/list structure."""
40
+ if not isinstance(val, (tuple, list)):
41
+ return conversion(val)
42
+ rtn = [conversion_helper(v, conversion) for v in val]
43
+ if isinstance(val, tuple):
44
+ rtn = tuple(rtn)
45
+ return rtn
46
+
47
+
48
+ def fp32_to_float16(val, float16_convertor):
49
+ """Convert fp32 `val` to fp16/bf16"""
50
+
51
+ def half_conversion(val):
52
+ val_typecheck = val
53
+ if isinstance(val_typecheck, (Parameter, Variable)):
54
+ val_typecheck = val.data
55
+ if isinstance(val_typecheck, _FLOAT_TYPES):
56
+ val = float16_convertor(val)
57
+ return val
58
+
59
+ return conversion_helper(val, half_conversion)
60
+
61
+
62
+ def float16_to_fp32(val):
63
+ """Convert fp16/bf16 `val` to fp32"""
64
+
65
+ def float_conversion(val):
66
+ val_typecheck = val
67
+ if isinstance(val_typecheck, (Parameter, Variable)):
68
+ val_typecheck = val.data
69
+ if isinstance(val_typecheck, (_BF16_TYPES, _HALF_TYPES)):
70
+ val = val.float()
71
+ return val
72
+
73
+ return conversion_helper(val, float_conversion)
74
+
75
+
76
+ class Float16Module(MegatronModule):
77
+ def __init__(self, config: TransformerConfig, module: torch.nn.Module):
78
+ super(Float16Module, self).__init__(config)
79
+ self.config = config
80
+ self.fp16 = config.fp16
81
+ self.bf16 = config.bf16
82
+
83
+ if self.fp16:
84
+ self.add_module('module', module.half())
85
+
86
+ def float16_convertor(val):
87
+ return val.half()
88
+
89
+ elif self.bf16:
90
+ self.add_module('module', module.bfloat16())
91
+
92
+ def float16_convertor(val):
93
+ return val.bfloat16()
94
+
95
+ else:
96
+ raise Exception('Either config.fp16 or config.bf16 should be True.')
97
+
98
+ self.float16_convertor = float16_convertor
99
+
100
+ def set_input_tensor(self, input_tensor):
101
+ return self.module.set_input_tensor(input_tensor)
102
+
103
+ def forward(self, *inputs, **kwargs):
104
+ if parallel_state.is_pipeline_first_stage():
105
+ inputs = fp32_to_float16(inputs, self.float16_convertor)
106
+ outputs = self.module(*inputs, **kwargs)
107
+ if parallel_state.is_pipeline_last_stage():
108
+ outputs = float16_to_fp32(outputs)
109
+ return outputs
110
+
111
+ def state_dict(self, destination=None, prefix='', keep_vars=False):
112
+ return self.module.state_dict(prefix=prefix, keep_vars=keep_vars)
113
+
114
+ def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
115
+ return self.module.state_dict_for_save_checkpoint(prefix=prefix, keep_vars=keep_vars)
116
+
117
+ def load_state_dict(self, state_dict, strict=True):
118
+ self.module.load_state_dict(state_dict, strict=strict)
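A small usage sketch of the conversion helpers above (the nested structure and dtypes are illustrative assumptions, and the import assumes this repo and its dependencies are on PYTHONPATH):

import torch
from megatron.core.transformer.module import fp32_to_float16, float16_to_fp32

nested = (torch.randn(2, 2), [torch.randn(3)])         # fp32 leaves in a nested structure
halved = fp32_to_float16(nested, lambda t: t.half())   # recursively cast fp32 leaves to fp16
restored = float16_to_fp32(halved)                     # and back to fp32
print(halved[0].dtype, restored[1][0].dtype)           # expected: torch.float16 torch.float32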
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/transformer_layer.py ADDED
@@ -0,0 +1,122 @@
1
+ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ import torch
4
+
5
+ from megatron.core.transformer.module import MegatronModule
6
+ from megatron.core.transformer.transformer_config import TransformerConfig
7
+ from megatron.core.transformer.enums import AttnType, AttnMaskType
8
+ from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add
9
+ from megatron.core.transformer.attention import SelfAttention
10
+ from megatron.core.transformer.mlp import MLP
11
+ from megatron.core.utils import make_viewless_tensor
12
+ from megatron.core.transformer.custom_layers.transformer_engine import TELayerNorm
13
+
14
+ class TransformerLayer(MegatronModule):
15
+ """A single transformer layer.
16
+
17
+ Transformer layer takes input with size [s, b, h] and returns an
18
+ output of the same size.
19
+ """
20
+
21
+ def __init__(
22
+ self, config: TransformerConfig, layer_number: int = 1, self_attn_mask_type=AttnMaskType.padding,
23
+ ):
24
+ super().__init__(config=config)
25
+ self.config: TransformerConfig = config
26
+
27
+ self.layer_number = layer_number
28
+ self.self_attn_mask_type = self_attn_mask_type
29
+
30
+ # Layernorm on the input data.
31
+ # TODO: add pytorch only layernorm
32
+ self.input_layernorm = TELayerNorm(
33
+ hidden_size=self.config.hidden_size,
34
+ eps=self.config.layernorm_epsilon,
35
+ persist_layer_norm=self.config.persist_layer_norm,
36
+ sequence_parallel=self.config.sequence_parallel,
37
+ zero_centered_gamma=self.config.layernorm_zero_centered_gamma,
38
+ )
39
+
40
+ # Self attention.
41
+ self.self_attention = SelfAttention(
42
+ config=self.config,
43
+ layer_number=layer_number,
44
+ attn_mask_type=self_attn_mask_type,
45
+ )
46
+
47
+ # Layernorm on the attention output
48
+ self.post_self_attn_layernorm = TELayerNorm(
49
+ hidden_size=self.config.hidden_size,
50
+ eps=self.config.layernorm_epsilon,
51
+ persist_layer_norm=self.config.persist_layer_norm,
52
+ sequence_parallel=self.config.sequence_parallel,
53
+ zero_centered_gamma=self.config.layernorm_zero_centered_gamma,
54
+ )
55
+
56
+ # MLP
57
+ self.mlp = MLP(config=self.config)
58
+
59
+ # @jcasper how should we handle nvfuser?
60
+ # Set bias+dropout+add fusion grad_enable execution handler.
61
+ # TORCH_MAJOR = int(torch.__version__.split('.')[0])
62
+ # TORCH_MINOR = int(torch.__version__.split('.')[1])
63
+ # use_nvfuser = TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)
64
+ # self.bias_dropout_add_exec_handler = nullcontext if use_nvfuser else torch.enable_grad
65
+ self.bias_dropout_add_exec_handler = torch.enable_grad
66
+
67
+ self.bias_dropout_add_func = get_bias_dropout_add(
68
+ self.training,
69
+ self.config.bias_dropout_fusion
70
+ )
71
+
72
+ # TODO: decide how to do inference_params
73
+ def forward(
74
+ self, hidden_states, attention_mask, encoder_output=None, enc_dec_attn_mask=None, inference_params=None
75
+ ):
76
+ # hidden_states: [s, b, h]
77
+
78
+ # Layer norm at the beginning of the transformer layer.
79
+ layernorm_output = self.input_layernorm(hidden_states)
80
+ # Self attention.
81
+ attention_output_with_bias = self.self_attention(
82
+ layernorm_output, attention_mask, inference_params=inference_params
83
+ )
84
+
85
+ # Residual connection.
86
+ if self.config.apply_residual_connection_post_layernorm:
87
+ residual = layernorm_output
88
+ else:
89
+ residual = hidden_states
90
+
91
+ # bias_dropout_add fusion returning fp32 instead of bf16
92
+ with self.bias_dropout_add_exec_handler():
93
+ layernorm_input = self.bias_dropout_add_func(
94
+ attention_output_with_bias, residual, self.config.hidden_dropout
95
+ )
96
+
97
+ # Layer norm post the self attention.
98
+ layernorm_output = self.post_self_attn_layernorm(layernorm_input)
99
+
100
+ # MLP.
101
+ mlp_output_with_bias = self.mlp(layernorm_output)
102
+
103
+ # Second residual connection.
104
+ if self.config.apply_residual_connection_post_layernorm:
105
+ residual = layernorm_output
106
+ else:
107
+ residual = layernorm_input
108
+
109
+ with self.bias_dropout_add_exec_handler():
110
+ output = self.bias_dropout_add_func(
111
+ mlp_output_with_bias, residual, self.config.hidden_dropout
112
+ )
113
+
114
+ # Jit compiled function creates 'view' tensor. This tensor
115
+ # potentially gets saved in the MPU checkpoint function context,
116
+ # which rejects view tensors. While making a viewless tensor here
117
+ # won't result in memory savings (like the data loader, or
118
+ # p2p_communication), it serves to document the origin of this
119
+ # 'view' tensor.
120
+ output = make_viewless_tensor(inp=output, requires_grad=output.requires_grad, keep_graph=True)
121
+
122
+ return output
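Conceptually, the bias-dropout-add step applied after both the attention and MLP blocks computes residual + dropout(x + bias). A standalone sketch of that computation follows; it is not the fused kernel's actual signature, which lives in fused_bias_dropout.py:

import torch

def bias_dropout_add_sketch(x, bias, residual, prob, training=True):
    # dropout over the (output + bias) term, then add back the residual stream
    return residual + torch.nn.functional.dropout(x + bias, p=prob, training=training)

s, b, h = 4, 2, 16
x, bias, residual = torch.randn(s, b, h), torch.randn(h), torch.randn(s, b, h)
out = bias_dropout_add_sketch(x, bias, residual, prob=0.1)
assert out.shape == (s, b, h)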
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/utils.py ADDED
@@ -0,0 +1,171 @@
1
+ # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ """Utility functions used throughout Megatron core"""
4
+ from functools import reduce
5
+ import math
6
+ import operator
7
+
8
+ import torch
9
+
10
+ from megatron.core import parallel_state
11
+ from megatron import get_args
12
+
13
+ from deepspeed import get_accelerator
14
+
15
+
16
+ def ensure_divisibility(numerator, denominator):
17
+ """Ensure that numerator is divisible by the denominator."""
18
+ assert numerator % denominator == 0, "{} is not divisible by {}".format(
19
+ numerator, denominator
20
+ )
21
+
22
+
23
+ def divide(numerator, denominator):
24
+ """Ensure that numerator is divisible by the denominator and return
25
+ the division value."""
26
+ ensure_divisibility(numerator, denominator)
27
+ return numerator // denominator
28
+
29
+ def get_attr_wrapped_model(model, attr, allow_none=True):
30
+ """Get an attribute from a wrapped model"""
31
+ if isinstance(model, list):
32
+ raise RuntimeError("_get_attr_wrapped_model given a list of models")
33
+
34
+ if allow_none:
35
+ def condition(model, attr):
36
+ return not hasattr(model, attr)
37
+ else:
38
+ def condition(model, attr):
39
+ return getattr(model, attr, None) is None
40
+
41
+ while condition(model, attr):
42
+ if not hasattr(model, "module"):
43
+ raise RuntimeError(f"_get_attr_wrapped_model couldn't find attribute {attr}")
44
+
45
+ model = model.module
46
+ return getattr(model, attr)
47
+
48
+ def get_model_type(model):
49
+ return get_attr_wrapped_model(model, 'model_type')
50
+
51
+ def get_model_config(model):
52
+ args = get_args()
53
+ if args.deepspeed:
54
+ return get_attr_wrapped_model(model.module, 'config', allow_none=False)
55
+ return get_attr_wrapped_model(model, 'config', allow_none=False)
56
+
57
+ class GlobalMemoryBuffer:
58
+ """Global buffer to avoid dynamic memory allocations.
59
+ Caller should ensure that buffers of the same name
60
+ are not used concurrently."""
61
+
62
+ def __init__(self):
63
+ self.buffer = {}
64
+
65
+ def get_tensor(self, tensor_shape, dtype, name):
66
+ required_len = reduce(operator.mul, tensor_shape, 1)
67
+ if self.buffer.get((name, dtype), None) is None or \
68
+ self.buffer[(name, dtype)].numel() < required_len:
69
+ self.buffer[(name, dtype)] = \
70
+ torch.empty(required_len,
71
+ dtype=dtype,
72
+ device=get_accelerator().current_device_name(),
73
+ requires_grad=False)
74
+
75
+ return self.buffer[(name, dtype)][0:required_len].view(*tensor_shape)
76
+
77
+ def _kernel_make_viewless_tensor(inp, requires_grad):
78
+ '''Make a viewless tensor.
79
+
80
+ View tensors have the undesirable side-effect of retaining a reference
81
+ to the originally-viewed tensor, even after manually setting the '.data'
82
+ field. This method creates a new tensor that links to the old tensor's
83
+ data, without linking the viewed tensor, referenced via the '._base'
84
+ field.
85
+ '''
86
+ out = torch.empty(
87
+ (1,),
88
+ dtype = inp.dtype,
89
+ device = inp.device,
90
+ requires_grad = requires_grad,
91
+ )
92
+ out.data = inp.data
93
+ return out
94
+
95
+ class MakeViewlessTensor(torch.autograd.Function):
96
+ '''
97
+ Autograd function to make a viewless tensor.
98
+
99
+ This function should be used in cases where the computation graph needs
100
+ to be propagated, but we only want a viewless tensor (e.g.,
101
+ ParallelTransformer's hidden_states). Call this function by passing
102
+ 'keep_graph = True' to 'make_viewless_tensor()'.
103
+ '''
104
+ @staticmethod
105
+ def forward(ctx, inp, requires_grad):
106
+ return _kernel_make_viewless_tensor(inp, requires_grad)
107
+ @staticmethod
108
+ def backward(ctx, grad_output):
109
+ return grad_output, None
110
+
111
+ def make_viewless_tensor(inp, requires_grad, keep_graph):
112
+ '''
113
+ Entry-point for creating viewless tensors.
114
+
115
+ This method should be used, rather than calling 'MakeViewlessTensor'
116
+ or '_kernel_make_viewless_tensor' directly. This method acts as a
117
+ switch for determining if an autograd function or a regular method
118
+ should be used to create the tensor.
119
+ '''
120
+
121
+ # return tensor as-is, if not a 'view'
122
+ if inp._base is None:
123
+ return inp
124
+
125
+ # create viewless tensor
126
+ if keep_graph:
127
+ return MakeViewlessTensor.apply(inp, requires_grad)
128
+ else:
129
+ return _kernel_make_viewless_tensor(inp, requires_grad)
130
+
131
+ def assert_viewless_tensor(tensor, extra_msg = None):
132
+ '''Assert that a tensor is not a view (i.e., its '._base' field is
133
+ not set).'''
134
+ if isinstance(tensor, list):
135
+ [ assert_viewless_tensor(t) for t in tensor ]
136
+ return tensor
137
+ if not isinstance(tensor, torch.Tensor):
138
+ return tensor
139
+ assert tensor._base is None, (
140
+ "Ensure tensor._base is None before setting tensor.data or storing "
141
+ "tensor to memory buffer. Otherwise, a memory leak will occur (and "
142
+ "likely accumulate over iterations). %s"
143
+ ) % extra_msg
144
+ return tensor
145
+
146
+ def safely_set_viewless_tensor_data(tensor, new_data_tensor):
147
+ '''Safely set tensor's '.data' field.
148
+
149
+ Check first that the tensor is viewless (i.e., '._base' not set). If not,
150
+ raise an exception.
151
+ '''
152
+ assert_viewless_tensor(tensor, extra_msg = "FYI, tensor._base has shape %s, and new_data_tensor has shape %s." % ("--" if tensor._base is None else tensor._base.shape, new_data_tensor.shape))
153
+ tensor.data = new_data_tensor
154
+
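A quick interactive check of the behavior the viewless-tensor helpers rely on (plain PyTorch; the expected values follow the docstrings above):

import torch

base = torch.randn(4, 4)
view = base.view(16)                      # a 'view' tensor: its ._base points at `base`
print(view._base is base)                 # expected: True

out = torch.empty((1,), dtype=view.dtype, device=view.device, requires_grad=False)
out.data = view.data                      # same trick as _kernel_make_viewless_tensor
print(out._base is None)                  # expected: True -- no link back to the viewed tensor
print(out.data_ptr() == view.data_ptr())  # expected: True -- the storage is still shared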
155
+ def init_method_normal(sigma):
156
+ """Init method based on N(0, sigma)."""
157
+
158
+ def init_(tensor):
159
+ return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
160
+
161
+ return init_
162
+
163
+
164
+ def scaled_init_method_normal(sigma, num_layers):
165
+ """Init method based on N(0, sigma/sqrt(2*num_layers)."""
166
+ std = sigma / math.sqrt(2.0 * num_layers)
167
+
168
+ def init_(tensor):
169
+ return torch.nn.init.normal_(tensor, mean=0.0, std=std)
170
+
171
+ return init_
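Typical use of the two init helpers, mirroring common GPT-style defaults; sigma=0.02 and 24 layers are assumptions, and the import assumes the repo and its dependencies are importable:

import torch
from megatron.core.utils import init_method_normal, scaled_init_method_normal

init_fn = init_method_normal(0.02)                          # N(0, 0.02) for input-facing projections
scaled_fn = scaled_init_method_normal(0.02, num_layers=24)  # N(0, 0.02 / sqrt(2 * 24)) for output-facing projections

w_in, w_out = torch.empty(1024, 1024), torch.empty(1024, 1024)
init_fn(w_in)
scaled_fn(w_out)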
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (178 Bytes).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/blendable_dataset.cpython-310.pyc ADDED
Binary file (4.05 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/dataset_utils.cpython-310.pyc ADDED
Binary file (14.7 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/indexed_dataset.cpython-310.pyc ADDED
Binary file (20.2 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/vit_dataset.cpython-310.pyc ADDED
Binary file (6.62 kB).
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/gpt_dataset.py ADDED
@@ -0,0 +1,664 @@
1
+ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
2
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
3
+
4
+ """GPT style dataset."""
5
+
6
+ import hashlib
7
+ import os
8
+ import time
9
+
10
+ import numpy as np
11
+ import torch
12
+ from deepspeed.accelerator import get_accelerator
13
+ from megatron import print_rank_0, is_rank_0, get_args
14
+ from megatron.core import mpu
15
+ from megatron.data.blendable_dataset import BlendableDataset
16
+ from megatron.data.dataset_utils import get_datasets_weights_and_num_samples
17
+ from megatron.data.dataset_utils import get_train_valid_test_split_
18
+ from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset
19
+
20
+
21
+ def build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
22
+ train_valid_test_num_samples,
23
+ seq_length, seed, skip_warmup,
24
+ train_data_prefix=None,
25
+ valid_data_prefix=None,
26
+ test_data_prefix=None,
27
+ return_doc_ids=False, *,
28
+ data_cache_path=None,
29
+ use_seq_len_plus_one_tokens=True):
30
+ """Build train, valid, and test datasets."""
31
+
32
+ if data_prefix:
33
+ print_rank_0("Single data path provided for train, valid & test")
34
+
35
+ # Single dataset.
36
+ if len(data_prefix) == 1:
37
+ return _build_train_valid_test_datasets(data_prefix[0],
38
+ data_impl, splits_string,
39
+ train_valid_test_num_samples,
40
+ seq_length, seed, skip_warmup,
41
+ data_cache_path=data_cache_path,
42
+ use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens)
43
+
44
+ # Blending dataset.
45
+ # Parse the values.
46
+ output = get_datasets_weights_and_num_samples(data_prefix,
47
+ train_valid_test_num_samples)
48
+ prefixes, weights, datasets_train_valid_test_num_samples = output
49
+ train_num_samples, valid_num_samples, test_num_samples = map(
50
+ sum,
51
+ zip(*datasets_train_valid_test_num_samples)
52
+ )
53
+
54
+ # Build individual datasets.
55
+ train_datasets = []
56
+ valid_datasets = []
57
+ test_datasets = []
58
+ for i in range(len(prefixes)):
59
+ train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
60
+ prefixes[i], data_impl, splits_string,
61
+ datasets_train_valid_test_num_samples[i],
62
+ seq_length, seed, skip_warmup,
63
+ return_doc_ids,
64
+ data_cache_path=data_cache_path,
65
+ use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens)
66
+ if train_ds:
67
+ train_datasets.append(train_ds)
68
+ if valid_ds:
69
+ valid_datasets.append(valid_ds)
70
+ if test_ds:
71
+ test_datasets.append(test_ds)
72
+
73
+ # Blend.
74
+ blending_train_dataset = None
75
+ if train_datasets:
76
+ blending_train_dataset = BlendableDataset(train_datasets, weights, train_num_samples,
77
+ data_cache_path=data_cache_path)
78
+ blending_valid_dataset = None
79
+ if valid_datasets:
80
+ blending_valid_dataset = BlendableDataset(valid_datasets, weights, valid_num_samples,
81
+ data_cache_path=data_cache_path)
82
+ blending_test_dataset = None
83
+ if test_datasets:
84
+ blending_test_dataset = BlendableDataset(test_datasets, weights, test_num_samples,
85
+ data_cache_path=data_cache_path)
86
+
87
+ return (blending_train_dataset, blending_valid_dataset,
88
+ blending_test_dataset)
89
+
90
+ else:
91
+ print_rank_0("Separate data paths provided for train, valid & test. Split string will be ignored.")
92
+
93
+ train_dataset, valid_dataset, test_dataset = None, None, None
94
+ # Single dataset.
95
+ if train_data_prefix is not None:
96
+ train_dataset = build_dataset("train", train_data_prefix, data_impl,
97
+ splits_string,
98
+ train_valid_test_num_samples[0],
99
+ seq_length, seed, skip_warmup,
100
+ data_cache_path=data_cache_path,
101
+ use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens)
102
+
103
+ if valid_data_prefix is not None:
104
+ valid_dataset = build_dataset("valid", valid_data_prefix, data_impl,
105
+ splits_string,
106
+ train_valid_test_num_samples[1],
107
+ seq_length, seed, False,
108
+ data_cache_path=data_cache_path,
109
+ use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens)
110
+
111
+
112
+ if test_data_prefix is not None:
113
+ test_dataset = build_dataset("test", test_data_prefix, data_impl,
114
+ splits_string,
115
+ train_valid_test_num_samples[2],
116
+ seq_length, seed, False,
117
+ data_cache_path=data_cache_path,
118
+ use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens)
119
+
120
+ return (train_dataset, valid_dataset, test_dataset)
121
+
122
+
123
+ def _build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
124
+ train_valid_test_num_samples,
125
+ seq_length, seed, skip_warmup,
126
+ return_doc_ids=False, *,
127
+ data_cache_path=None,
128
+ use_seq_len_plus_one_tokens):
129
+ """Build train, valid, and test datasets."""
130
+
131
+ # Indexed dataset.
132
+ indexed_dataset = get_indexed_dataset_(data_prefix,
133
+ data_impl,
134
+ skip_warmup)
135
+
136
+ total_num_of_documents = indexed_dataset.sizes.shape[0]
137
+ splits = get_train_valid_test_split_(splits_string, total_num_of_documents)
138
+
139
+ # Print stats about the splits.
140
+ print_rank_0(' > dataset split:')
141
+
142
+ def print_split_stats(name, index):
143
+ print_rank_0(' {}:'.format(name))
144
+ print_rank_0(' document indices in [{}, {}) total of {} '
145
+ 'documents'.format(splits[index], splits[index + 1],
146
+ splits[index + 1] - splits[index]))
147
+ print_split_stats('train', 0)
148
+ print_split_stats('validation', 1)
149
+ print_split_stats('test', 2)
150
+
151
+ def build_dataset(index, name):
152
+ dataset = None
153
+ if splits[index + 1] > splits[index]:
154
+ documents = np.arange(start=splits[index], stop=splits[index + 1],
155
+ step=1, dtype=np.int32)
156
+ dataset = GPTDataset(name, data_prefix, documents, indexed_dataset,
157
+ splits_string,
158
+ train_valid_test_num_samples[index],
159
+ seq_length, seed,
160
+ return_doc_ids,
161
+ data_cache_path=data_cache_path,
162
+ use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens)
163
+ return dataset
164
+
165
+ train_dataset = build_dataset(0, 'train')
166
+ valid_dataset = build_dataset(1, 'valid')
167
+ test_dataset = build_dataset(2, 'test')
168
+
169
+ return (train_dataset, valid_dataset, test_dataset)
170
+
171
+
172
+ def build_dataset(dataset_name, data_prefix, data_impl,
173
+ splits_string, num_samples,
174
+ seq_length, seed, skip_warmup,
175
+ *,
176
+ data_cache_path=None,
177
+ use_seq_len_plus_one_tokens=True):
178
+ dataset = None
179
+ if len(data_prefix) == 1:
180
+ dataset = _build_dataset(dataset_name, data_prefix[0], data_impl,
181
+ splits_string, num_samples, seq_length,
182
+ seed, skip_warmup,
183
+ data_cache_path=data_cache_path,
184
+ use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens)
185
+ else:
186
+ # Blending dataset.
187
+ # Parse the values.
188
+ output = get_datasets_weights_and_num_samples(data_prefix, num_samples)
189
+ prefixes, weights, dataset_num_samples = output
190
+ num_samples = sum(dataset_num_samples)
191
+
192
+ # Build individual datasets.
193
+ datasets = []
194
+ for i in range(len(prefixes)):
195
+ ds = _build_dataset(dataset_name, prefixes[i], data_impl,
196
+ splits_string, dataset_num_samples[i],
197
+ seq_length, seed, skip_warmup,
198
+ data_cache_path=data_cache_path,
199
+ use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens)
200
+ if ds:
201
+ datasets.append(ds)
202
+
203
+ if datasets:
204
+ dataset = BlendableDataset(datasets, weights, num_samples,
205
+ data_cache_path=data_cache_path)
206
+
207
+ return dataset
208
+
209
+
210
+ def _build_dataset(dataset_name, data_prefix, data_impl, splits_string,
211
+ num_samples, seq_length, seed, skip_warmup,
212
+ *,
213
+ data_cache_path=None,
214
+ use_seq_len_plus_one_tokens=True):
215
+ """
216
+ Build dataset. This method is called when individual
217
+ train, valid, test datasets are provided
218
+ """
219
+
220
+ # Indexed dataset.
221
+ indexed_dataset = get_indexed_dataset_(data_prefix,
222
+ data_impl,
223
+ skip_warmup)
224
+
225
+ total_num_of_documents = indexed_dataset.sizes.shape[0]
226
+
227
+ print_rank_0(' {}:'.format(dataset_name))
228
+ print_rank_0(' document indices in [0, {}) total of {} '
229
+ 'documents'.format(total_num_of_documents, total_num_of_documents))
230
+
231
+ documents = np.arange(start=0, stop=total_num_of_documents,
232
+ step=1, dtype=np.int32)
233
+
234
+ dataset = GPTDataset(dataset_name, data_prefix, documents, indexed_dataset,
235
+ splits_string, num_samples, seq_length, seed,
236
+ data_cache_path=data_cache_path, use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens)
237
+
238
+ return dataset
239
+
240
+
241
+ def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):
242
+ """Build indexed dataset."""
243
+ print_rank_0(' > building dataset index ...')
244
+
245
+ start_time = time.time()
246
+ indexed_dataset = make_indexed_dataset(data_prefix,
247
+ data_impl,
248
+ skip_warmup)
249
+ print_rank_0(' > finished creating indexed dataset in {:4f} '
250
+ 'seconds'.format(time.time() - start_time))
251
+ print_rank_0(' number of documents: {}'.format(
252
+ indexed_dataset.sizes.shape[0]))
253
+
254
+ return indexed_dataset
255
+
256
+
257
+ class GPTDataset(torch.utils.data.Dataset):
258
+
259
+ def __init__(self, name, data_prefix, documents, indexed_dataset,
260
+ splits_string, num_samples, seq_length, seed,
261
+ return_doc_ids=False, *,
262
+ data_cache_path=None,
263
+ use_seq_len_plus_one_tokens):
264
+
265
+ self.name = name
266
+ self.indexed_dataset = indexed_dataset
267
+ self.return_doc_ids = return_doc_ids
268
+ self.seq_length = seq_length
269
+ self.add_extra_token = 0
270
+ if use_seq_len_plus_one_tokens:
271
+ self.add_extra_token = 1
272
+
273
+ # Checks
274
+ assert np.min(documents) >= 0
275
+ assert np.max(documents) < indexed_dataset.sizes.shape[0]
276
+
277
+ # Build index mappings.
278
+ self.doc_idx, self.sample_idx, self.shuffle_idx, self.desc, self.desc_hash = \
279
+ _build_index_mappings(self.name, data_prefix,
280
+ documents, self.indexed_dataset.sizes,
281
+ splits_string, num_samples, seq_length, seed,
282
+ data_cache_path=data_cache_path, add_extra_token=self.add_extra_token)
283
+
284
+
285
+ def __len__(self):
286
+ # -1 is due to data structure used to retrieve the index:
287
+ # sample i --> [sample_idx[i], sample_idx[i+1])
288
+ return self.sample_idx.shape[0] - 1
289
+
290
+ def __getitem__(self, idx):
291
+ args = get_args()
292
+ dummy_sample = idx < 0
293
+ idx = np.abs(idx)
294
+ orig_idx = idx
295
+ # Get the shuffled index.
296
+ idx = self.shuffle_idx[idx]
297
+ # Start and end documents and offsets.
298
+ doc_index_f = self.sample_idx[idx][0]
299
+ doc_index_l = self.sample_idx[idx + 1][0]
300
+ offset_f = self.sample_idx[idx][1]
301
+ offset_l = self.sample_idx[idx + 1][1]
302
+ # If we are within the same document, just extract the chunk.
303
+ doc_ids = []
304
+ if doc_index_f == doc_index_l:
305
+ doc_ids.append(self.doc_idx[doc_index_f])
306
+ sample = self.indexed_dataset.get(self.doc_idx[doc_index_f],
307
+ offset=offset_f,
308
+ length=offset_l - offset_f + self.add_extra_token)
309
+ else:
310
+ # Otherwise, get the rest of the initial document.
311
+ doc_ids.append(self.doc_idx[doc_index_f])
312
+ sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f],
313
+ offset=offset_f)]
314
+ # Loop over all in between documents and add the entire document.
315
+ for i in range(doc_index_f + 1, doc_index_l):
316
+ doc_ids.append(self.doc_idx[i])
317
+ sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
318
+ # And finally add the relevant portion of last document.
319
+ doc_ids.append(self.doc_idx[doc_index_l])
320
+ sample_list.append(self.indexed_dataset.get(
321
+ self.doc_idx[doc_index_l],
322
+ length=offset_l + self.add_extra_token))
323
+ sample = np.concatenate(sample_list)
324
+
325
+ text_name = 'text'
326
+ if args.use_dataset_only:
327
+ text_name = 'input_ids'
328
+ sample_dict = {text_name: np.array(sample, dtype=np.int64)}
329
+ if args.return_data_index:
330
+ sample_dict.update({'index': np.array([orig_idx], dtype=np.int64)})
331
+
332
+ if self.return_doc_ids: # for retro preprocessing
333
+ sample_dict.update({'doc_ids': np.array(doc_ids, dtype=np.int64)})
334
+
335
+ if args.use_dataset_only:
336
+ sample_dict.update({'labels': np.array(sample, dtype=np.int64)})
337
+
338
+ if len(sample) != (self.seq_length + self.add_extra_token):
339
+ sample = np.array(sample, dtype=np.int64)
340
+ sample = np.pad(sample, (0, self.seq_length + self.add_extra_token - len(sample)), mode='constant', constant_values=-1)
341
+
342
+ if args.return_data_index:
343
+ return {'text': np.array(sample, dtype=np.int64),
344
+ 'index': np.array([orig_idx], dtype=np.int64)}
345
+ elif self.return_doc_ids: # for retro preprocessing
346
+ return {'text': np.array(sample, dtype=np.int64),
347
+ 'doc_ids': np.array(doc_ids, dtype=np.int64)}
348
+ else:
349
+ return {'text': np.array(sample, dtype=np.int64),
350
+ 'dummy_sample': np.array(int(dummy_sample), dtype=np.int64)}
351
+
352
+ return sample_dict
353
+
354
+ def _build_index_mappings(name, data_prefix, documents, sizes,
355
+ splits_string, num_samples, seq_length, seed,
356
+ *,
357
+ data_cache_path, add_extra_token):
358
+ """Build doc-idx, sample-idx, and shuffle-idx.
359
+ doc-idx: is an array (ordered) of documents to be used in training.
360
+ sample-idx: is the start document index and document offset for each
361
+ training sample.
362
+ shuffle-idx: maps the sample index into a random index into sample-idx.
363
+ """
364
+ args = get_args()
365
+ # Number of tokens in each epoch and number of required epochs.
366
+ tokens_per_epoch = _num_tokens(documents, sizes)
367
+ num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples, add_extra_token)
368
+ if num_samples < 0:
369
+ print_num_samples = tokens_per_epoch // seq_length
370
+ else:
371
+ print_num_samples = num_samples
372
+ if args.train_data_exact_num_epochs is not None and name == 'train':
373
+ num_epochs = args.train_data_exact_num_epochs
374
+
375
+ # rng state
376
+ np_rng = np.random.RandomState(seed=seed)
377
+
378
+ # Filename of the index mappings.
379
+ desc = "GPT Dataset\n\n"
380
+ desc += f"Data prefix {data_prefix}\n"
381
+ desc += f"Dataset name {name}\n"
382
+ desc += f"Number of samples {print_num_samples}\n"
383
+ desc += f"Number of epochs {num_epochs}\n"
384
+ desc += f"Sequence length {seq_length}\n"
385
+ desc += f"Random seed {seed}\n"
386
+ desc += f"Split {splits_string}\n"
387
+ desc_hash = hashlib.md5(desc.encode('utf-8')).hexdigest()
388
+ desc_filename = desc_hash + ".dsc"
389
+ doc_idx_filename = desc_hash + '_doc_idx.npy'
390
+ sample_idx_filename = desc_hash + '_sample_idx.npy'
391
+ shuffle_idx_filename = desc_hash + '_shuffle_idx.npy'
392
+
393
+ if name == 'train':
394
+ # force to use certain index files
395
+ if args.train_desc_path is not None:
396
+ desc_filename = args.train_desc_path
397
+ if args.train_doc_idx_path is not None:
398
+ doc_idx_filename = args.train_doc_idx_path
399
+ if args.train_sample_idx_path is not None:
400
+ sample_idx_filename = args.train_sample_idx_path
401
+ if args.train_shuffle_idx_path is not None:
402
+ shuffle_idx_filename = args.train_shuffle_idx_path
403
+
404
+ # Look for cache in main data dir first to avoid unnecessary
405
+ # duplication, then look in data-cache-path if specified,
406
+ # If nothing is found, use the last path looked in
407
+ build_indices = True
408
+ prefixes = [os.path.join(os.path.dirname(data_prefix), 'index-cache')]
409
+ if data_cache_path is not None:
410
+ prefixes.append(data_cache_path)
411
+ for prefix in prefixes:
412
+ idx_path = {
413
+ 'desc': os.path.join(prefix, desc_filename),
414
+ 'doc': os.path.join(prefix, doc_idx_filename),
415
+ 'sample': os.path.join(prefix, sample_idx_filename),
416
+ 'shuffle': os.path.join(prefix, shuffle_idx_filename)
417
+ }
418
+ for f in idx_path.values():
419
+ if not os.path.isfile(f):
420
+ break
421
+ else:
422
+ # Found our files!
423
+ build_indices = False
424
+ break
425
+ data_cache_dir = os.path.dirname(idx_path['desc'])
426
+ data_cache_success = True
427
+
428
+ # Build the indexed mapping if not exist.
429
+ if build_indices and is_rank_0():
430
+ print_rank_0(' > WARNING: could not find index map files, building '
431
+ 'the indices on rank 0 ...')
432
+
433
+ # For the last epoch, decide whether include the entire epoch
434
+ # in the global shuffle or not.
435
+
436
+ # If we need only one epoch, then separating last epoch does
437
+ # not mean anything.
438
+ if num_epochs == 1:
439
+ separate_last_epoch = False
440
+ print(' > only one epoch required, setting '
441
+ 'separate_last_epoch to False', flush=True)
442
+
443
+ else:
444
+ # Get the number of samples for the last epoch
445
+ assert num_samples >= 0, 'number of samples should be non-negative'
446
+ num_samples_from_epochs_minus_one = (
447
+ (num_epochs - 1) * tokens_per_epoch - add_extra_token) // seq_length
448
+ last_epoch_num_samples = num_samples - \
449
+ num_samples_from_epochs_minus_one
450
+ assert last_epoch_num_samples >= 0, \
451
+ 'last epoch number of samples should be non-negative.'
452
+ num_samples_per_epoch = (tokens_per_epoch - add_extra_token) // seq_length
453
+ assert last_epoch_num_samples <= (num_samples_per_epoch + 1), \
454
+ 'last epoch number of samples exceeded max value.'
455
+ # If we have less than 80% of the samples for the last epoch,
456
+ # separate out the epoch and treat it differently.
457
+ # Note: the 80% number is just based on common sense and can
458
+ # be adjusted if needed.
459
+ separate_last_epoch = (last_epoch_num_samples <
460
+ int(0.80 * num_samples_per_epoch))
461
+ if separate_last_epoch:
462
+ string = ' > last epoch number of samples ({}) is smaller '\
463
+ 'than 80% of number of samples per epoch ({}), '\
464
+ 'setting separate_last_epoch to True'
465
+ else:
466
+ string = ' > last epoch number of samples ({}) is larger '\
467
+ 'than 80% of number of samples per epoch ({}), '\
468
+ 'setting separate_last_epoch to False'
469
+ print(string.format(last_epoch_num_samples,
470
+ num_samples_per_epoch), flush=True)
471
+
472
+
473
+ try:
474
+ os.makedirs(data_cache_dir, exist_ok=True)
475
+
476
+ # description
477
+ with open(idx_path['desc'], 'wt') as fd:
478
+ fd.write(desc)
479
+
480
+ # doc-idx.
481
+ start_time = time.time()
482
+ doc_idx = _build_doc_idx(documents, num_epochs, np_rng,
483
+ separate_last_epoch)
484
+ np.save(idx_path['doc'], doc_idx, allow_pickle=True)
485
+ print_rank_0(' > elapsed time to build and save doc-idx mapping '
486
+ '(seconds): {:4f}'.format(time.time() - start_time))
487
+ # sample-idx.
488
+ start_time = time.time()
489
+ # Use C++ implementation for speed.
490
+ # First compile and then import.
491
+ from megatron.data import helpers
492
+ assert doc_idx.dtype == np.int32
493
+ assert sizes.dtype == np.int32
494
+ sample_idx = helpers.build_sample_idx(sizes, doc_idx, seq_length,
495
+ num_epochs, tokens_per_epoch,
496
+ num_samples < 0, add_extra_token)
497
+ np.save(idx_path['sample'], sample_idx, allow_pickle=True)
498
+ print_rank_0(' > elapsed time to build and save sample-idx mapping '
499
+ '(seconds): {:4f}'.format(time.time() - start_time))
500
+ # shuffle-idx.
501
+ start_time = time.time()
502
+ # -1 is due to data structure used to retrieve the index:
503
+ # sample i --> [sample_idx[i], sample_idx[i+1])
504
+ if separate_last_epoch:
505
+ num_samples_ = num_samples_from_epochs_minus_one
506
+ else:
507
+ num_samples_ = sample_idx.shape[0] - 1
508
+ shuffle_idx = _build_shuffle_idx(num_samples_,
509
+ sample_idx.shape[0] - 1, np_rng)
510
+ np.save(idx_path['shuffle'], shuffle_idx, allow_pickle=True)
511
+ print_rank_0(' > elapsed time to build and save shuffle-idx mapping'
512
+ ' (seconds): {:4f}'.format(time.time() - start_time))
513
+ except OSError:
514
+ print(f'There was an error trying to create the data cache directory ({data_cache_dir})')
515
+ print('or a file in it. This defaults to a directory "index-cache" within the directory')
516
+ print('the data files are in and can be set with the --data-cache-path argument. Please')
517
+ print('ensure you have write access to this directory or specify one that you do have')
518
+ print('write access to.')
519
+ data_cache_success = False
520
+
521
+ counts = get_accelerator().LongTensor([data_cache_success])
522
+ torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
523
+ torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
524
+ if counts[0].item() != (
525
+ torch.distributed.get_world_size() //
526
+ torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group()) //
527
+ torch.distributed.get_world_size(group=mpu.get_sequence_parallel_group())):
528
+ print_rank_0("Data index creation unsuccessful, exiting.")
529
+ exit()
530
+
531
+ # Load mappings.
532
+ start_time = time.time()
533
+ print_rank_0(f" > loading doc-idx mapping from {idx_path['doc']}")
534
+ doc_idx = np.load(idx_path['doc'], allow_pickle=True, mmap_mode='r')
535
+
536
+ print_rank_0(f" > loading sample-idx mapping from {idx_path['sample']}")
537
+ sample_idx = np.load(idx_path['sample'], allow_pickle=True, mmap_mode='r')
538
+
539
+ print_rank_0(f" > loading shuffle-idx mapping from {idx_path['shuffle']}")
540
+ shuffle_idx = np.load(idx_path['shuffle'], allow_pickle=True, mmap_mode='r')
541
+
542
+ print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
543
+ time.time() - start_time))
544
+ print_rank_0(' total number of samples: {}'.format(
545
+ sample_idx.shape[0]))
546
+ print_rank_0(' total number of epochs: {}'.format(num_epochs))
547
+
548
+ return doc_idx, sample_idx, shuffle_idx, desc, desc_hash
549
+
550
+
551
+ def _num_tokens(documents, sizes):
552
+ """Total number of tokens in the dataset."""
553
+ return np.sum(sizes[documents])
554
+
555
+
556
+ def _num_epochs(tokens_per_epoch, seq_length, num_samples, add_extra_token):
557
+ """Based on number of samples and sequence length, calculate how many
558
+ epochs will be needed."""
559
+ num_epochs = 0
560
+ total_tokens = 0
561
+ while True:
562
+ num_epochs += 1
563
+ total_tokens += tokens_per_epoch
564
+ # -1 is because we need to retrieve seq_length + 1 token each time
565
+ # but the last token will overlap with the first token of the next
566
+ # sample except for the last sample.
567
+ if ((total_tokens - add_extra_token) // seq_length) >= num_samples:
568
+ return num_epochs
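A worked toy example of the loop above (all numbers are assumptions): with tokens_per_epoch=1000, seq_length=128, add_extra_token=1 and num_samples=20, successive epochs yield 7, 15 and 23 usable samples, so three epochs are needed.

tokens_per_epoch, seq_length, add_extra_token, num_samples = 1000, 128, 1, 20
num_epochs, total_tokens = 0, 0
while (total_tokens - add_extra_token) // seq_length < num_samples:
    num_epochs += 1
    total_tokens += tokens_per_epoch
print(num_epochs)   # 3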
569
+
570
+
571
+ def _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch):
572
+ """Build an array with length = number-of-epochs * number-of-documents.
573
+ Each index is mapped to a corresponding document."""
574
+ if not separate_last_epoch or num_epochs == 1:
575
+ doc_idx = np.mgrid[0:num_epochs, 0:len(documents)][1]
576
+ doc_idx[:] = documents
577
+ doc_idx = doc_idx.reshape(-1)
578
+ doc_idx = doc_idx.astype(np.int32)
579
+ np_rng.shuffle(doc_idx)
580
+ return doc_idx
581
+
582
+ doc_idx_first = _build_doc_idx(documents, num_epochs-1, np_rng, False)
583
+ doc_idx_last = _build_doc_idx(documents, 1, np_rng, False)
584
+ return np.concatenate((doc_idx_first, doc_idx_last))
585
+
586
+
587
+ def _build_sample_idx(sizes, doc_idx, seq_length,
588
+ num_epochs, tokens_per_epoch,
589
+ keep_last_sequence, add_extra_token):
590
+ """Sample index mapping is a 2D array with sizes
591
+ [number-of-samples + 1, 2] where [..., 0] contains
592
+ the index into `doc_idx` and [..., 1] is the
593
+ starting offset in that document."""
594
+
595
+ # Total number of samples. For -1 see comments in `_num_epochs`.
596
+ if keep_last_sequence:
597
+ import math
598
+ num_samples = math.ceil((num_epochs * tokens_per_epoch - add_extra_token) / seq_length)
599
+ else:
600
+ num_samples = (num_epochs * tokens_per_epoch - add_extra_token) // seq_length
601
+ sample_idx = np.zeros([num_samples + 1, 2], dtype=np.int32)
602
+
603
+ # Index into sample_idx.
604
+ sample_index = 0
605
+ # Index into doc_idx.
606
+ doc_idx_index = 0
607
+ # Beginning offset for each document.
608
+ doc_offset = 0
609
+ # Start with first document and no offset.
610
+ sample_idx[sample_index][0] = doc_idx_index
611
+ sample_idx[sample_index][1] = doc_offset
612
+ sample_index += 1
613
+ while sample_index <= num_samples:
614
+ # Start with a fresh sequence.
615
+ remaining_seq_length = seq_length + add_extra_token
616
+ while remaining_seq_length != 0:
617
+ # Get the document length.
618
+ doc_id = doc_idx[doc_idx_index]
619
+ doc_length = sizes[doc_id] - doc_offset
620
+ # And add it to the current sequence.
621
+ remaining_seq_length -= doc_length
622
+ # If we have more than a full sequence, adjust offset and set
623
+ # remaining length to zero so we return from the while loop.
624
+ # Note that -1 here is for the same reason we have -1 in
625
+ # `_num_epochs` calculations.
626
+ if remaining_seq_length <= 0:
627
+ doc_offset += (remaining_seq_length + doc_length - add_extra_token)
628
+ remaining_seq_length = 0
629
+ else:
630
+ # Otherwise, start from the beginning of the next document.
631
+ if doc_idx_index == (len(doc_idx) - 1):
632
+ assert sample_index == num_samples, F"sample_index={sample_index} and num_samples={num_samples} should be the same"
633
+ doc_offset = sizes[doc_idx[doc_idx_index]] - add_extra_token
634
+ break
635
+ doc_idx_index += 1
636
+ doc_offset = 0
637
+ # Record the sequence.
638
+ sample_idx[sample_index][0] = doc_idx_index
639
+ sample_idx[sample_index][1] = doc_offset
640
+ sample_index += 1
641
+
642
+ return sample_idx
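To make the [document index, offset] encoding concrete, here is a hand-worked toy case (all numbers are assumptions): two documents of 5 and 4 tokens, seq_length=3, add_extra_token=1, one epoch. The builder produces three rows, and __getitem__ turns consecutive row pairs into (seq_length + 1)-token samples, the second of which crosses the document boundary:

import numpy as np

sample_idx = np.array([[0, 0],    # sample 0 starts in doc 0 at offset 0
                       [0, 3],    # sample 1 starts in doc 0 at offset 3
                       [1, 1]])   # end sentinel: doc 1, offset 1
docs = [np.arange(0, 5), np.arange(100, 104)]

# sample 0 stays inside doc 0: tokens [0, offset_l + 1)
sample0 = docs[0][0:3 + 1]                                  # [0 1 2 3]
# sample 1 spans the boundary: tail of doc 0 plus head of doc 1
sample1 = np.concatenate([docs[0][3:], docs[1][:1 + 1]])    # [3 4 100 101]
print(sample0, sample1)                                     # each has seq_length + 1 = 4 tokens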
643
+
644
+
645
+ def _build_shuffle_idx(num_samples, total_size, np_rng):
646
+ """Build the range [0, size) and shuffle."""
647
+ print(' > building shuffle index with split [0, {}) and [{}, {}) '
648
+ '...'.format(num_samples, num_samples, total_size), flush=True)
649
+
650
+ dtype_ = np.uint32
651
+ if total_size >= (np.iinfo(np.uint32).max - 1):
652
+ dtype_ = np.int64
653
+
654
+ shuffle_idx_first = np.arange(start=0, stop=num_samples,
655
+ step=1, dtype=dtype_)
656
+ np_rng.shuffle(shuffle_idx_first)
657
+ if num_samples == total_size:
658
+ return shuffle_idx_first
659
+
660
+ shuffle_idx_last = np.arange(start=num_samples, stop=total_size,
661
+ step=1, dtype=dtype_)
662
+ np_rng.shuffle(shuffle_idx_last)
663
+
664
+ return np.concatenate((shuffle_idx_first, shuffle_idx_last))
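Example call for the shuffle-index builder above (toy sizes; the split models six samples from full epochs plus four from a separated last epoch, and the import assumes the package's dependencies are available):

import numpy as np
from megatron.data.gpt_dataset import _build_shuffle_idx

np_rng = np.random.RandomState(seed=1234)
shuffle_idx = _build_shuffle_idx(num_samples=6, total_size=10, np_rng=np_rng)
print(shuffle_idx[:6])   # a permutation of 0..5, shuffled independently
print(shuffle_idx[6:])   # a permutation of 6..9 (the separated last epoch)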
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/ict_dataset.py ADDED
@@ -0,0 +1,156 @@
1
+ import itertools
2
+ import random
3
+
4
+ import numpy as np
5
+ from torch.utils.data import Dataset
6
+
7
+ from megatron import get_tokenizer
8
+ from megatron import get_args
9
+ from megatron.data.dataset_utils import get_indexed_dataset_
10
+ from megatron.data.realm_dataset_utils import get_block_samples_mapping
11
+
12
+ def make_attention_mask(source_block, target_block):
13
+ """
14
+ Returns a 2-dimensional (2-D) attention mask
15
+ :param source_block: 1-D array
16
+ :param target_block: 1-D array
17
+ """
18
+ mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1)
19
+ mask = mask.astype(np.int64)
20
+ # (source_length, target_length)
21
+ return mask
22
+
23
+ def get_ict_dataset(use_titles=True, query_in_block_prob=1):
24
+ """Get a dataset which uses block samples mappings to get ICT/block indexing data (via get_block())
25
+ rather than for training, since it is only built with a single epoch sample mapping.
26
+ """
27
+ args = get_args()
28
+ block_dataset = get_indexed_dataset_(args.data_path, 'mmap', True)
29
+ titles_dataset = get_indexed_dataset_(args.titles_data_path, 'mmap', True)
30
+
31
+ kwargs = dict(
32
+ name='full',
33
+ block_dataset=block_dataset,
34
+ title_dataset=titles_dataset,
35
+ data_prefix=args.data_path,
36
+ num_epochs=1,
37
+ max_num_samples=None,
38
+ max_seq_length=args.seq_length,
39
+ seed=1,
40
+ query_in_block_prob=query_in_block_prob,
41
+ use_titles=use_titles,
42
+ use_one_sent_docs=args.use_one_sent_docs
43
+ )
44
+ dataset = ICTDataset(**kwargs)
45
+ return dataset
46
+
47
+
48
+ class ICTDataset(Dataset):
49
+ """Dataset containing sentences and their blocks for an inverse cloze task."""
50
+ def __init__(self, name, block_dataset, title_dataset, data_prefix,
51
+ num_epochs, max_num_samples, max_seq_length, query_in_block_prob,
52
+ seed, use_titles=True, use_one_sent_docs=False, binary_head=False):
53
+ self.name = name
54
+ self.seed = seed
55
+ self.max_seq_length = max_seq_length
56
+ self.query_in_block_prob = query_in_block_prob
57
+ self.block_dataset = block_dataset
58
+ self.title_dataset = title_dataset
59
+ self.rng = random.Random(self.seed)
60
+ self.use_titles = use_titles
61
+ self.use_one_sent_docs = use_one_sent_docs
62
+
63
+ self.samples_mapping = get_block_samples_mapping(
64
+ block_dataset, title_dataset, data_prefix, num_epochs,
65
+ max_num_samples, max_seq_length, seed, name, use_one_sent_docs)
66
+ self.tokenizer = get_tokenizer()
67
+ self.vocab_id_list = list(self.tokenizer.inv_vocab.keys())
68
+ self.vocab_id_to_token_list = self.tokenizer.inv_vocab
69
+ self.cls_id = self.tokenizer.cls
70
+ self.sep_id = self.tokenizer.sep
71
+ self.mask_id = self.tokenizer.mask
72
+ self.pad_id = self.tokenizer.pad
73
+
74
+ def __len__(self):
75
+ return len(self.samples_mapping)
76
+
77
+ def __getitem__(self, idx):
78
+ """Get an ICT example of a pseudo-query and the block of text from which it was extracted"""
79
+ sample_data = self.samples_mapping[idx]
80
+ start_idx, end_idx, doc_idx, block_idx = sample_data.as_tuple()
81
+
82
+ if self.use_titles:
83
+ title = self.title_dataset[int(doc_idx)]
84
+ title_pad_offset = 3 + len(title)
85
+ else:
86
+ title = None
87
+ title_pad_offset = 2
88
+ block = [self.block_dataset[i] for i in range(start_idx, end_idx)]
89
+ assert len(block) > 1 or self.use_one_sent_docs or self.query_in_block_prob == 1
90
+
91
+ # randint() is inclusive for Python rng
92
+ rand_sent_idx = self.rng.randint(0, len(block) - 1)
93
+
94
+ # keep the query in the context query_in_block_prob fraction of the time.
95
+ if self.rng.random() < self.query_in_block_prob:
96
+ query = block[rand_sent_idx].copy()
97
+ else:
98
+ query = block.pop(rand_sent_idx)
99
+
100
+ # still need to truncate because blocks are concluded when
101
+ # the sentence lengths have exceeded max_seq_length.
102
+ query = query[:self.max_seq_length - 2]
103
+ block = list(itertools.chain(*block))[:self.max_seq_length - title_pad_offset]
104
+
105
+ query_tokens, query_pad_mask = self.concat_and_pad_tokens(query)
106
+ context_tokens, context_pad_mask = self.concat_and_pad_tokens(block, title)
107
+
108
+ query_mask = make_attention_mask(query_tokens, query_tokens)
109
+ context_mask = make_attention_mask(context_tokens, context_tokens)
110
+
111
+ block_data = sample_data.as_array()
112
+
113
+ sample = {
114
+ 'query_tokens': query_tokens,
115
+ 'query_mask': query_mask,
116
+ 'query_pad_mask': query_pad_mask,
117
+ 'context_tokens': context_tokens,
118
+ 'context_mask': context_mask,
119
+ 'context_pad_mask': context_pad_mask,
120
+ 'block_data': block_data,
121
+ }
122
+
123
+ return sample
124
+
125
+ def get_block(self, start_idx, end_idx, doc_idx):
126
+ """Get the IDs for an evidence block plus the title of the corresponding document"""
127
+ block = [self.block_dataset[i] for i in range(start_idx, end_idx)]
128
+ title = self.title_dataset[int(doc_idx)]
129
+
130
+ block = list(itertools.chain(*block))[:self.max_seq_length - (3 + len(title))]
131
+ block_tokens, block_pad_mask = self.concat_and_pad_tokens(block, title)
132
+
133
+ return block_tokens, block_pad_mask
134
+
135
+ def get_null_block(self):
136
+ """Get empty block and title - used in REALM pretraining"""
137
+ block, title = [], []
138
+ block_tokens, block_pad_mask = self.concat_and_pad_tokens(block, title)
139
+
140
+ return block_tokens, block_pad_mask
141
+
142
+ def concat_and_pad_tokens(self, tokens, title=None):
143
+ """Concat with special tokens and pad sequence to self.max_seq_length"""
144
+ tokens = list(tokens)
145
+ if title is None:
146
+ tokens = [self.cls_id] + tokens + [self.sep_id]
147
+ else:
148
+ title = list(title)
149
+ tokens = [self.cls_id] + title + [self.sep_id] + tokens + [self.sep_id]
150
+ assert len(tokens) <= self.max_seq_length
151
+
152
+ num_pad = self.max_seq_length - len(tokens)
153
+ pad_mask = [1] * len(tokens) + [0] * num_pad
154
+ tokens += [self.pad_id] * num_pad
155
+
156
+ return np.array(tokens), np.array(pad_mask)
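As a quick reference for the padding layout produced by concat_and_pad_tokens above, here is a minimal standalone sketch in Python; the special-token ids (CLS_ID, SEP_ID, PAD_ID) and the toy max_seq_length are invented for illustration and are not taken from the tokenizer used in this file. It also makes the title_pad_offset arithmetic visible: one [CLS] plus two [SEP] tokens account for the 3 in 3 + len(title).

import numpy as np

# Hypothetical special-token ids and a toy sequence length, for illustration only.
CLS_ID, SEP_ID, PAD_ID = 101, 102, 0
MAX_SEQ_LENGTH = 12

def concat_and_pad(tokens, title=None, max_seq_length=MAX_SEQ_LENGTH):
    # Mirrors concat_and_pad_tokens above: [CLS] (+ title + [SEP]) + tokens + [SEP], then pad.
    tokens = list(tokens)
    if title is None:
        tokens = [CLS_ID] + tokens + [SEP_ID]
    else:
        tokens = [CLS_ID] + list(title) + [SEP_ID] + tokens + [SEP_ID]
    num_pad = max_seq_length - len(tokens)
    pad_mask = [1] * len(tokens) + [0] * num_pad
    tokens += [PAD_ID] * num_pad
    return np.array(tokens), np.array(pad_mask)

query_tokens, query_pad_mask = concat_and_pad([7, 8, 9])
print(query_tokens)     # [CLS] 7 8 9 [SEP] followed by seven pads
print(query_pad_mask)   # [1 1 1 1 1 0 0 0 0 0 0 0]
context_tokens, _ = concat_and_pad([7, 8, 9, 10], title=[5, 6])
print(context_tokens)   # [CLS] 5 6 [SEP] 7 8 9 10 [SEP] followed by three pads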
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/orqa_wiki_dataset.py ADDED
@@ -0,0 +1,193 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ """Wikipedia dataset from DPR code for ORQA."""
4
+
5
+ from abc import ABC
6
+ import csv
7
+ import numpy as np
8
+ import random
9
+ import torch
10
+ from torch.utils.data import Dataset
11
+
12
+ from megatron import print_rank_0, get_args, get_tokenizer
13
+ from megatron.core import tensor_parallel
14
+ from megatron.data.biencoder_dataset_utils import make_attention_mask
15
+
16
+ def get_open_retrieval_wiki_dataset():
17
+ args = get_args()
18
+ tokenizer = get_tokenizer()
19
+
20
+ dataset = OpenRetrievalEvidenceDataset('2018 Wikipedia from DPR codebase',
21
+ 'evidence',
22
+ args.evidence_data_path,
23
+ tokenizer,
24
+ args.retriever_seq_length)
25
+ return dataset
26
+
27
+
28
+ def get_open_retrieval_batch(data_iterator):
29
+ # Items and their type.
30
+ keys = ['row_id', 'context', 'context_mask', 'context_types',
31
+ 'context_pad_mask']
32
+ datatype = torch.int64
33
+
34
+ # Broadcast data.
35
+ data = None if data_iterator is None else next(data_iterator)
36
+ data_b = tensor_parallel.broadcast_data(keys, data, datatype)
37
+
38
+ # Unpack.
39
+ row_id = data_b['row_id'].long()
40
+ context = data_b['context'].long()
41
+
42
+ # TODO: make the context mask a binary one
43
+ context_mask = (data_b['context_mask'] < 0.5)
44
+
45
+ context_types = data_b['context_types'].long()
46
+ context_pad_mask = data_b['context_pad_mask'].long()
47
+
48
+ return row_id, context, context_mask, context_types, context_pad_mask
49
+
50
+
51
+ def build_tokens_types_paddings_from_text(row, tokenizer, max_seq_length):
52
+ """Build token types and paddings, trim if needed, and pad if needed."""
53
+
54
+ title_ids = tokenizer.tokenize(row['title'])
55
+ context_ids = tokenizer.tokenize(row['text'])
56
+
57
+ # Prepend the context's title at the front
58
+ extended_context_ids = title_ids + [tokenizer.sep_id] + context_ids
59
+
60
+ context_ids, context_types, context_pad_mask = \
61
+ build_tokens_types_paddings_from_ids(extended_context_ids,
62
+ max_seq_length, tokenizer.cls, tokenizer.sep, tokenizer.pad)
63
+
64
+ return context_ids, context_types, context_pad_mask
65
+
66
+
67
+ # noinspection DuplicatedCode
68
+ def build_tokens_types_paddings_from_ids(text_ids, max_seq_length,
69
+ cls_id, sep_id, pad_id):
70
+ """Build token types and paddings, trim if needed, and pad if needed."""
71
+ enc_ids = []
72
+ tokentypes_enc = []
73
+
74
+ # [CLS].
75
+ enc_ids.append(cls_id)
76
+ tokentypes_enc.append(0)
77
+
78
+ # A.
79
+ len_src = len(text_ids)
80
+ enc_ids.extend(text_ids)
81
+ tokentypes_enc.extend([0] * len_src)
82
+
83
+ # Cap the size.
84
+ if len(enc_ids) > max_seq_length - 1:
85
+ enc_ids = enc_ids[0: max_seq_length - 1]
86
+ tokentypes_enc = tokentypes_enc[0: max_seq_length - 1]
87
+
88
+ # [SEP].
89
+ enc_ids.append(sep_id)
90
+ tokentypes_enc.append(0)
91
+
92
+ num_tokens_enc = len(enc_ids)
93
+ # Padding.
94
+ padding_length = max_seq_length - len(enc_ids)
95
+ if padding_length > 0:
96
+ enc_ids.extend([pad_id] * padding_length)
97
+ tokentypes_enc.extend([pad_id] * padding_length)
98
+
99
+ pad_mask = ([1] * num_tokens_enc) + ([0] * padding_length)
100
+ pad_mask = np.array(pad_mask, dtype=np.int64)
101
+
102
+ return enc_ids, tokentypes_enc, pad_mask
103
+
104
+
105
+ def build_sample(row_id, context_ids, context_types, context_pad_mask):
106
+ """Convert to numpy and return a sample consumed by the batch producer."""
107
+
108
+ context_ids = np.array(context_ids, dtype=np.int64)
109
+ context_types = np.array(context_types, dtype=np.int64)
110
+ context_mask = make_attention_mask(context_ids, context_ids)
111
+
112
+ sample = ({
113
+ 'row_id': row_id,
114
+ 'context': context_ids,
115
+ 'context_mask': context_mask,
116
+ 'context_types': context_types,
117
+ 'context_pad_mask': context_pad_mask
118
+ })
119
+ return sample
120
+
121
+
122
+ class OpenRetrievalEvidenceDataset(ABC, Dataset):
123
+ """Open Retrieval Evidence dataset class."""
124
+
125
+ def __init__(self, task_name, dataset_name, datapath, tokenizer,
126
+ max_seq_length):
127
+ # Store inputs.
128
+ self.task_name = task_name
129
+ self.dataset_name = dataset_name
130
+ self.tokenizer = tokenizer
131
+ self.max_seq_length = max_seq_length
132
+ print_rank_0(' > building {} dataset for {}:'.format(self.task_name,
133
+ self.dataset_name))
134
+ # Process the files.
135
+ print_rank_0(datapath)
136
+ self.samples, self.id2text = self.process_samples_from_single_path(
137
+ datapath)
138
+
139
+ args = get_args()
140
+ if args.sample_rate < 1: # subsample
141
+ k = int(len(self.samples) * args.sample_rate)
142
+ self.samples = random.sample(self.samples, k)
143
+
144
+ print_rank_0(' >> total number of samples: {}'.format(
145
+ len(self.samples)))
146
+
147
+ def __len__(self):
148
+ return len(self.samples)
149
+
150
+ def __getitem__(self, idx):
151
+ row = self.samples[idx]
152
+
153
+ context_ids, context_types, context_pad_mask = \
154
+ build_tokens_types_paddings_from_text(row, self.tokenizer,
155
+ self.max_seq_length)
156
+
157
+ sample = build_sample(row['doc_id'],
158
+ context_ids,
159
+ context_types,
160
+ context_pad_mask)
161
+ return sample
162
+
163
+ @staticmethod
164
+ def process_samples_from_single_path(filename):
165
+ print_rank_0(' > Processing {} ...'.format(filename))
166
+ total = 0
167
+
168
+ rows = []
169
+ id2text = {}
170
+
171
+ with open(filename) as tsvfile:
172
+ reader = csv.reader(tsvfile, delimiter='\t')
173
+ next(reader, None) # skip the headers
174
+ for row in reader:
175
+ # file format: doc_id, doc_text, title
176
+ doc_id = int(row[0])
177
+ text = row[1]
178
+ title = row[2]
179
+
180
+ rows.append({'doc_id': doc_id,
181
+ 'text': text,
182
+ 'title': title})
183
+
184
+ assert doc_id not in id2text
185
+ id2text[doc_id] = (text, title)
186
+
187
+ total += 1
188
+ if total % 100000 == 0:
189
+ print_rank_0(' > processed {} rows so far ...'.format(
190
+ total))
191
+
192
+ print_rank_0(' >> processed {} samples.'.format(len(rows)))
193
+ return rows, id2text
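To make the [CLS] text [SEP] layout built by build_tokens_types_paddings_from_ids above concrete, here is a small self-contained mirror of its logic with made-up cls/sep/pad ids; note that, exactly as in the code above, the token-type list is padded with pad_id rather than with type 0.

import numpy as np

def build_from_ids(text_ids, max_seq_length, cls_id, sep_id, pad_id):
    # Mirrors build_tokens_types_paddings_from_ids above.
    enc_ids = [cls_id] + list(text_ids)
    tokentypes_enc = [0] * len(enc_ids)
    if len(enc_ids) > max_seq_length - 1:              # cap, leaving room for [SEP]
        enc_ids = enc_ids[:max_seq_length - 1]
        tokentypes_enc = tokentypes_enc[:max_seq_length - 1]
    enc_ids.append(sep_id)
    tokentypes_enc.append(0)
    num_tokens_enc = len(enc_ids)
    padding_length = max_seq_length - num_tokens_enc
    enc_ids.extend([pad_id] * padding_length)
    tokentypes_enc.extend([pad_id] * padding_length)   # padded with pad_id, as above
    pad_mask = np.array([1] * num_tokens_enc + [0] * padding_length, dtype=np.int64)
    return enc_ids, tokentypes_enc, pad_mask

ids, types, mask = build_from_ids([11, 12, 13], max_seq_length=8,
                                  cls_id=101, sep_id=102, pad_id=0)
print(ids)    # [101, 11, 12, 13, 102, 0, 0, 0]
print(mask)   # [1 1 1 1 1 0 0 0]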
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/realm_dataset_utils.py ADDED
@@ -0,0 +1,200 @@
1
+ import os
2
+ import time
3
+
4
+ import numpy as np
5
+ import torch
6
+
7
+ from megatron import print_rank_0
8
+ from megatron.core import mpu, tensor_parallel
9
+ from megatron.data.dataset_utils import create_masked_lm_predictions, pad_and_convert_to_numpy
10
+ from megatron import get_args, get_tokenizer, print_rank_0
11
+ from deepspeed.accelerator import get_accelerator
12
+
13
+
14
+ def get_one_epoch_dataloader(dataset, micro_batch_size=None):
15
+ """Specifically one epoch to be used in an indexing job."""
16
+ args = get_args()
17
+
18
+ world_size = mpu.get_data_parallel_world_size()
19
+ rank = mpu.get_data_parallel_rank()
20
+ if micro_batch_size is None:
21
+ micro_batch_size = args.micro_batch_size
22
+ global_batch_size = micro_batch_size * world_size
23
+ num_workers = args.num_workers
24
+
25
+ sampler = torch.utils.data.SequentialSampler(dataset)
26
+ # importantly, drop_last must be False to get all the data.
27
+ assert False, 'DistributedBatchSampler deprecated, change the implementation'
28
+ from megatron.data.samplers import DistributedBatchSampler
29
+ batch_sampler = DistributedBatchSampler(sampler,
30
+ batch_size=global_batch_size,
31
+ drop_last=False,
32
+ rank=rank,
33
+ world_size=world_size)
34
+
35
+ return torch.utils.data.DataLoader(dataset,
36
+ batch_sampler=batch_sampler,
37
+ num_workers=num_workers,
38
+ pin_memory=True)
39
+
40
+
41
+ def get_ict_batch(data_iterator):
42
+ # Items and their type.
43
+ keys = ['query_tokens', 'query_pad_mask',
44
+ 'block_tokens', 'block_pad_mask', 'block_data']
45
+ datatype = torch.int64
46
+
47
+ # Broadcast data.
48
+ if data_iterator is None:
49
+ data = None
50
+ else:
51
+ data = next(data_iterator)
52
+ data_b = tensor_parallel.broadcast_data(keys, data, datatype)
53
+
54
+ # Unpack.
55
+ query_tokens = data_b['query_tokens'].long()
56
+ query_pad_mask = data_b['query_pad_mask'].long()
57
+ block_tokens = data_b['block_tokens'].long()
58
+ block_pad_mask = data_b['block_pad_mask'].long()
59
+ block_indices = data_b['block_data'].long()
60
+
61
+ return query_tokens, query_pad_mask,\
62
+ block_tokens, block_pad_mask, block_indices
63
+
64
+
65
+ def join_str_list(str_list):
66
+ """Join a list of strings, handling spaces appropriately"""
67
+ result = ""
68
+ for s in str_list:
69
+ if s.startswith("##"):
70
+ result += s[2:]
71
+ else:
72
+ result += " " + s
73
+ return result
74
+
75
+
76
+ class BlockSampleData(object):
77
+ """A struct for fully describing a fixed-size block of data as used in REALM
78
+
79
+ :param start_idx: index of the first sentence of the block
80
+ :param end_idx: index of the last sentence of the block (may be partially truncated in sample construction)
81
+ :param doc_idx: the index of the document from which the block comes in the original indexed dataset
82
+ :param block_idx: a unique integer identifier given to every block.
83
+ """
84
+ def __init__(self, start_idx, end_idx, doc_idx, block_idx):
85
+ self.start_idx = start_idx
86
+ self.end_idx = end_idx
87
+ self.doc_idx = doc_idx
88
+ self.block_idx = block_idx
89
+
90
+ def as_array(self):
91
+ return np.array([self.start_idx, self.end_idx, self.doc_idx, self.block_idx]).astype(np.int64)
92
+
93
+ def as_tuple(self):
94
+ return self.start_idx, self.end_idx, self.doc_idx, self.block_idx
95
+
96
+
97
+ class BlockSamplesMapping(object):
98
+ def __init__(self, mapping_array):
99
+ # make sure that the array is compatible with BlockSampleData
100
+ assert mapping_array.shape[1] == 4
101
+ self.mapping_array = mapping_array
102
+
103
+ def __len__(self):
104
+ return self.mapping_array.shape[0]
105
+
106
+ def __getitem__(self, idx):
107
+ """Get the data associated with an indexed sample."""
108
+ sample_data = BlockSampleData(*self.mapping_array[idx])
109
+ return sample_data
110
+
111
+
112
+ def get_block_samples_mapping(block_dataset, title_dataset, data_prefix, num_epochs,
113
+ max_num_samples, max_seq_length, seed, name, use_one_sent_docs=False):
114
+ """Get samples mapping for a dataset over fixed size blocks. This function also requires
115
+ a dataset of the titles for the source documents since their lengths must be taken into account.
116
+
117
+ :return: samples_mapping (BlockSamplesMapping)
118
+ """
119
+
120
+ if not num_epochs:
121
+ if not max_num_samples:
122
+ raise ValueError("Need to specify either max_num_samples "
123
+ "or num_epochs")
124
+ num_epochs = np.iinfo(np.int32).max - 1
125
+ if not max_num_samples:
126
+ max_num_samples = np.iinfo(np.int64).max - 1
127
+
128
+ # Filename of the index mapping
129
+ indexmap_filename = data_prefix
130
+ indexmap_filename += '_{}_indexmap'.format(name)
131
+ if num_epochs != (np.iinfo(np.int32).max - 1):
132
+ indexmap_filename += '_{}ep'.format(num_epochs)
133
+ if max_num_samples != (np.iinfo(np.int64).max - 1):
134
+ indexmap_filename += '_{}mns'.format(max_num_samples)
135
+ indexmap_filename += '_{}msl'.format(max_seq_length)
136
+ indexmap_filename += '_{}s'.format(seed)
137
+ if use_one_sent_docs:
138
+ indexmap_filename += '_1sentok'
139
+ indexmap_filename += '.npy'
140
+
141
+ # Build the indexed mapping if not exist.
142
+ if mpu.get_data_parallel_rank() == 0 and \
143
+ not os.path.isfile(indexmap_filename):
144
+ print(' > WARNING: could not find index map file {}, building '
145
+ 'the indices on rank 0 ...'.format(indexmap_filename))
146
+
147
+ # Make sure the types match the helpers input types.
148
+ assert block_dataset.doc_idx.dtype == np.int64
149
+ assert block_dataset.sizes.dtype == np.int32
150
+
151
+ # Build samples mapping
152
+ verbose = torch.distributed.get_rank() == 0
153
+ start_time = time.time()
154
+ print_rank_0(' > building samples index mapping for {} ...'.format(
155
+ name))
156
+
157
+ from megatron.data import helpers
158
+ mapping_array = helpers.build_blocks_mapping(
159
+ block_dataset.doc_idx,
160
+ block_dataset.sizes,
161
+ title_dataset.sizes,
162
+ num_epochs,
163
+ max_num_samples,
164
+ max_seq_length - 3, # account for added tokens
165
+ seed,
166
+ verbose,
167
+ use_one_sent_docs)
168
+
169
+
170
+ print_rank_0(' > done building samples index mapping')
171
+ np.save(indexmap_filename, mapping_array, allow_pickle=True)
172
+ print_rank_0(' > saved the index mapping in {}'.format(
173
+ indexmap_filename))
174
+ # Make sure all the ranks have built the mapping
175
+ print_rank_0(' > elapsed time to build and save samples mapping '
176
+ '(seconds): {:4f}'.format(
177
+ time.time() - start_time))
178
+
179
+ # This should be a barrier, but the nccl barrier assumes
180
+ # device_index=rank, which does not hold in the
181
+ # model-parallel case
182
+ counts = get_accelerator().LongTensor([1])
183
+ torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
184
+ assert counts[0].item() == torch.distributed.get_world_size(
185
+ group=mpu.get_data_parallel_group())
186
+
187
+ # Load indexed dataset.
188
+ print_rank_0(' > loading indexed mapping from {}'.format(
189
+ indexmap_filename))
190
+ start_time = time.time()
191
+
192
+ mapping_array = np.load(indexmap_filename, allow_pickle=True, mmap_mode='r')
193
+ samples_mapping = BlockSamplesMapping(mapping_array)
194
+
195
+ print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
196
+ time.time() - start_time))
197
+ print_rank_0(' total number of samples: {}'.format(
198
+ mapping_array.shape[0]))
199
+
200
+ return samples_mapping
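For orientation, a short usage sketch of the mapping classes defined above; the mapping_array values are invented, and the import assumes this file's path in the diff (megatron/data/realm_dataset_utils.py).

import numpy as np
from megatron.data.realm_dataset_utils import BlockSamplesMapping

# Toy mapping with the expected 4 columns per row:
# (start_idx, end_idx, doc_idx, block_idx) -- values invented for illustration.
mapping_array = np.array([[0, 3, 0, 0],
                          [3, 7, 0, 1],
                          [7, 9, 1, 2]], dtype=np.int64)

samples_mapping = BlockSamplesMapping(mapping_array)
print(len(samples_mapping))     # 3
sample = samples_mapping[1]     # a BlockSampleData for the second block
print(sample.as_tuple())        # start_idx, end_idx, doc_idx, block_idx of that block
print(sample.as_array())        # [3 7 0 1]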
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/test/test_preprocess_data.sh ADDED
@@ -0,0 +1,10 @@
1
+ #!/bin/bash
2
+
3
+ IMPL=cached
4
+ python ../preprocess_data.py \
5
+ --input test_samples.json \
6
+ --vocab vocab.txt \
7
+ --dataset-impl ${IMPL} \
8
+ --output-prefix test_samples_${IMPL} \
9
+ --workers 1 \
10
+ --log-interval 2
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/dist_signal_handler.py ADDED
@@ -0,0 +1,81 @@
1
+ import signal
2
+
3
+ import torch
4
+
5
+
6
+ def get_world_size():
7
+ if torch.distributed.is_available() and torch.distributed.is_initialized():
8
+ world_size = torch.distributed.get_world_size()
9
+ else:
10
+ world_size = 1
11
+ return world_size
12
+
13
+
14
+ def get_device(local_rank=None):
15
+ backend = torch.distributed.get_backend()
16
+ if backend == 'nccl':
17
+ if local_rank is None:
18
+ device = torch.device('cuda')
19
+ else:
20
+ device = torch.device(f'cuda:{local_rank}')
21
+ elif backend == 'gloo':
22
+ device = torch.device('cpu')
23
+ else:
24
+ raise RuntimeError
25
+ return device
26
+
27
+
28
+ def all_gather_item(item, dtype, group=None, async_op=False, local_rank=None):
29
+ if not torch.distributed.is_available() or \
30
+ not torch.distributed.is_initialized():
31
+ return [item]
32
+
33
+ device = get_device(local_rank)
34
+
35
+ if group is not None:
36
+ group_size = group.size()
37
+ else:
38
+ group_size = get_world_size()
39
+
40
+ tensor = torch.tensor([item], device=device, dtype=dtype)
41
+ output_tensors = [
42
+ torch.zeros(1, dtype=tensor.dtype, device=tensor.device)
43
+ for _ in range(group_size)
44
+ ]
45
+ torch.distributed.all_gather(output_tensors, tensor, group, async_op)
46
+ output = [elem.item() for elem in output_tensors]
47
+ return output
48
+
49
+
50
+ class DistributedSignalHandler:
51
+ def __init__(self, sig=signal.SIGTERM):
52
+ self.sig = sig
53
+
54
+ def signals_received(self):
55
+ all_received = all_gather_item(
56
+ self._signal_received, dtype=torch.int32
57
+ )
58
+ return all_received
59
+
60
+ def __enter__(self):
61
+ self._signal_received = False
62
+ self.released = False
63
+ self.original_handler = signal.getsignal(self.sig)
64
+
65
+ def handler(signum, frame):
66
+ self._signal_received = True
67
+
68
+ signal.signal(self.sig, handler)
69
+
70
+ return self
71
+
72
+ def __exit__(self, type, value, tb):
73
+ self.release()
74
+
75
+ def release(self):
76
+ if self.released:
77
+ return False
78
+
79
+ signal.signal(self.sig, self.original_handler)
80
+ self.released = True
81
+ return True
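A brief usage sketch of DistributedSignalHandler above, as a hypothetical training-loop guard; the loop body is a placeholder and is not part of this repository. Because signals_received() all-gathers the per-rank flag, every rank observes that some rank got SIGTERM and can checkpoint and exit on the same iteration.

import signal
from megatron.dist_signal_handler import DistributedSignalHandler

with DistributedSignalHandler(signal.SIGTERM) as handler:
    for step in range(1000):
        # ... run one training step here (placeholder) ...
        if any(handler.signals_received()):
            # some rank received SIGTERM; all ranks see it and stop together
            break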
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/enums.py ADDED
@@ -0,0 +1,34 @@
1
+ # coding=utf-8
2
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import enum
17
+
18
+ class LayerType(enum.Enum):
19
+ encoder = 1
20
+ decoder = 2
21
+
22
+ class AttnType(enum.Enum):
23
+ self_attn = 1
24
+ cross_attn = 2
25
+
26
+ class AttnMaskType(enum.Enum):
27
+ padding = 1
28
+ causal = 2
29
+ prefix = 3
30
+
31
+ class PositionEmbeddingType(enum.Enum):
32
+ rotary = 1
33
+ absolute = 2
34
+ alibi = 3
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/__init__.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
+
3
+ import os
4
+ import pathlib
5
+ import subprocess
6
+
7
+ import torch
8
+ from torch.utils import cpp_extension
9
+
10
+ # Setting this param to a list has a problem of generating different
11
+ # compilation commands (with different order of architectures) and
12
+ # leading to recompilation of fused kernels. Set it to an empty string
13
+ # to avoid recompilation and assign arch flags explicitly in
14
+ # extra_cuda_cflags below
15
+ os.environ["TORCH_CUDA_ARCH_LIST"] = ""
16
+
17
+
18
+ def load(args):
19
+
20
+ # Check if cuda 11 is installed for compute capability 8.0
21
+ cc_flag = []
22
+ if torch.version.hip is None:
23
+ _, bare_metal_major, bare_metal_minor = _get_cuda_bare_metal_version(
24
+ cpp_extension.CUDA_HOME)
25
+ if int(bare_metal_major) >= 11:
26
+ cc_flag.append('-gencode')
27
+ cc_flag.append('arch=compute_80,code=sm_80')
28
+ if int(bare_metal_minor) >= 1:
29
+ cc_flag.append('-gencode')
30
+ cc_flag.append('arch=compute_86,code=sm_86')
31
+ if int(bare_metal_minor) >= 4:
32
+ cc_flag.append('-gencode')
33
+ cc_flag.append('arch=compute_87,code=sm_87')
34
+ if int(bare_metal_minor) >= 8:
35
+ cc_flag.append('-gencode')
36
+ cc_flag.append('arch=compute_89,code=sm_89')
37
+ if int(bare_metal_major) >= 12:
38
+ cc_flag.append('-gencode')
39
+ cc_flag.append('arch=compute_90,code=sm_90')
40
+
41
+ # Build path
42
+ srcpath = pathlib.Path(__file__).parent.absolute()
43
+ buildpath = srcpath / 'build'
44
+ _create_build_dir(buildpath)
45
+
46
+ # Helper function to build the kernels.
47
+ def _cpp_extention_load_helper(name, sources, extra_cuda_flags, extra_include_paths):
48
+ if torch.version.hip is not None:
49
+ extra_cuda_cflags=['-O3'] + extra_cuda_flags + cc_flag
50
+ else:
51
+ extra_cuda_cflags=['-O3',
52
+ '-gencode', 'arch=compute_70,code=sm_70',
53
+ '--use_fast_math'] + extra_cuda_flags + cc_flag
54
+
55
+ return cpp_extension.load(
56
+ name=name,
57
+ sources=sources,
58
+ build_directory=buildpath,
59
+ extra_cflags=['-O3',],
60
+ extra_cuda_cflags=extra_cuda_cflags,
61
+ extra_include_paths=extra_include_paths,
62
+ verbose=(args.rank == 0)
63
+ )
64
+
65
+ # ==============
66
+ # Fused softmax.
67
+ # ==============
68
+
69
+ if torch.version.hip is not None:
70
+ extra_include_paths=[os.path.abspath(srcpath)]
71
+ else:
72
+ extra_include_paths=[]
73
+
74
+ if args.masked_softmax_fusion:
75
+ if torch.version.hip is not None:
76
+ extra_cuda_flags = ['-D__HIP_NO_HALF_OPERATORS__=1',
77
+ '-D__HIP_NO_HALF_CONVERSIONS__=1']
78
+ else:
79
+ extra_cuda_flags = ['-U__CUDA_NO_HALF_OPERATORS__',
80
+ '-U__CUDA_NO_HALF_CONVERSIONS__',
81
+ '--expt-relaxed-constexpr',
82
+ '--expt-extended-lambda']
83
+
84
+ # Upper triangular softmax.
85
+ sources=[srcpath / 'scaled_upper_triang_masked_softmax.cpp',
86
+ srcpath / 'scaled_upper_triang_masked_softmax_cuda.cu']
87
+ scaled_upper_triang_masked_softmax_cuda = _cpp_extention_load_helper(
88
+ "scaled_upper_triang_masked_softmax_cuda",
89
+ sources, extra_cuda_flags, extra_include_paths)
90
+
91
+ # Masked softmax.
92
+ sources=[srcpath / 'scaled_masked_softmax.cpp',
93
+ srcpath / 'scaled_masked_softmax_cuda.cu']
94
+ scaled_masked_softmax_cuda = _cpp_extention_load_helper(
95
+ "scaled_masked_softmax_cuda", sources, extra_cuda_flags, extra_include_paths)
96
+
97
+ # Softmax
98
+ sources=[srcpath / 'scaled_softmax.cpp',
99
+ srcpath / 'scaled_softmax_cuda.cu']
100
+ scaled_softmax_cuda = _cpp_extention_load_helper(
101
+ "scaled_softmax_cuda", sources, extra_cuda_flags, extra_include_paths)
102
+
103
+
104
+ def _get_cuda_bare_metal_version(cuda_dir):
105
+ raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
106
+ universal_newlines=True)
107
+ output = raw_output.split()
108
+ release_idx = output.index("release") + 1
109
+ release = output[release_idx].split(".")
110
+ bare_metal_major = release[0]
111
+ bare_metal_minor = release[1][0]
112
+
113
+ return raw_output, bare_metal_major, bare_metal_minor
114
+
115
+
116
+ def _create_build_dir(buildpath):
117
+ try:
118
+ os.mkdir(buildpath)
119
+ except OSError:
120
+ if not os.path.isdir(buildpath):
121
+ print(f"Creation of the build directory {buildpath} failed")
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.91 kB). View file
 
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/compat.h ADDED
@@ -0,0 +1,17 @@
1
+ /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */
2
+
3
+ /* This code is copied from NVIDIA apex:
4
+ * https://github.com/NVIDIA/apex
5
+ * with minor changes. */
6
+
7
+
8
+
9
+ #ifndef TORCH_CHECK
10
+ #define TORCH_CHECK AT_CHECK
11
+ #endif
12
+
13
+ #ifdef VERSION_GE_1_3
14
+ #define DATA_PTR data_ptr
15
+ #else
16
+ #define DATA_PTR data
17
+ #endif
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax.cpp ADDED
@@ -0,0 +1,83 @@
1
+ /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */
2
+
3
+ #include <cuda_fp16.h>
4
+ #include <torch/extension.h>
5
+ #include <vector>
6
+
7
+ namespace multihead_attn {
8
+ namespace fused_softmax {
9
+ namespace scaled_masked_softmax {
10
+
11
+ torch::Tensor fwd_cuda(
12
+ torch::Tensor const& input,
13
+ torch::Tensor const& mask,
14
+ float scale_factor);
15
+
16
+ torch::Tensor bwd_cuda(
17
+ torch::Tensor const& output_grads,
18
+ torch::Tensor const& softmax_results,
19
+ float scale_factor);
20
+
21
+ int get_batch_per_block_cuda(
22
+ int query_seq_len,
23
+ int key_seq_len,
24
+ int batches,
25
+ int attn_heads);
26
+
27
+ torch::Tensor fwd(
28
+ torch::Tensor const& input,
29
+ torch::Tensor const& mask,
30
+ float scale_factor) {
31
+ AT_ASSERTM(input.dim() == 4, "expected 4D tensor");
32
+ AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) ||
33
+ (input.scalar_type() == at::ScalarType::BFloat16),
34
+ "Only fp16 and bf16 are supported");
35
+ AT_ASSERTM(mask.dim() == 4, "expected 4D tensor");
36
+
37
+ return fwd_cuda(input, mask, scale_factor);
38
+ }
39
+
40
+ torch::Tensor bwd(
41
+ torch::Tensor const& output_grads,
42
+ torch::Tensor const& softmax_results,
43
+ float scale_factor) {
44
+
45
+ AT_ASSERTM(output_grads.dim() == 4, "expected 4D tensor");
46
+ AT_ASSERTM(softmax_results.dim() == 4, "expected 4D tensor");
47
+
48
+ AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) ||
49
+ (output_grads.scalar_type() == at::ScalarType::BFloat16),
50
+ "Only fp16 and bf16 are supported");
51
+ AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) ||
52
+ (softmax_results.scalar_type() == at::ScalarType::BFloat16),
53
+ "Only fp16 and bf16 are supported");
54
+
55
+ return bwd_cuda(output_grads, softmax_results, scale_factor);
56
+ }
57
+
58
+ int get_batch_per_block(
59
+ int query_seq_len,
60
+ int key_seq_len,
61
+ int batches,
62
+ int attn_heads) {
63
+ return get_batch_per_block_cuda(query_seq_len, key_seq_len, batches, attn_heads);
64
+ }
65
+
66
+ } // end namespace scaled_masked_softmax
67
+ } // end namespace fused_softmax
68
+ } // end namespace multihead_attn
69
+
70
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
71
+ m.def("forward",
72
+ &multihead_attn::fused_softmax::scaled_masked_softmax::fwd,
73
+ "Self Multihead Attention scaled, time masked softmax -- Forward.");
74
+
75
+ m.def("backward",
76
+ &multihead_attn::fused_softmax::scaled_masked_softmax::bwd,
77
+ "Self Multihead Attention scaled, time masked softmax -- Backward.");
78
+
79
+ m.def("get_batch_per_block",
80
+ &multihead_attn::fused_softmax::scaled_masked_softmax::get_batch_per_block,
81
+ "Return Batch per block size."
82
+ );
83
+ }
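As a rough, unfused reference for what the forward binding above computes, the following PyTorch sketch scales the scores, fills positions where mask == 1 with a large negative value, and softmaxes over the key dimension. It is only an approximation: it ignores the zeroing of fully masked rows that the fused kernel performs, and the shapes simply follow the asserts in fwd_cuda.

import torch

def scaled_masked_softmax_reference(inp, mask, scale):
    # inp:  [batches, attn_heads, query_seq_len, key_seq_len]
    # mask: [batches or 1, 1, query_seq_len, key_seq_len], where 1 marks masked-out positions
    scores = inp.float() * scale
    scores = scores.masked_fill(mask.bool(), -10000.0)
    return torch.softmax(scores, dim=-1)

inp = torch.randn(2, 4, 8, 8, dtype=torch.float16)
mask = torch.zeros(2, 1, 8, 8, dtype=torch.uint8)   # pad_batches == 1 style mask
out = scaled_masked_softmax_reference(inp, mask, scale=0.125).to(inp.dtype)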
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax.h ADDED
@@ -0,0 +1,710 @@
1
+ /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */
2
+
3
+ #pragma once
4
+
5
+ #include <assert.h>
6
+ #include <cuda_fp16.h>
7
+ #include <cfloat>
8
+ #include <limits>
9
+ #include <stdint.h>
10
+ #include <cuda_fp16.h>
11
+ #include <c10/macros/Macros.h>
12
+
13
+ namespace {
14
+
15
+ template <typename Datatype, int ELEMENTS_PER_LDG>
16
+ __device__ __inline__ void copy_vector(Datatype *dst, const Datatype *src);
17
+
18
+ template <>
19
+ __device__ __inline__ void copy_vector<c10::BFloat16, 1>(c10::BFloat16 *dst, const c10::BFloat16 *src) { *dst = *src; }
20
+
21
+ template <>
22
+ __device__ __inline__ void copy_vector<c10::BFloat16, 4>(c10::BFloat16 *dst, const c10::BFloat16 *src) { *((float2*) dst) = *((float2*) src); }
23
+
24
+ template <>
25
+ __device__ __inline__ void copy_vector<c10::Half, 1>(c10::Half *dst, const c10::Half *src) { *dst = *src; }
26
+
27
+ template <>
28
+ __device__ __inline__ void copy_vector<c10::Half, 4>(c10::Half *dst, const c10::Half *src) { *((float2*) dst) = *((float2*) src); }
29
+
30
+ template <>
31
+ __device__ __inline__ void copy_vector<uint8_t, 1>(uint8_t *dst, const uint8_t *src) { *dst = *src; }
32
+
33
+ template <>
34
+ __device__ __inline__ void copy_vector<uint8_t, 4>(uint8_t *dst, const uint8_t *src) {*((half2*) dst) = *((half2*) src); }
35
+
36
+ int log2_ceil(int value) {
37
+ int log2_value = 0;
38
+ while ((1 << log2_value) < value) ++log2_value;
39
+ return log2_value;
40
+ }
41
+
42
+ template<typename T>
43
+ struct Add {
44
+ __device__ __forceinline__ T operator()(T a, T b) const {
45
+ return a + b;
46
+ }
47
+ };
48
+
49
+ template<typename T>
50
+ struct Max {
51
+ __device__ __forceinline__ T operator()(T a, T b) const {
52
+ return a < b ? b : a;
53
+ }
54
+ };
55
+
56
+ template <typename T>
57
+ __device__ __forceinline__ T WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
58
+ {
59
+ #if CUDA_VERSION >= 9000
60
+ return __shfl_xor_sync(mask, value, laneMask, width);
61
+ #else
62
+ return __shfl_xor(value, laneMask, width);
63
+ #endif
64
+ }
65
+
66
+ template <typename acc_t, int WARP_BATCH, int WARP_SIZE, template<typename> class ReduceOp>
67
+ __device__ __forceinline__ void warp_reduce(acc_t* sum) {
68
+ ReduceOp<acc_t> r;
69
+ #pragma unroll
70
+ for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
71
+ #pragma unroll
72
+ for (int i = 0; i < WARP_BATCH; ++i) {
73
+ acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE);
74
+ sum[i] = r(sum[i], b);
75
+ }
76
+ }
77
+ }
78
+
79
+
80
+ /*
81
+ * Extended softmax (from native aten pytorch) with the following additional features
82
+ * 1) input scaling
83
+ */
84
+ template <typename input_t, typename output_t, typename acc_t, int log2_elements>
85
+ __global__ void scaled_softmax_warp_forward(
86
+ output_t *dst,
87
+ const input_t *src,
88
+ const acc_t scale,
89
+ int micro_batch_size,
90
+ int element_count)
91
+ {
92
+ // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
93
+ // warp_size of method warp_softmax_forward_kernel.
94
+ constexpr int next_power_of_two = 1 << log2_elements;
95
+ constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
96
+ constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
97
+ constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
98
+ constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4;
99
+
100
+ // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, )
101
+ // gridDim/blockIdx = (seq_len, attn_heads, batches)
102
+ int first_batch = (blockDim.y * (blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z))+ threadIdx.y) * WARP_BATCH;
103
+
104
+ // micro_batch_size might not be a multiple of WARP_BATCH. Check how
105
+ // many batches have to be computed within this WARP.
106
+ int local_batches = micro_batch_size - first_batch;
107
+ if (local_batches > WARP_BATCH)
108
+ local_batches = WARP_BATCH;
109
+
110
+ // there might be multiple batches per warp. compute the index within the batch
111
+ int local_idx = threadIdx.x;
112
+
113
+ src += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
114
+ dst += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
115
+
116
+ // load data from global memory
117
+ acc_t elements[WARP_BATCH][WARP_ITERATIONS];
118
+ input_t temp_data[ELEMENTS_PER_LDG_STG];
119
+ #pragma unroll
120
+ for (int i = 0; i < WARP_BATCH; ++i) {
121
+ int batch_element_count = (i >= local_batches) ? 0 : element_count;
122
+
123
+ #pragma unroll
124
+ for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
125
+ int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
126
+
127
+ if (element_index < batch_element_count) {
128
+ int itr_idx = i*element_count+it*WARP_SIZE;
129
+ copy_vector<input_t, ELEMENTS_PER_LDG_STG>(temp_data, src + itr_idx);
130
+
131
+ #pragma unroll
132
+ for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
133
+ elements[i][it + element] = (acc_t)temp_data[element] * scale;
134
+ }
135
+ } else {
136
+ #pragma unroll
137
+ for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
138
+ elements[i][it + element] = -std::numeric_limits<acc_t>::infinity();
139
+ }
140
+ }
141
+ }
142
+ }
143
+
144
+ // compute max_value
145
+ acc_t max_value[WARP_BATCH];
146
+ #pragma unroll
147
+ for (int i = 0; i < WARP_BATCH; ++i) {
148
+ max_value[i] = elements[i][0];
149
+ #pragma unroll
150
+ for (int it = 1; it < WARP_ITERATIONS; ++it) {
151
+ max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
152
+ }
153
+ }
154
+ warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);
155
+
156
+ acc_t sum[WARP_BATCH] { 0.0f };
157
+ #pragma unroll
158
+ for (int i = 0; i < WARP_BATCH; ++i) {
159
+ #pragma unroll
160
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
161
+ elements[i][it] = std::exp((elements[i][it] - max_value[i]));
162
+ sum[i] += elements[i][it];
163
+ }
164
+ }
165
+ warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
166
+
167
+ // store result
168
+ output_t out[ELEMENTS_PER_LDG_STG];
169
+ #pragma unroll
170
+ for (int i = 0; i < WARP_BATCH; ++i) {
171
+ if (i >= local_batches)
172
+ break;
173
+ #pragma unroll
174
+ for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
175
+ int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
176
+ if (element_index < element_count) {
177
+ #pragma unroll
178
+ for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
179
+ out[element] = elements[i][it + element] / sum[i];
180
+ }
181
+ copy_vector<output_t, ELEMENTS_PER_LDG_STG>(dst + i * element_count + it * WARP_SIZE, out);
182
+ } else {
183
+ break;
184
+ }
185
+ }
186
+ }
187
+ }
188
+
189
+
190
+ /*
191
+ * Extended softmax (from native aten pytorch) with the following additional features
192
+ * 1) input scaling
193
+ * 2) Explicit masking
194
+ */
195
+ template <typename input_t, typename output_t, typename acc_t, int log2_elements>
196
+ __global__ void scaled_masked_softmax_warp_forward(
197
+ output_t *dst,
198
+ const input_t *src,
199
+ const uint8_t *mask,
200
+ const acc_t scale,
201
+ int micro_batch_size,
202
+ int element_count,
203
+ int pad_batches)
204
+ {
205
+ // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
206
+ // warp_size of method warp_softmax_forward_kernel.
207
+ constexpr int next_power_of_two = 1 << log2_elements;
208
+ constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
209
+ constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
210
+ constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
211
+ constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4;
212
+
213
+ // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, )
214
+ // gridDim/blockIdx = (seq_len, attn_heads, batches)
215
+ int first_batch = (blockDim.y * (blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z))+ threadIdx.y) * WARP_BATCH;
216
+ int pad_first_batch = 0;
217
+ if (pad_batches != 1) { // bert style
218
+ pad_first_batch = (blockDim.y * (blockIdx.x + gridDim.x * blockIdx.z) + threadIdx.y) * WARP_BATCH;
219
+ } else { // gpt2 style
220
+ pad_first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
221
+ }
222
+
223
+ // micro_batch_size might not be a multiple of WARP_BATCH. Check how
224
+ // many batches have to be computed within this WARP.
225
+ int local_batches = micro_batch_size - first_batch;
226
+ if (local_batches > WARP_BATCH)
227
+ local_batches = WARP_BATCH;
228
+
229
+ // there might be multiple batches per warp. compute the index within the batch
230
+ int local_idx = threadIdx.x;
231
+
232
+ src += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
233
+ dst += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
234
+ mask += pad_first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
235
+
236
+ // load data from global memory
237
+ acc_t elements[WARP_BATCH][WARP_ITERATIONS];
238
+ input_t temp_data[ELEMENTS_PER_LDG_STG];
239
+ uint8_t temp_mask[ELEMENTS_PER_LDG_STG];
240
+ #pragma unroll
241
+ for (int i = 0; i < WARP_BATCH; ++i) {
242
+ int batch_element_count = (i >= local_batches) ? 0 : element_count;
243
+
244
+ #pragma unroll
245
+ for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
246
+ int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
247
+
248
+ if (element_index < batch_element_count) {
249
+ int itr_idx = i*element_count+it*WARP_SIZE;
250
+ copy_vector<input_t, ELEMENTS_PER_LDG_STG>(temp_data, src + itr_idx);
251
+ copy_vector<uint8_t, ELEMENTS_PER_LDG_STG>(temp_mask, mask + itr_idx);
252
+
253
+ #pragma unroll
254
+ for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
255
+ if (temp_mask[element] != 1) {
256
+ elements[i][it + element] = (acc_t)temp_data[element] * scale;
257
+ } else {
258
+ elements[i][it + element] = -10000.0;
259
+ }
260
+ }
261
+ } else {
262
+ #pragma unroll
263
+ for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
264
+ elements[i][it + element] = -std::numeric_limits<acc_t>::infinity();
265
+ }
266
+ }
267
+ }
268
+ }
269
+
270
+ // compute max_value
271
+ acc_t max_value[WARP_BATCH];
272
+ #pragma unroll
273
+ for (int i = 0; i < WARP_BATCH; ++i) {
274
+ max_value[i] = elements[i][0];
275
+ #pragma unroll
276
+ for (int it = 1; it < WARP_ITERATIONS; ++it) {
277
+ max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
278
+ }
279
+ }
280
+ warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);
281
+
282
+ // compute scale value to account for full mask
283
+ acc_t scale_value[WARP_BATCH];
284
+ #pragma unroll
285
+ for (int i = 0; i < WARP_BATCH; ++i) {
286
+ scale_value[i] = (max_value[i] == -10000.0) ? 0.0 : 1.0;
287
+ }
288
+
289
+ acc_t sum[WARP_BATCH] { 0.0f };
290
+ #pragma unroll
291
+ for (int i = 0; i < WARP_BATCH; ++i) {
292
+ #pragma unroll
293
+ for (int it = 0; it < WARP_ITERATIONS; ++it) {
294
+ elements[i][it] = std::exp((elements[i][it] - max_value[i]));
295
+ sum[i] += elements[i][it];
296
+ }
297
+ }
298
+ warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
299
+
300
+ // store result
301
+ output_t out[ELEMENTS_PER_LDG_STG];
302
+ #pragma unroll
303
+ for (int i = 0; i < WARP_BATCH; ++i) {
304
+ if (i >= local_batches)
305
+ break;
306
+ #pragma unroll
307
+ for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
308
+ int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
309
+ if (element_index < element_count) {
310
+ #pragma unroll
311
+ for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
312
+ out[element] = elements[i][it + element] * scale_value[i] / sum[i];
313
+ }
314
+ copy_vector<output_t, ELEMENTS_PER_LDG_STG>(dst + i * element_count + it * WARP_SIZE, out);
315
+ } else {
316
+ break;
317
+ }
318
+ }
319
+ }
320
+ }
321
+
322
+ template <typename input_t, typename output_t, typename acc_t, int log2_elements>
323
+ __global__ void scaled_masked_softmax_warp_backward(
324
+ output_t *gradInput,
325
+ input_t *grad,
326
+ const input_t *output,
327
+ acc_t scale,
328
+ int micro_batch_size,
329
+ int element_count)
330
+ {
331
+ // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
332
+ // warp_size of method warp_softmax_backward_kernel.
333
+ constexpr int next_power_of_two = 1 << log2_elements;
334
+ constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
335
+ constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
336
+ constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
337
+ constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4;
338
+
339
+ // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, )
340
+ // gridDim/blockIdx = (seq_len, attn_heads, batches)
341
+ int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
342
+
343
+ // micro_batch_size might not be a multiple of WARP_BATCH. Check how
344
+ // many batches have to be computed within this WARP.
345
+ int local_batches = micro_batch_size - first_batch;
346
+ if (local_batches > WARP_BATCH)
347
+ local_batches = WARP_BATCH;
348
+
349
+ // there might be multiple batches per warp. compute the index within the batch
350
+ int local_idx = threadIdx.x;
351
+
352
+ // the first element to process by the current thread
353
+ int thread_offset = first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
354
+ grad += thread_offset;
355
+ output += thread_offset;
356
+ gradInput += thread_offset;
357
+
358
+ // load data from global memory
359
+ acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f };
360
+ acc_t output_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f };
361
+ input_t temp_grad[ELEMENTS_PER_LDG_STG];
362
+ input_t temp_output[ELEMENTS_PER_LDG_STG];
363
+ #pragma unroll
364
+ for (int i = 0; i < WARP_BATCH; ++i) {
365
+ int batch_element_count = (i >= local_batches) ? 0 : element_count;
366
+
367
+ #pragma unroll
368
+ for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
369
+ int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
370
+ if (element_index < batch_element_count) {
371
+ copy_vector<input_t, ELEMENTS_PER_LDG_STG>(temp_grad, grad + i * element_count + it * WARP_SIZE);
372
+ copy_vector<input_t, ELEMENTS_PER_LDG_STG>(temp_output, output + i * element_count + it * WARP_SIZE);
373
+
374
+ #pragma unroll
375
+ for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
376
+ output_reg[i][it + element] = (acc_t)temp_output[element];
377
+ }
378
+ #pragma unroll
379
+ for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
380
+ grad_reg[i][it + element] = (acc_t)temp_grad[element] * output_reg[i][it + element];
381
+ }
382
+ }
383
+ }
384
+ }
385
+
386
+ acc_t sum[WARP_BATCH];
387
+ #pragma unroll
388
+ for (int i = 0; i < WARP_BATCH; ++i) {
389
+ sum[i] = grad_reg[i][0];
390
+ #pragma unroll
391
+ for (int it = 1; it < WARP_ITERATIONS; ++it) {
392
+ sum[i] += grad_reg[i][it];
393
+ }
394
+ }
395
+ warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
396
+
397
+ // store result
398
+ #pragma unroll
399
+ for (int i = 0; i < WARP_BATCH; ++i) {
400
+ if (i >= local_batches)
401
+ break;
402
+ #pragma unroll
403
+ for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
404
+ int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
405
+ if (element_index < element_count) {
406
+ // compute gradients
407
+ output_t out[ELEMENTS_PER_LDG_STG];
408
+ #pragma unroll
409
+ for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
410
+ out[element] = (output_t)(scale * (grad_reg[i][it + element] - output_reg[i][it + element] * sum[i]));
411
+ }
412
+ copy_vector<output_t, ELEMENTS_PER_LDG_STG>(gradInput + i * element_count + it * WARP_SIZE, out);
413
+ }
414
+ }
415
+ }
416
+ }
417
+ } // end of anonymous namespace
418
+
419
+ int get_batch_per_block(int query_seq_len, int key_seq_len, int batches, int attn_heads){
420
+ int log2_elements = log2_ceil(key_seq_len);
421
+ const int next_power_of_two = 1 << log2_elements;
422
+
423
+ int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
424
+ int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
425
+
426
+ constexpr int threads_per_block = 128;
427
+ int warps_per_block = (threads_per_block / warp_size);
428
+ int batches_per_block = warps_per_block * batches_per_warp;
429
+
430
+ return batches_per_block;
431
+ }
432
+
433
+ template<typename input_t, typename output_t, typename acc_t>
434
+ void dispatch_scaled_softmax_forward(
435
+ output_t *dst,
436
+ const input_t *src,
437
+ const input_t scale,
438
+ int query_seq_len,
439
+ int key_seq_len,
440
+ int batches,
441
+ int attn_heads)
442
+ {
443
+ TORCH_INTERNAL_ASSERT(key_seq_len >= 0 && key_seq_len <= 4096 );
444
+ if (key_seq_len == 0) {
445
+ return;
446
+ } else {
447
+ int log2_elements = log2_ceil(key_seq_len);
448
+ const int next_power_of_two = 1 << log2_elements;
449
+ int batch_count = batches * attn_heads * query_seq_len;
450
+
451
+ // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
452
+ int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
453
+
454
+ // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
455
+ int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
456
+
457
+ // use 128 threads per block to maximize gpu utilization
458
+ constexpr int threads_per_block = 128;
459
+
460
+ int warps_per_block = (threads_per_block / warp_size);
461
+ int batches_per_block = warps_per_block * batches_per_warp;
462
+ TORCH_INTERNAL_ASSERT(query_seq_len%batches_per_block == 0);
463
+ dim3 blocks(query_seq_len/batches_per_block, attn_heads, batches);
464
+ dim3 threads(warp_size, warps_per_block, 1);
465
+ // Launch code would be more elegant if C++ supported FOR CONSTEXPR
466
+ switch (log2_elements) {
467
+ case 0: // 1
468
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 0>
469
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
470
+ break;
471
+ case 1: // 2
472
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 1>
473
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
474
+ break;
475
+ case 2: // 4
476
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 2>
477
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
478
+ break;
479
+ case 3: // 8
480
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 3>
481
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
482
+ break;
483
+ case 4: // 16
484
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 4>
485
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
486
+ break;
487
+ case 5: // 32
488
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 5>
489
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
490
+ break;
491
+ case 6: // 64
492
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 6>
493
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
494
+ break;
495
+ case 7: // 128
496
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 7>
497
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
498
+ break;
499
+ case 8: // 256
500
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 8>
501
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
502
+ break;
503
+ case 9: // 512
504
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 9>
505
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
506
+ break;
507
+ case 10: // 1024
508
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 10>
509
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
510
+ break;
511
+ case 11: // 2048
512
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 11>
513
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
514
+ break;
515
+ case 12: // 4096
516
+ scaled_softmax_warp_forward<input_t, output_t, acc_t, 12>
517
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, scale, batch_count, key_seq_len);
518
+ break;
519
+ default:
520
+ break;
521
+ }
522
+ }
523
+ }
524
+
525
+ template<typename input_t, typename output_t, typename acc_t>
526
+ void dispatch_scaled_masked_softmax_forward(
527
+ output_t *dst,
528
+ const input_t *src,
529
+ const uint8_t *mask,
530
+ const input_t scale,
531
+ int query_seq_len,
532
+ int key_seq_len,
533
+ int batches,
534
+ int attn_heads,
535
+ int pad_batches)
536
+ {
537
+ TORCH_INTERNAL_ASSERT(key_seq_len >= 0 && key_seq_len <= 4096 );
538
+ if (key_seq_len == 0) {
539
+ return;
540
+ } else {
541
+ int log2_elements = log2_ceil(key_seq_len);
542
+ const int next_power_of_two = 1 << log2_elements;
543
+ int batch_count = batches * attn_heads * query_seq_len;
544
+
545
+ // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
546
+ int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
547
+
548
+ // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
549
+ int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
550
+
551
+ // use 128 threads per block to maximize gpu utilization
552
+ constexpr int threads_per_block = 128;
553
+
554
+ int warps_per_block = (threads_per_block / warp_size);
555
+ int batches_per_block = warps_per_block * batches_per_warp;
556
+ TORCH_INTERNAL_ASSERT(query_seq_len%batches_per_block == 0);
557
+ dim3 blocks(query_seq_len/batches_per_block, attn_heads, batches);
558
+ dim3 threads(warp_size, warps_per_block, 1);
559
+ // Launch code would be more elegant if C++ supported FOR CONSTEXPR
560
+ switch (log2_elements) {
561
+ case 0: // 1
562
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 0>
563
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
564
+ break;
565
+ case 1: // 2
566
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 1>
567
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
568
+ break;
569
+ case 2: // 4
570
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 2>
571
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
572
+ break;
573
+ case 3: // 8
574
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 3>
575
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
576
+ break;
577
+ case 4: // 16
578
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 4>
579
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
580
+ break;
581
+ case 5: // 32
582
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 5>
583
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
584
+ break;
585
+ case 6: // 64
586
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 6>
587
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
588
+ break;
589
+ case 7: // 128
590
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 7>
591
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
592
+ break;
593
+ case 8: // 256
594
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 8>
595
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
596
+ break;
597
+ case 9: // 512
598
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 9>
599
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
600
+ break;
601
+ case 10: // 1024
602
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 10>
603
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
604
+ break;
605
+ case 11: // 2048
606
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 11>
607
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
608
+ break;
609
+ case 12: // 4096
610
+ scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 12>
611
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
612
+ break;
613
+ default:
614
+ break;
615
+ }
616
+ }
617
+ }
618
+
619
+ template<typename input_t, typename output_t, typename acc_t>
620
+ void dispatch_scaled_masked_softmax_backward(
621
+ output_t *grad_input,
622
+ input_t *grad,
623
+ const input_t *output,
624
+ const acc_t scale,
625
+ int query_seq_len,
626
+ int key_seq_len,
627
+ int batches,
628
+ int attn_heads)
629
+ {
630
+ TORCH_INTERNAL_ASSERT( key_seq_len >= 0 && key_seq_len <= 4096 );
631
+ if (key_seq_len == 0) {
632
+ return;
633
+ } else {
634
+ int log2_elements = log2_ceil(key_seq_len);
635
+ const int next_power_of_two = 1 << log2_elements;
636
+ int batch_count = batches * attn_heads * query_seq_len;
637
+
638
+ // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward.
639
+ int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
640
+
641
+ // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward.
642
+ int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
643
+
644
+ // use 128 threads per block to maximize gpu utilization
645
+ constexpr int threads_per_block = 128;
646
+
647
+ int warps_per_block = (threads_per_block / warp_size);
648
+ int batches_per_block = warps_per_block * batches_per_warp;
649
+ int blocks = batch_count/batches_per_block;
650
+ dim3 threads(warp_size, warps_per_block, 1);
651
+ // Launch code would be more elegant if C++ supported FOR CONSTEXPR
652
+ switch (log2_elements) {
653
+ case 0: // 1
654
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 0>
655
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
656
+ break;
657
+ case 1: // 2
658
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 1>
659
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
660
+ break;
661
+ case 2: // 4
662
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 2>
663
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
664
+ break;
665
+ case 3: // 8
666
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 3>
667
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
668
+ break;
669
+ case 4: // 16
670
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 4>
671
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
672
+ break;
673
+ case 5: // 32
674
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 5>
675
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
676
+ break;
677
+ case 6: // 64
678
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 6>
679
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
680
+ break;
681
+ case 7: // 128
682
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 7>
683
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
684
+ break;
685
+ case 8: // 256
686
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 8>
687
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
688
+ break;
689
+ case 9: // 512
690
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 9>
691
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
692
+ break;
693
+ case 10: // 1024
694
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 10>
695
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
696
+ break;
697
+ case 11: // 2048
698
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 11>
699
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
700
+ break;
701
+ case 12: // 4096
702
+ scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 12>
703
+ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
704
+ break;
705
+
706
+ default:
707
+ break;
708
+ }
709
+ }
710
+ }
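The comments in the dispatcher above stress that `warp_size` and `batches_per_warp` must agree with the `WARP_SIZE`/`WARP_BATCH` constexpr values inside the warp kernels. As a sanity aid, here is a small Python mirror of that launch-geometry arithmetic (illustration only, not part of the diff); `C10_WARP_SIZE` is assumed to be 32, the NVIDIA warp size.

```python
# Sketch: Python mirror of the launch-geometry arithmetic in
# dispatch_scaled_masked_softmax_backward above (illustration only).
C10_WARP_SIZE = 32  # assumed NVIDIA warp size; HIP wavefronts differ


def log2_ceil(value: int) -> int:
    # smallest log2 such that 2**log2 >= value, mirroring the log2_ceil helper used above
    log2 = 0
    while (1 << log2) < value:
        log2 += 1
    return log2


def launch_geometry(query_seq_len, key_seq_len, batches, attn_heads):
    assert 0 < key_seq_len <= 4096
    log2_elements = log2_ceil(key_seq_len)
    next_power_of_two = 1 << log2_elements
    batch_count = batches * attn_heads * query_seq_len

    # must agree with WARP_SIZE / WARP_BATCH inside the warp kernels
    warp_size = min(next_power_of_two, C10_WARP_SIZE)
    batches_per_warp = 2 if next_power_of_two <= 128 else 1

    threads_per_block = 128  # fixed, as in the dispatcher
    warps_per_block = threads_per_block // warp_size
    batches_per_block = warps_per_block * batches_per_warp
    blocks = batch_count // batches_per_block
    return blocks, (warp_size, warps_per_block, 1)


# Example: batch 8, 16 heads, 1024 x 1024 scores -> (32768, (32, 4, 1))
print(launch_geometry(1024, 1024, 8, 16))
```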
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax_cuda.cu ADDED
@@ -0,0 +1,109 @@
+ /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */
+
+ #include <ATen/ATen.h>
+ #include <cuda.h>
+ #include <cuda_runtime.h>
+ #include <cuda_fp16.h>
+ #ifndef __HIP_PLATFORM_HCC__
+ #include <cuda_profiler_api.h>
+ #endif
+ #include <ATen/cuda/CUDAContext.h>
+ #include <torch/extension.h>
+ #include "scaled_masked_softmax.h"
+ #include "type_shim.h"
+
+ namespace multihead_attn {
+ namespace fused_softmax {
+ namespace scaled_masked_softmax {
+
+ int get_batch_per_block_cuda(int query_seq_len, int key_seq_len, int batches, int attn_heads){
+     return get_batch_per_block(query_seq_len, key_seq_len, batches, attn_heads);
+ }
+
+ torch::Tensor fwd_cuda(
+     torch::Tensor const& input,
+     torch::Tensor const& mask,
+     float scale_factor)
+ {
+     // input is a 4D tensor with dimensions [batches, attn_heads, query_seq_len, key_seq_len]
+     const int batches = input.size(0);
+     const int pad_batches = mask.size(0);
+     const int attn_heads = input.size(1);
+     const int query_seq_len = input.size(2);
+     const int key_seq_len = input.size(3);
+     TORCH_INTERNAL_ASSERT(key_seq_len <= 4096);
+     TORCH_INTERNAL_ASSERT(query_seq_len > 1);
+     TORCH_INTERNAL_ASSERT(pad_batches == 1 || pad_batches == batches);
+     TORCH_INTERNAL_ASSERT(mask.size(1) == 1);
+     TORCH_INTERNAL_ASSERT(mask.size(2) == query_seq_len);
+     TORCH_INTERNAL_ASSERT(mask.size(3) == key_seq_len);
+
+     // Output
+     auto act_options = input.options().requires_grad(false);
+     torch::Tensor softmax_results =
+         torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options);
+
+     // Softmax intermediate result pointers
+     void* input_ptr = static_cast<void*>(input.data_ptr());
+     void* mask_ptr = static_cast<void*>(mask.data_ptr());
+     void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());
+
+     DISPATCH_HALF_AND_BFLOAT(
+         input.scalar_type(),
+         "dispatch_scaled_masked_softmax_forward",
+         dispatch_scaled_masked_softmax_forward<scalar_t, scalar_t, float>(
+             reinterpret_cast<scalar_t*>(softmax_results_ptr),
+             reinterpret_cast<const scalar_t*>(input_ptr),
+             reinterpret_cast<const uint8_t*>(mask_ptr),
+             scale_factor,
+             query_seq_len,
+             key_seq_len,
+             batches,
+             attn_heads,
+             pad_batches);
+     );
+     return softmax_results;
+ }
+
+ torch::Tensor bwd_cuda(
+     torch::Tensor const& output_grads_,
+     torch::Tensor const& softmax_results_,
+     float scale_factor) {
+
+     auto output_grads = output_grads_.contiguous();
+     auto softmax_results = softmax_results_.contiguous();
+
+     // output grads is a 4D tensor with dimensions [batches, attn_heads, query_seq_len, key_seq_len]
+     const int batches = output_grads.size(0);
+     const int attn_heads = output_grads.size(1);
+     const int query_seq_len = output_grads.size(2);
+     const int key_seq_len = output_grads.size(3);
+
+     auto act_options = output_grads.options().requires_grad(false);
+     torch::Tensor input_grads =
+         torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options);
+
+     void* output_grads_ptr = static_cast<void*>(output_grads.data_ptr());
+     void* input_grads_ptr = static_cast<void*>(input_grads.data_ptr());
+
+     // Softmax grad
+     DISPATCH_HALF_AND_BFLOAT(
+         output_grads_.scalar_type(),
+         "dispatch_scaled_masked_softmax_backward",
+         dispatch_scaled_masked_softmax_backward<scalar_t, scalar_t, float>(
+             reinterpret_cast<scalar_t*>(input_grads_ptr),
+             reinterpret_cast<scalar_t*>(output_grads_ptr),
+             reinterpret_cast<scalar_t const*>(softmax_results.data_ptr()),
+             scale_factor,
+             query_seq_len,
+             key_seq_len,
+             batches,
+             attn_heads);
+     );
+
+     return input_grads;
+ }
+ }
+ }
+ }
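For orientation, here is a hedged sketch of how `fwd_cuda`/`bwd_cuda` above are typically driven from Python. It assumes a companion pybind11 binding (analogous to `scaled_softmax.cpp` below) compiled into a module named `scaled_masked_softmax_cuda` that exposes `forward` and `backward`; the module name and binding are assumptions for illustration, not part of this diff.

```python
import torch

# Assumption: a pybind11 binding module named `scaled_masked_softmax_cuda`
# exposes forward(input, mask, scale) and backward(grads, softmax, scale),
# wrapping fwd_cuda/bwd_cuda above (hypothetical name, illustration only).
import scaled_masked_softmax_cuda


class ScaledMaskedSoftmax(torch.autograd.Function):
    """Sketch of an autograd wrapper around the fused forward/backward kernels."""

    @staticmethod
    def forward(ctx, inputs, mask, scale):
        # inputs: [batches, attn_heads, query_seq_len, key_seq_len], fp16/bf16
        # mask:   [batches or 1, 1, query_seq_len, key_seq_len], uint8
        softmax_results = scaled_masked_softmax_cuda.forward(inputs, mask, scale)
        ctx.save_for_backward(softmax_results)
        ctx.scale = scale
        return softmax_results

    @staticmethod
    def backward(ctx, output_grads):
        (softmax_results,) = ctx.saved_tensors
        input_grads = scaled_masked_softmax_cuda.backward(
            output_grads, softmax_results, ctx.scale
        )
        # no gradients for the mask or the scale
        return input_grads, None, None
```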
docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_softmax.cpp ADDED
@@ -0,0 +1,61 @@
+ /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */
+
+ #include <cuda_fp16.h>
+ #include <torch/extension.h>
+ #include <vector>
+
+ namespace multihead_attn {
+ namespace fused_softmax {
+ namespace scaled_softmax {
+
+ torch::Tensor fwd_cuda(
+     torch::Tensor const& input,
+     float scale_factor);
+
+ torch::Tensor bwd_cuda(
+     torch::Tensor const& output_grads,
+     torch::Tensor const& softmax_results,
+     float scale_factor);
+
+ torch::Tensor fwd(
+     torch::Tensor const& input,
+     float scale_factor) {
+     AT_ASSERTM(input.dim() == 4, "expected 4D tensor");
+     AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) ||
+                (input.scalar_type() == at::ScalarType::BFloat16),
+                "Only fp16 and bf16 are supported");
+
+     return fwd_cuda(input, scale_factor);
+ }
+
+ torch::Tensor bwd(
+     torch::Tensor const& output_grads,
+     torch::Tensor const& softmax_results,
+     float scale_factor) {
+
+     AT_ASSERTM(output_grads.dim() == 4, "expected 4D tensor");
+     AT_ASSERTM(softmax_results.dim() == 4, "expected 4D tensor");
+
+     AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) ||
+                (output_grads.scalar_type() == at::ScalarType::BFloat16),
+                "Only fp16 and bf16 are supported");
+     AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) ||
+                (softmax_results.scalar_type() == at::ScalarType::BFloat16),
+                "Only fp16 and bf16 are supported");
+
+     return bwd_cuda(output_grads, softmax_results, scale_factor);
+ }
+
+ } // end namespace scaled_softmax
+ } // end namespace fused_softmax
+ } // end namespace multihead_attn
+
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+     m.def("forward",
+           &multihead_attn::fused_softmax::scaled_softmax::fwd,
+           "Self Multihead Attention scaled, softmax -- Forward.");
+     m.def("backward",
+           &multihead_attn::fused_softmax::scaled_softmax::bwd,
+           "Self Multihead Attention scaled, softmax -- Backward.");
+ }
+
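Since the module above binds `forward` and `backward`, one quick way to exercise it is to JIT-compile the extension with `torch.utils.cpp_extension.load` and run a forward/backward pass on a half-precision score tensor. This is a sketch under assumptions: the source paths, compiler flags, and tensor shapes below are illustrative, not prescribed by the diff.

```python
import torch
from torch.utils.cpp_extension import load

# Assumed source paths; adjust to the actual checkout layout.
scaled_softmax_cuda = load(
    name="scaled_softmax_cuda",
    sources=[
        "megatron/fused_kernels/scaled_softmax.cpp",
        "megatron/fused_kernels/scaled_softmax_cuda.cu",
    ],
    extra_cuda_cflags=["-O3"],
)

# 4D attention scores in fp16, as required by the fwd() checks above.
scores = torch.randn(8, 16, 1024, 1024, dtype=torch.float16, device="cuda")
scale = 1.0 / 8.0  # e.g. 1/sqrt(head_dim) for head_dim = 64

probs = scaled_softmax_cuda.forward(scores, scale)
grads = scaled_softmax_cuda.backward(torch.ones_like(probs), probs, scale)
print(probs.shape, grads.shape)
```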