diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__init__.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..541df1cceacd8ab5d056e90dee7b0b54436b20b0 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__init__.py @@ -0,0 +1,23 @@ +# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import torch + +from .global_vars import get_args, get_retro_args +from .global_vars import get_current_global_batch_size +from .global_vars import get_num_microbatches +from .global_vars import get_num_eval_microbatches +from .global_vars import get_num_microbatches_by_mode +from .global_vars import get_signal_handler +from .global_vars import update_num_microbatches +from .global_vars import get_tokenizer +from .global_vars import get_tensorboard_writer +from .global_vars import get_adlr_autoresume +from .global_vars import get_timers +from .initialize import initialize_megatron + +from .utils import (print_rank_0, + is_last_rank, + print_rank_last, + is_rank_0, + is_aml) diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/arguments.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/arguments.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bc8e0c87a6f203828d47920e98aa4d5a856d944 Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/arguments.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/checkpointing.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/checkpointing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fbebd5094ed5d498348aa5caecb39d2a533d203 Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/checkpointing.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/dist_signal_handler.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/dist_signal_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84a52a2895c31fbac4f6fb68283054e4c82081e1 Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/dist_signal_handler.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/global_vars.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/global_vars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dea21aded21a93c9e117f5c3385005410b348b1 Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/global_vars.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/profiler.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de6cafe4464b2e272a447447b01dccab271412a1 Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/profiler.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/timers.cpython-310.pyc 
b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/timers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d747359430f03eceecda9b551cd3ca053d76d4ad Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/timers.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/training.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/training.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be18e0792b9c53de407124e67a6609d67a0b8bbd Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/__pycache__/training.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/checkpointing.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..e335b3c6fd33850bb7ff6e6bd0a6aaf57b0d7575 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/checkpointing.py @@ -0,0 +1,797 @@ +# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +"""Input/output checkpointing.""" + +import os +import random +import sys +import numpy as np +from deepspeed.accelerator import get_accelerator +import torch + +from megatron import update_num_microbatches, get_tokenizer +from megatron.core import mpu, tensor_parallel +from .global_vars import get_args +from tools import verify_checkpoint +from .utils import (unwrap_model, + print_rank_0, + is_rank_0) + +from deepspeed.checkpoint import ( + ORIGINAL_VOCAB_SIZE, + PADDED_VOCAB_SIZE, + UNIVERSAL_CHECKPOINT_INFO, + UNIVERSAL_CHECKPOINT_VERSION_KEY, + UNIVERSAL_CHECKPOINT_VERSION_VALUE, +) + +_CHECKPOINT_VERSION = None + + +def set_checkpoint_version(value): + global _CHECKPOINT_VERSION + if _CHECKPOINT_VERSION is not None: + assert _CHECKPOINT_VERSION == value, \ + "checkpoint versions do not match" + _CHECKPOINT_VERSION = value + + +def get_checkpoint_version(): + global _CHECKPOINT_VERSION + return _CHECKPOINT_VERSION + + +def check_checkpoint_args(checkpoint_args): + """Ensure fixed arguments for a model are the same for the input + arguments and the one retrieved from checkpoint.""" + args = get_args() + + def _compare(arg_name, old_arg_name=None, default=None): + if old_arg_name is not None: + ckpt_arg_name = old_arg_name + else: + ckpt_arg_name = arg_name + if default is not None: + checkpoint_value = getattr(checkpoint_args, ckpt_arg_name, default) + else: + checkpoint_value = getattr(checkpoint_args, ckpt_arg_name) + args_value = getattr(args, arg_name) + error_message = '{} value from checkpoint ({}) is not equal to the ' \ + 'input argument value ({}).'.format( + arg_name, checkpoint_value, args_value) + assert checkpoint_value == args_value, error_message + + if not args.mos and not args.kd: + _compare('num_layers') + _compare('hidden_size') + _compare('num_attention_heads') + _compare('num_key_value_heads') + _compare('add_position_embedding', default=True) + if args.vocab_file: + _compare('max_position_embeddings') + if not args.universal_checkpoint: + _compare('make_vocab_size_divisible_by') + _compare('padded_vocab_size') + _compare('tokenizer_type') + if args.data_parallel_random_init: + _compare('data_parallel_random_init') + if get_checkpoint_version() < 3.0 and not args.universal_checkpoint: + 
_compare('tensor_model_parallel_size', + old_arg_name='model_parallel_size') + if get_checkpoint_version() >= 3.0 and not args.universal_checkpoint: + _compare('tensor_model_parallel_size') + _compare('pipeline_model_parallel_size') + + +def ensure_directory_exists(filename): + """Build filename's path if it does not already exists.""" + dirname = os.path.dirname(filename) + os.makedirs(dirname, exist_ok = True) + + +def get_checkpoint_name(checkpoints_path, iteration, release=False, + pipeline_parallel=None, + tensor_rank=None, pipeline_rank=None): + """Determine the directory name for this rank's checkpoint.""" + if release: + directory = 'release' + else: + directory = 'iter_{:07d}'.format(iteration) + + # Use both the tensor and pipeline MP rank. + if pipeline_parallel is None: + pipeline_parallel = (mpu.get_pipeline_model_parallel_world_size() > 1) + if tensor_rank is None: + tensor_rank = mpu.get_tensor_model_parallel_rank() + if pipeline_rank is None: + pipeline_rank = mpu.get_pipeline_model_parallel_rank() + + # Use both the tensor and pipeline MP rank. If using the distributed + # optimizer, then the optimizer's path must additionally include the + # data parallel rank. + if not pipeline_parallel: + common_path = os.path.join(checkpoints_path, directory, + f'mp_rank_{tensor_rank:02d}') + else: + common_path = os.path.join(checkpoints_path, directory, + f'mp_rank_{tensor_rank:02d}_{pipeline_rank:03d}') + + return os.path.join(common_path, "model_optim_rng.pt") + + +def get_distributed_optimizer_checkpoint_name(model_checkpoint_name): + return os.path.join(os.path.dirname(model_checkpoint_name), + "distrib_optim.pt") + + +def find_checkpoint_rank_0(checkpoints_path, iteration, release=False): + """Finds the checkpoint for rank 0 without knowing if we are using + pipeline parallelism or not. + + Since the checkpoint naming scheme changes if pipeline parallelism + is present, we need to look for both naming schemes if we don't + know if the checkpoint has pipeline parallelism. + """ + + # Look for checkpoint with no pipelining + filename = get_checkpoint_name(checkpoints_path, iteration, release, + pipeline_parallel=False, + tensor_rank=0, pipeline_rank=0) + if os.path.isfile(filename): + return filename + + # Look for checkpoint with pipelining + filename = get_checkpoint_name(checkpoints_path, iteration, release, + pipeline_parallel=True, + tensor_rank=0, pipeline_rank=0) + if os.path.isfile(filename): + return filename + + return None, None + + +def get_checkpoint_tracker_filename(checkpoints_path): + + """Tracker file rescords the latest chckpoint during + training to restart from.""" + return os.path.join(checkpoints_path, 'latest_checkpointed_iteration.txt') + + +def read_metadata(tracker_filename): + # Read the tracker file and either set the iteration or + # mark it as a release checkpoint. + iteration = 0 + release = False + with open(tracker_filename, 'r') as f: + metastring = f.read().strip() + try: + iteration = int(metastring) + except ValueError: + release = metastring == 'release' + if not release: + print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format( + tracker_filename)) + sys.exit() + assert iteration > 0 or release, 'error parsing metadata file {}'.format( + tracker_filename) + + # Get the max iteration retrieved across the ranks. 
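+    # Ranks can in principle read different tracker contents (e.g. if one
+    # rank reads while another is still writing the file), so the all-reduce
+    # below makes every rank adopt the MAX iteration seen anywhere; a
+    # mismatching rank only prints a warning instead of failing.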
+ if torch.distributed.is_initialized(): + iters_cuda = get_accelerator().LongTensor([iteration]) + torch.distributed.all_reduce(iters_cuda, op=torch.distributed.ReduceOp.MAX) + max_iter = iters_cuda[0].item() + + # We should now have all the same iteration. + # If not, print a warning and chose the maximum + # iteration across all ranks. + if iteration != max_iter: + print('WARNING: on rank {} found iteration {} in the ' + 'metadata while max iteration across the ranks ' + 'is {}, replacing it with max iteration.'.format( + torch.distributed.get_rank(), iteration, max_iter), flush=True) + else: + # When loading a checkpoint outside of training (for example, + # when editing it), we might not have torch distributed + # initialized, in this case, just assume we have the latest + max_iter = iteration + return max_iter, release + + +def get_rng_state(): + """ collect rng state across data parallel ranks """ + args = get_args() + rng_state = { + 'random_rng_state': random.getstate(), + 'np_rng_state': np.random.get_state(), + 'torch_rng_state': torch.get_rng_state(), + 'cuda_rng_state': get_accelerator().get_rng_state(), + 'rng_tracker_states': tensor_parallel.get_cuda_rng_tracker().get_states()} + + rng_state_list = None + if torch.distributed.is_initialized() and \ + mpu.get_data_parallel_world_size() > 1 and \ + args.data_parallel_random_init: + rng_state_list = \ + [None for i in range(mpu.get_data_parallel_world_size())] + torch.distributed.all_gather_object( + rng_state_list, + rng_state, + group=mpu.get_data_parallel_group()) + else: + rng_state_list = [rng_state] + + return rng_state_list + + +def save_checkpoint(iteration, model, optimizer, opt_param_scheduler): + """Save a model checkpoint.""" + args = get_args() + + # Only rank zero of the data parallel writes to the disk. + if not args.deepspeed: + model = unwrap_model(model) + + print_rank_0('saving checkpoint at iteration {:7d} to {}'.format( + iteration, args.save)) + + # Collect rng state across data parallel ranks. + rng_state = get_rng_state() + + # Checkpoint name. + checkpoint_name = get_checkpoint_name(args.save, iteration) + + # Save distributed optimizer's custom parameter state. + if args.use_distributed_optimizer: + optim_checkpoint_name = \ + get_distributed_optimizer_checkpoint_name(checkpoint_name) + ensure_directory_exists(optim_checkpoint_name) + optimizer.save_parameter_state(optim_checkpoint_name) + + # Collect args, model, RNG. + if not torch.distributed.is_initialized() \ + or mpu.get_data_parallel_rank() == 0 or args.deepspeed: + + # Arguments, iteration, and model. + state_dict = {} + state_dict['args'] = args + state_dict['checkpoint_version'] = 3.0 + state_dict['iteration'] = iteration + state_dict['tokens'] = args.consumed_train_tokens + state_dict[UNIVERSAL_CHECKPOINT_INFO] = _universal_checkpoint_info(model) + + # DeepSpeed saves the model/optimizer/scheduler + if not args.deepspeed: + if len(model) == 1: + state_dict['model'] = model[0].state_dict_for_save_checkpoint() + else: + for i in range(len(model)): + mpu.set_virtual_pipeline_model_parallel_rank(i) + state_dict['model%d' % i] = \ + model[i].state_dict_for_save_checkpoint() + + # Optimizer stuff. + if not args.no_save_optim: + if optimizer is not None: + state_dict['optimizer'] = optimizer.state_dict() + if opt_param_scheduler is not None: + state_dict['opt_param_scheduler'] = \ + opt_param_scheduler.state_dict() + + # RNG states. + if not args.no_save_rng: + state_dict["rng_state"] = rng_state + + # Save. 
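+        # With the naming scheme from get_checkpoint_name() above, each
+        # tensor/pipeline rank writes its own file, e.g. (illustrative paths):
+        #   <save>/iter_0000100/mp_rank_00/model_optim_rng.pt      (no pipeline parallelism)
+        #   <save>/iter_0000100/mp_rank_00_001/model_optim_rng.pt  (tp rank 0, pp rank 1)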
+ if not args.deepspeed: + ensure_directory_exists(checkpoint_name) + torch.save(state_dict, checkpoint_name) + + if args.deepspeed: + #megatron model uses state_dict_for_save_checkpointing instead of the standard state_dict + #state_dict is used by deepspeed for module saving so it needs to point to the right function + if args.no_pipeline_parallel: + original_state_dict = model[0].module.state_dict + def state_dict_for_save_checkpoint_deepspeed(destination=None, prefix='', keep_vars=False): + return model[0].module.state_dict_for_save_checkpoint(prefix=prefix, keep_vars=keep_vars) + model[0].module.state_dict = state_dict_for_save_checkpoint_deepspeed + + # Saving is a collective communication + checkpoint_name = get_checkpoint_name(args.save, iteration) + + # Trim off the filename and mp_rank_* directory. + for _ in range(3): + checkpoint_name = os.path.dirname(checkpoint_name) + model[0].save_checkpoint(checkpoint_name, client_state=state_dict) + + if args.no_pipeline_parallel: + model[0].module.state_dict = original_state_dict + + # Wait so everyone is done (necessary) + if torch.distributed.is_initialized(): + torch.distributed.barrier() + + print_rank_0(' successfully saved checkpoint at iteration {:7d} to {}' \ + .format(iteration, args.save)) + + # And update the latest iteration + if is_rank_0(): + if args.verify_checkpoint: + ckpt_folder = os.path.join(args.save, f"global_step{iteration}") + prev_iter = iteration - args.save_interval + ckpt_ok = verify_checkpoint(ckpt_folder, + args.verify_checkpoint_model_type) + if not ckpt_ok: + # Fix latest file to previous valid ckpt + with open(os.path.join(args.save, 'latest'), 'w') as fd: + fd.write(f"global_step{prev_iter}") + raise RuntimeError(f"verify_checkpoint failed!!! {ckpt_folder}") + else: + print_rank_0(f"successfully passed ckpt validation: {ckpt_folder}") + tracker_filename = get_checkpoint_tracker_filename(args.save) + with open(tracker_filename, 'w') as f: + f.write(str(iteration)) + + # Wait so everyone is done (not necessary) + if torch.distributed.is_initialized(): + torch.distributed.barrier() + + +def _transpose_first_dim(t, num_splits, num_splits_first, model): + input_shape = t.size() + # We use a self_attention module but the values extracted aren't + # specific to self attention so should work for cross attention as well + while hasattr(model, 'module'): + model = model.module + attention_module = model.language_model.encoder.layers[0].self_attention + #attention_module = model.language_model.encoder.layers[0].attention + hidden_size_per_attention_head = attention_module.hidden_size_per_attention_head + num_attention_heads_per_partition = attention_module.num_attention_heads_per_partition + if num_splits_first: + """[num_splits * np * hn, h] + -->(view) [num_splits, np, hn, h] + -->(tranpose) [np, num_splits, hn, h] + -->(view) [np * num_splits * hn, h] """ + + intermediate_shape = \ + (num_splits, num_attention_heads_per_partition, + hidden_size_per_attention_head) + input_shape[1:] + + t = t.view(*intermediate_shape) + t = t.transpose(0, 1).contiguous() + else: + """[np * hn * num_splits, h] + -->(view) [np, hn, num_splits, h] + -->(tranpose) [np, num_splits, hn, h] + -->(view) [np * num_splits * hn, h] """ + + intermediate_shape = \ + (num_attention_heads_per_partition, + hidden_size_per_attention_head, num_splits) +\ + input_shape[1:] + + t = t.view(*intermediate_shape) + t = t.transpose(1, 2).contiguous() + t = t.view(*input_shape) + + return t + +def fix_query_key_value_ordering(model, checkpoint_version): + 
"""Fix up query/key/value matrix ordering if checkpoint + version is smaller than 2.0 + """ + if checkpoint_version < 2.0: + if isinstance(model, list): + assert len(model)==1 + model = model[0] + for name, param in model.named_parameters(): + if name.endswith(('.query_key_value.weight', '.query_key_value.bias')): + if checkpoint_version == 0: + fixed_param = _transpose_first_dim(param.data, 3, True, model) + elif checkpoint_version == 1.0: + fixed_param = _transpose_first_dim(param.data, 3, False, model) + else: + print_rank_0(f"Invalid checkpoint version {checkpoint_version}.") + sys.exit() + param.data.copy_(fixed_param) + if name.endswith(('.key_value.weight', '.key_value.bias')): + if checkpoint_version == 0: + fixed_param = _transpose_first_dim(param.data, 2, True, model) + elif checkpoint_version == 1.0: + fixed_param = _transpose_first_dim(param.data, 2, False, model) + else: + print_rank_0(f"Invalid checkpoint version {checkpoint_version}.") + sys.exit() + param.data.copy_(fixed_param) + print_rank_0(" succesfully fixed query-key-values ordering for" + " checkpoint version {}".format(checkpoint_version)) + + +def _load_base_checkpoint(load_dir, rank0=False): + """ Load the base state_dict from the given directory + + If rank0 is true, just loads rank 0 checkpoint, ignoring arguments. + """ + + # Read the tracker file and set the iteration. + tracker_filename = get_checkpoint_tracker_filename(load_dir) + + # If no tracker file, return nothing + if not os.path.isfile(tracker_filename): + if not rank0: + print_rank_0('WARNING: could not find the metadata file {} '.format( + tracker_filename)) + print_rank_0(' will not load any checkpoints and will start from ' + 'random') + return None, False + + # Otherwise, read the tracker file and either set the iteration or + # mark it as a release checkpoint. + iteration, release = read_metadata(tracker_filename) + + # Checkpoint. + if rank0: + checkpoint_name = find_checkpoint_rank_0(load_dir, iteration, release) + else: + checkpoint_name = get_checkpoint_name(load_dir, iteration, release) + if release: + print_rank_0(f' loading release checkpoint from {load_dir}') + else: + print_rank_0(f' loading checkpoint from {load_dir} at iteration {iteration}') + + # Load the checkpoint. + try: + state_dict = torch.load(checkpoint_name, map_location='cpu') + except ModuleNotFoundError: + from megatron.fp16_deprecated import loss_scaler + # For backward compatibility. + if not rank0: + print_rank_0(' > deserializing using the old code structure ...') + sys.modules['fp16.loss_scaler'] = sys.modules[ + 'megatron.fp16_deprecated.loss_scaler'] + sys.modules['megatron.fp16.loss_scaler'] = sys.modules[ + 'megatron.fp16_deprecated.loss_scaler'] + state_dict = torch.load(checkpoint_name, map_location='cpu') + sys.modules.pop('fp16.loss_scaler', None) + sys.modules.pop('megatron.fp16.loss_scaler', None) + except BaseException as e: + print_rank_0('could not load the checkpoint') + print_rank_0(e) + sys.exit() + + return state_dict, release + + +def load_args_from_checkpoint(args, load_arg='load'): + """Set required arguments from the checkpoint specified in the + arguments. + + Will overwrite arguments that have a non-None default value, but + will leave any arguments that default to None as set. + + Returns the same args NameSpace with the new values added/updated. 
+ + If no checkpoint is specified in args, or if the checkpoint is + there but invalid, the arguments will not be modified + + """ + load_dir = getattr(args, load_arg) + + if load_dir is None: + print_rank_0('No load directory specified, using provided arguments.') + return args + + state_dict, release = _load_base_checkpoint(load_dir, rank0=True) + + # Args. + if not state_dict: + print_rank_0('Checkpoint not found to provide arguments, using provided arguments.') + return args + + if 'args' not in state_dict: + print_rank_0('Checkpoint provided does not have arguments saved, using provided arguments.') + return args + + checkpoint_args = state_dict['args'] + checkpoint_version = state_dict.get('checkpoint_version', 0) + args.iteration = state_dict['iteration'] + + # One-off conversion for foundation models + if hasattr(checkpoint_args, 'disable_bias_linear'): + setattr(checkpoint_args, 'add_bias_linear', not getattr(checkpoint_args, 'disable_bias_linear')) + + def _set_arg(arg_name, old_arg_name=None, force=False): + if not force and getattr(args, arg_name, None) is not None: + return + + if old_arg_name is not None: + checkpoint_value = getattr(checkpoint_args, old_arg_name, None) + else: + checkpoint_value = getattr(checkpoint_args, arg_name, None) + + if checkpoint_value is not None: + print_rank_0(f"Setting {arg_name} to {checkpoint_value} from checkpoint") + setattr(args, arg_name, checkpoint_value) + else: + print_rank_0(f"Checkpoint did not provide arguments {arg_name}") + + _set_arg('num_layers') + _set_arg('hidden_size') + _set_arg('ffn_hidden_size') + _set_arg('seq_length') + _set_arg('num_attention_heads') + _set_arg('num_key_value_heads') + _set_arg('kv_channels') + _set_arg('max_position_embeddings') + _set_arg('add_position_embedding', force=True) + _set_arg('use_rotary_position_embeddings', force=True) + _set_arg('rotary_percent', force=True) + _set_arg('add_bias_linear', force=True) + _set_arg('swiglu', force=True) + _set_arg('untie_embeddings_and_output_weights', force=True) + _set_arg('apply_layernorm_1p', force=True) + _set_arg('tokenizer_type') + _set_arg('padded_vocab_size') + if checkpoint_version < 3.0: + _set_arg('tensor_model_parallel_size', + 'model_parallel_size') + else: + _set_arg('tensor_model_parallel_size', force=True) + _set_arg('pipeline_model_parallel_size', force=True) + _set_arg('virtual_pipeline_model_parallel_size', force=True) + _set_arg('num_layers_per_virtual_pipeline_stage') + return args, checkpoint_args + + +def load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', strict=True, load_only_weights=False): + """Load a model checkpoint and return the iteration. + strict (bool): whether to strictly enforce that the keys in + :attr:`state_dict` of the checkpoint match the names of + parameters and buffers in model. 
+ """ + args = get_args() + load_dir = getattr(args, load_arg) + + if args.deepspeed: + if args.finetune: + loaded_dir, state_dict = model[0].load_checkpoint(load_dir, + load_module_strict=strict, load_optimizer_states=False, + load_lr_scheduler_states=False, load_module_only=True) + else: + loaded_dir, state_dict = model[0].load_checkpoint(load_dir, + load_module_strict=strict) + if loaded_dir is None: + print_rank_0('WARNING: could not find the metadata file {} '.format( + load_dir)) + print_rank_0(' will not load any checkpoints and will start from ' + 'random') + return 0 + release = False + else: + model = unwrap_model(model) + + state_dict, release = _load_base_checkpoint(load_dir, rank0=False) + + # Checkpoint not loaded. + if state_dict is None: + + # Conditionally exit at this point. + if args.exit_on_missing_checkpoint: + print_rank_0(">> '--exit-on-missing-checkpoint' set ... exiting. <<") + torch.distributed.barrier() + sys.exit() + + # Iteration defaults to 0. + return 0 + checkpoint_name = get_checkpoint_name(load_dir, state_dict['iteration'], release) + + # Set checkpoint version. + set_checkpoint_version(state_dict.get('checkpoint_version', 0)) + + # Set iteration. + if args.finetune or release or args.reset_iteration or load_only_weights: + iteration = 0 + # Make DeepSpeed engine aware of this reset of iteration + model[0].global_steps = 0 + else: + try: + iteration = state_dict['iteration'] + if 'tokens' in state_dict: + args.consumed_train_tokens = state_dict['tokens'] + except KeyError: + try: # Backward compatible with older checkpoints + iteration = state_dict['total_iters'] + except KeyError: + print_rank_0('A metadata file exists but unable to load ' + 'iteration from checkpoint {}, exiting'.format( + checkpoint_name)) + sys.exit() + + # Check arguments. + reset_train_valid_samples = args.reset_iteration + if not load_only_weights and not reset_train_valid_samples: + assert args.consumed_train_samples == 0 + assert args.consumed_valid_samples == 0 + if 'args' in state_dict and not args.finetune: + checkpoint_args = state_dict['args'] + check_checkpoint_args(checkpoint_args) + args.consumed_train_samples = getattr(checkpoint_args, + 'consumed_train_samples', 0) + update_num_microbatches(consumed_samples=args.consumed_train_samples) + args.consumed_valid_samples = getattr(checkpoint_args, + 'consumed_valid_samples', 0) + else: + print_rank_0('could not find arguments in the checkpoint ...') + + # Model. + if not args.deepspeed: + if len(model) == 1: + model[0].load_state_dict(state_dict['model'], strict=strict) + else: + for i in range(len(model)): + mpu.set_virtual_pipeline_model_parallel_rank(i) + model[i].load_state_dict(state_dict['model%d' % i], strict=strict) + + # Fix up query/key/value matrix ordering if needed. + checkpoint_version = get_checkpoint_version() + print_rank_0(f' checkpoint version {checkpoint_version}') + fix_query_key_value_ordering(model, checkpoint_version) + + # Optimizer. + if not args.deepspeed: + if not release and not args.finetune and not args.no_load_optim: + try: + # Load state dict. + if optimizer is not None: + optimizer.load_state_dict(state_dict['optimizer']) + + # Load distributed optimizer's custom parameter state. 
+ if args.use_distributed_optimizer: + tracker_filename = get_checkpoint_tracker_filename(load_dir) + iteration, release = read_metadata(tracker_filename) + model_checkpoint_name = \ + get_checkpoint_name(load_dir, iteration, release) + optim_checkpoint_name = \ + get_distributed_optimizer_checkpoint_name( + model_checkpoint_name) + optimizer.load_parameter_state(optim_checkpoint_name) + + # Load scheduler. + if opt_param_scheduler is not None: + if 'lr_scheduler' in state_dict: # backward compatbility + opt_param_scheduler.load_state_dict(state_dict['lr_scheduler']) + else: + opt_param_scheduler.load_state_dict(state_dict['opt_param_scheduler']) + except KeyError: + print_rank_0('Unable to load optimizer from checkpoint {}. ' + 'Specify --no-load-optim or --finetune to prevent ' + 'attempting to load the optimizer state, ' + 'exiting ...'.format(checkpoint_name)) + sys.exit() + else: + if (args.fp16 or args.bf16) and optimizer is not None: + optimizer.reload_model_params() + + # rng states. + if not release and not args.finetune and not args.no_load_rng: + try: + if 'rng_state' in state_dict: + # access rng_state for data parallel rank + if args.data_parallel_random_init: + rng_state = state_dict['rng_state'][mpu.get_data_parallel_rank()] + else: + rng_state = state_dict['rng_state'][0] + random.setstate(rng_state['random_rng_state']) + np.random.set_state(rng_state['np_rng_state']) + torch.set_rng_state(rng_state['torch_rng_state']) + get_accelerator().set_rng_state(rng_state['cuda_rng_state']) + # Check for empty states array + if not rng_state['rng_tracker_states']: + raise KeyError + tensor_parallel.get_cuda_rng_tracker().set_states( + rng_state['rng_tracker_states']) + else: # backward compatability + random.setstate(state_dict['random_rng_state']) + np.random.set_state(state_dict['np_rng_state']) + torch.set_rng_state(state_dict['torch_rng_state']) + get_accelerator().set_rng_state(state_dict['cuda_rng_state']) + # Check for empty states array + if not state_dict['rng_tracker_states']: + raise KeyError + tensor_parallel.get_cuda_rng_tracker().set_states( + state_dict['rng_tracker_states']) + except KeyError: + print_rank_0('Unable to load rng state from checkpoint {}. ' + 'Specify --no-load-rng or --finetune to prevent ' + 'attempting to load the rng state, ' + 'exiting ...'.format(checkpoint_name)) + sys.exit() + + if args.universal_checkpoint: + # TLDR: unique rng is needed for dropout to be really random on TP ranks + # + # Each tp-rank stores its model-parallel-rng states info. + # This is required to e.g. have different dropout patterns on different tp ranks that operate on + # slices of attention_probs tensor. + # + # When loading from universal checkpoint, we use mp_rank__model_states.pt checkpoint files + # to restore the model-parallel-rng ( is {tp-rank, pp-rank} combination). + # However, if the loaded checkpoint mp configuration does not match the current mp configuration, + # we can not use it to restore model-parallel-rng info. + # + # In the case of mp configuration change, we reconfigure the model-parallel-rng states s.t. each + # tp-rank will have a unique state. In order to ensure that subsequent loads from universal will + # not cause the model-parallel-rng states to be repeated, we add the iteration number to the base seed. 
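+        # Illustrative arithmetic (hypothetical values): with args.seed=1234
+        # and iteration=5000, the reconfigured tp base seed below becomes
+        # 1234 + 5000 = 6234, so a later reload from universal cannot repeat
+        # an earlier model-parallel RNG state.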
+ ckp_args = state_dict['args'] + if ((args.tensor_model_parallel_size != ckp_args.tensor_model_parallel_size) + or (args.pipeline_model_parallel_size != ckp_args.pipeline_model_parallel_size)): + print_rank_0(' loading universal checkpoint with modified mp configuration ' + '-> reconfigure tp seed') + tensor_parallel.model_parallel_reconfigure_tp_seed(args.seed + iteration) + + # Some utilities want to load a checkpoint without distributed being initialized + if torch.distributed.is_initialized(): + torch.distributed.barrier() + + print_rank_0(f' successfully loaded checkpoint from {args.load} ' + f'at iteration {iteration}') + + # from .utils import dump_weights, dump_position_embed_weights + # dump_weights(f'{args.universal_checkpoint=}', iteration, model, optimizer) + # dump_position_embed_weights("init", 0, model) + + return iteration + + +def load_biencoder_checkpoint(model, only_query_model=False, + only_context_model=False, custom_load_path=None): + """ + selectively load retrieval models for indexing/retrieving + from saved checkpoints + """ + + args = get_args() + + model = unwrap_model(model) + + load_path = custom_load_path if custom_load_path is not None else args.load + + tracker_filename = get_checkpoint_tracker_filename(load_path) + with open(tracker_filename, 'r') as f: + iteration = int(f.read().strip()) + + checkpoint_name = get_checkpoint_name(load_path, iteration, + args.use_distributed_optimizer, + release=False) + + if mpu.get_data_parallel_rank() == 0: + print('global rank {} is loading checkpoint {}'.format( + torch.distributed.get_rank(), checkpoint_name)) + + state_dict = torch.load(checkpoint_name, map_location='cpu') + ret_state_dict = state_dict['model'] + + if only_query_model: + ret_state_dict.pop('context_model') + if only_context_model: + ret_state_dict.pop('query_model') + + assert len(model) == 1 + model[0].load_state_dict(ret_state_dict) + torch.distributed.barrier() + + if mpu.get_data_parallel_rank() == 0: + print(' successfully loaded {}'.format(checkpoint_name)) + + return model + + +def _universal_checkpoint_info(model): + args = get_args() + tokenizer = get_tokenizer() + info = dict() + info[UNIVERSAL_CHECKPOINT_VERSION_KEY] = UNIVERSAL_CHECKPOINT_VERSION_VALUE + info[ORIGINAL_VOCAB_SIZE] = tokenizer.vocab_size + info[PADDED_VOCAB_SIZE] = args.padded_vocab_size + info.update(model[0].universal_checkpoint_info()) + return info diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/README.md b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0c8c61738da2f6526d065eb600e33d23187bcd0c --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/README.md @@ -0,0 +1 @@ +Megatron Core is a library for efficient and scalable training of transformer based models. 
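+
+A minimal, hypothetical sketch of how the pieces added under `megatron/core`
+fit together (the parallel sizes, dtypes and the `nccl` backend below are
+illustrative only, not a prescribed recipe):
+
+```python
+import torch
+
+import megatron.core.parallel_state as parallel_state
+from megatron.core.model_parallel_config import ModelParallelConfig
+
+# torch.distributed must already be initialized (e.g. launched via torchrun);
+# the backend depends on the accelerator.
+torch.distributed.init_process_group(backend="nccl")
+
+# Build the tensor/pipeline/data parallel process groups
+# (requires a world size divisible by 2 * 2 = 4 in this example).
+parallel_state.initialize_model_parallel(
+    tensor_model_parallel_size=2,
+    pipeline_model_parallel_size=2,
+)
+
+# Shared configuration object consumed by core modules; pipeline_dtype is
+# required whenever pipeline_model_parallel_size > 1.
+config = ModelParallelConfig(
+    tensor_model_parallel_size=2,
+    pipeline_model_parallel_size=2,
+    params_dtype=torch.bfloat16,
+    pipeline_dtype=torch.bfloat16,
+)
+```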
diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/__pycache__/enums.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/__pycache__/enums.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..547275a6d201814c552403c1777a594c6a71cb8c Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/__pycache__/enums.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/__pycache__/utils.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a86f56516fd115942c26a361b381fff86fe6e32e Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/__pycache__/utils.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/enums.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/enums.py new file mode 100644 index 0000000000000000000000000000000000000000..cf1452b23e4df38201f74ddea901dbb7dffe7dc7 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/enums.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import enum + +class ModelType(enum.Enum): + encoder_or_decoder = 1 + encoder_and_decoder = 2 + retro_encoder = 3 + retro_decoder = 4 diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/__init__.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/fused_bias_dropout.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/fused_bias_dropout.py new file mode 100644 index 0000000000000000000000000000000000000000..5c0d49c972ce088beeac6d50309896ca1b0089c2 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/fused_bias_dropout.py @@ -0,0 +1,53 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import torch +from typing import Tuple, Optional + +def _bias_dropout_add_func(x, bias, residual, prob, training): + # type: (Tensor, Optional[Tensor], Tensor, float, bool) -> Tensor + # NOTE: Previously, the argument `bias` used to be passed as + # `bias.expand_as(residual)` when the `bias_dropout_func` is called from the + # transformer layer but broadcasting should automatically take care of that. + # Also, looking at broadcasting semantics, `expand_as` and broadcasting + # seem to be identical performance-wise (both just change the view). 
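+    # In other words, the pattern computed here is
+    #   out = residual + dropout(x + bias)
+    # and the jit-scripted variants below simply hardcode the `training`
+    # flag for the train and inference cases.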
+ if bias is not None: + x = x + bias + out = torch.nn.functional.dropout(x, p=prob, training=training) + out = residual + out + return out + +def get_bias_dropout_add(training, fused): + + def unfused_bias_dropout_add(x_with_bias, residual, prob): + x, bias = x_with_bias # unpack + return _bias_dropout_add_func(x, bias, residual, prob, training) + + @torch.jit.script + def bias_dropout_add_fused_train( + x_with_bias: Tuple[torch.Tensor, Optional[torch.Tensor]], + residual: torch.Tensor, + prob: float + ) -> torch.Tensor: + x, bias = x_with_bias # unpack + return _bias_dropout_add_func(x, bias, residual, prob, True) + + @torch.jit.script + def bias_dropout_add_fused_inference( + x_with_bias: Tuple[torch.Tensor, Optional[torch.Tensor]], + residual: torch.Tensor, + prob: float + ) -> torch.Tensor: + x, bias = x_with_bias # unpack + return _bias_dropout_add_func(x, bias, residual, prob, False) + + if fused: + # jit scripting for a nn.module (with dropout) is not + # triggering the fusion kernel. For now, we use two + # different nn.functional routines to account for varying + # dropout semantics during training and inference phases. + if training: + return bias_dropout_add_fused_train + else: + return bias_dropout_add_fused_inference + else: + return unfused_bias_dropout_add diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/fused_layer_norm.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/fused_layer_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..ae0c3b987af78893831f132076b8ab092a2cc64f --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/fusions/fused_layer_norm.py @@ -0,0 +1,89 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import numbers +import torch +from torch.nn.parameter import Parameter +from torch.nn import init +import importlib + +from megatron.core.utils import make_viewless_tensor + +try: + from apex.contrib.layer_norm.layer_norm import FastLayerNormFN + HAVE_PERSIST_LAYER_NORM = True +except: + HAVE_PERSIST_LAYER_NORM = False + +try: + from apex.normalization.fused_layer_norm import FusedLayerNormAffineFunction + HAVE_FUSED_LAYER_NORM = True +except: + HAVE_FUSED_LAYER_NORM = False + + +class FusedLayerNorm(torch.nn.Module): + + def __init__(self, hidden_size, eps=1e-5, + persist_layer_norm=True, + sequence_parallel=False, + zero_centered_gamma=False): + super().__init__() + + self.zero_centered_gamma = zero_centered_gamma + + # List of hiddens sizes supported in the persistent layer norm kernel + # If the hidden size is not supported, fall back to the non-persistent + # kernel. 
+ persist_ln_hidden_sizes = [1024, 1536, 2048, 2304, 3072, 3840, 4096, + 5120, 6144, 8192, 10240, 12288, 12800, 15360, 16384, 18432, 20480, + 24576, 25600, 30720, 32768, 40960, 49152, 65536] + if hidden_size not in persist_ln_hidden_sizes or not HAVE_PERSIST_LAYER_NORM: + persist_layer_norm = False + + if not persist_layer_norm and not HAVE_FUSED_LAYER_NORM: + # TODO: Add pytorch only layer norm + raise ValueError(f'Apex must currently be installed to use megatron core.') + + if isinstance(hidden_size, numbers.Integral): + hidden_size = (hidden_size,) + self.hidden_size = torch.Size(hidden_size) + self.eps = eps + self.weight = Parameter(torch.Tensor(*hidden_size)) + self.bias = Parameter(torch.Tensor(*hidden_size)) + self.reset_parameters() + self.persist_layer_norm = persist_layer_norm + self.sequence_parallel = sequence_parallel + + # set sequence parallelism flag on weight and bias parameters + setattr(self.weight, 'sequence_parallel', self.sequence_parallel) + setattr(self.bias, 'sequence_parallel', self.sequence_parallel) + + + def reset_parameters(self): + + if self.zero_centered_gamma: + init.zeros_(self.weight) + init.zeros_(self.bias) + else: + init.ones_(self.weight) + init.zeros_(self.bias) + + def forward(self, input): + + weight = self.weight + 1 if self.zero_centered_gamma else self.weight + + if self.persist_layer_norm: + output = FastLayerNormFN.apply(input, weight, self.bias, self.eps) + + # Apex's fast layer norm function outputs a 'view' tensor (i.e., has + # a populated '_base' field). This will result in schedule.py's + # deallocate_output_tensor() throwing an error, so a viewless tensor is + # created to prevent this. + output = make_viewless_tensor(inp = output, + requires_grad = input.requires_grad, + keep_graph = True) + + else: + output = FusedLayerNormAffineFunction.apply(input, weight, self.bias, self.hidden_size, self.eps) + + return output diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/model_parallel_config.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/model_parallel_config.py new file mode 100644 index 0000000000000000000000000000000000000000..441e5a892d050bcdee848b7de8e02e6921028e6a --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/model_parallel_config.py @@ -0,0 +1,164 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +from dataclasses import dataclass +from typing import Callable + +import torch + +@dataclass +class ModelParallelConfig: + """Base configuration for Megatron Core + + Model Parallelism + ----------------- + + tensor_model_parallel_size (int): Intra-layer model parallelism. Splits tensors across GPU ranks. Defaults to 1. + + pipeline_model_parallel_size (int): Inter-layer model parallelism. Splits transformer layers across GPU + ranks. Defaults to 1. + + virtual_pipeline_model_parallel_size (int): Interleaved pipeline parallelism is used to improve performance by + reducing the pipeline bubble. Considers a transformer block as a list of smaller transformer (virtual) blocks. + The number of virtual blocks per pipeline model parallel rank is the virtual model parallel size. See Efficient + Large-Scale Language Model Training on GPU Clusters Using Megatron-LM: https://arxiv.org/pdf/2104.04473.pdf for + more details. Defaults to None. + + sequence_parallel (bool): Makes tensor parallelism more memory efficient for LLMs (20B+) by + parallelizing layer norms and dropout sequentially. 
See Reducing Activation Recomputation in Large Transformer + Models: https://arxiv.org/abs/2205.05198 for more details. Defaults to False. + + Initialization + -------------- + + perform_initialization (bool, default=True): If true, weights are initialized. This option can be useful when you + know you are going to load values from a checkpoint. + + use_cpu_initialization: (bool, default=False): When set to False, we initialize the weights directly on the GPU. + Transferring weights from CPU to GPU can take a significant amount of time for large models. Defaults to False. + + Training + -------- + + fp16 (bool): If true, train with fp16 mixed precision training. Defaults to False. + + bf16 (bool): If true, train with bf16 mixed precision training. Defaults to False. + + params_dtype (torch.dtype): dtype used when intializing the weights. Defaults to torch.float32 + + timers (optional, default=None): TODO + + Optimizations + ------------- + + gradient_accumulation_fusion (bool): If true, fuses weight gradient accumulation to GEMMs. Requires the custom CUDA + extension fused_weight_gradient_mlp_cuda module. To use gradient_accumulation_fusion you must install APEX with + --cpp_ext and --cuda_ext. For example: "pip install --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" + ". Note that the extension requires CUDA>=11. Otherwise, you must turn off gradient accumulation fusion. + Defaults to False. + + async_tensor_model_parallel_allreduce (bool, default=True): If true, enables asynchronous execution of + tensor-model-parallel all-reduce with weight gradient compuation of a column-linear layer. Defaults to False. + + Pipeline Parallelism + -------------------- + + pipeline_dtype (required): dtype used in p2p communication, usually params_dtype + + grad_scale_func (optional, default=None): If using loss scaling, this function should take the loss and return the + scaled loss. If None, no function is called on the loss. + + enable_autocast (bool): If true runs the forward step function inside torch.autocast context. Default is False. + + autocast_dtype (torch.dtype): dtype to pass to torch.amp.autocast when enabled. Default is pipeline_dtype. + + variable_seq_lengths (bool, default=False): Support for variable sequence lengths across microbatches. Setting this + communicates the size of tensors during pipeline parallelism communication, because of this extra overhead it + should only be set if the sequence length varies by microbatch within a global batch. + + num_microbatches_with_partial_activation_checkpoints (int, default=None): If int, set the number of microbatches + where not all of the layers will be checkpointed and recomputed. The rest of the microbatches within the window + of maximum outstanding microbatches will recompute all layers (either full recompute or selective recompute). If + None, the checkpoint and recompute will be left up to the forward_step function. + + overlap_p2p_comm (bool, optional, default=False): When True some of the peer to peer communication for pipeline + parallelism will overlap with computation. Must be False if batch_p2p_comm is true. + + batch_p2p_comm (bool, default=True): Use batch_isend_irecv instead of individual isend/irecv calls. Must be False + if overlap_p2p_comm is True. + + batch_p2p_sync (bool, default=True): When using batch_isend_irecv, do a cuda.device.synchronize afterward to work + around a bug in older version of PyTorch. 
+ + use_ring_exchange_p2p (bool, default = False): Use custom ring_exchange kernel instead of + torch.distributed.batch_isend_irecv(). Requires custom built torch with torch.distributed.ring_exchange. + + deallocate_pipeline_outputs (optional, default=False): If True, output data is deallocated after the tensor is sent + to the next pipeline stage. Helps with saving memory, does nothing when pipeline parallel is not used. + + no_sync_func (optional): Function that creates a context that suppresses asynchronous data-parallel + communication. If the model is an instance of torch.nn.DistributedDataParallel, the default is to use + torch.nn.DistributedDataParallel.no_sync. + + grad_sync_func (optional): Function that launches asynchronous gradient reductions (e.g. distributed optimizer + gradient reduce-scatters). The function should take one argument: an iterable of parameters whose gradients are + to be synchronized. + + param_sync_func (optional): Function that launches asynchronous parameter synchronizations (e.g. distributed + optimizer parameter all-gathers). The function should take one argument: an iterable of parameters to be + synchronized. + + """ + + # Model parallelism + tensor_model_parallel_size: int = 1 + pipeline_model_parallel_size: int = 1 + virtual_pipeline_model_parallel_size: int = None + sequence_parallel: bool = False + + # Initialization + perform_initialization: bool = True + use_cpu_initialization: bool = False + + # Training + fp16: bool = False + bf16: bool = False + params_dtype: torch.dtype = torch.float32 + timers: Callable = None + + # Optimizations + gradient_accumulation_fusion: bool = False + async_tensor_model_parallel_allreduce: bool = False + + # Pipeline Parallel + pipeline_dtype: torch.dtype = None + grad_scale_func: Callable = None + enable_autocast: bool = False + autocast_dtype: torch.dtype = None + variable_seq_lengths: bool = False + num_microbatches_with_partial_activation_checkpoints: int = None + overlap_p2p_comm: bool = False + batch_p2p_comm: bool = True + batch_p2p_sync: bool = True + use_ring_exchange_p2p: bool = False + deallocate_pipeline_outputs: bool = False + no_sync_func: Callable = None + grad_sync_func: Callable = None + param_sync_func: Callable = None + + def __post_init__(self): + """ Python dataclass method that is used to modify attributes after initialization. + See https://docs.python.org/3/library/dataclasses.html#post-init-processing for more details. + """ + if self.sequence_parallel: + if self.tensor_model_parallel_size <= 1: + raise ValueError("Can not use sequence paralllelism without tensor parallelism") + if self.async_tensor_model_parallel_allreduce: + # sequence_parallelism already does this async + self.async_tensor_model_parallel_allreduce = False + + if self.pipeline_model_parallel_size > 1: + if self.pipeline_dtype is None: + raise ValueError("When using pipeline parallelism, pipeline_dtype must be specified") + + if self.autocast_dtype is None: + self.autocast_dtype = self.params_dtype diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/parallel_state.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/parallel_state.py new file mode 100644 index 0000000000000000000000000000000000000000..819760e98bda6bc4676ca17888a7a7f63091ae08 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/parallel_state.py @@ -0,0 +1,766 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +"""Model and data parallel groups.""" + +import torch +from typing import Optional + +from .utils import GlobalMemoryBuffer + +# Intra-layer model parallel group that the current rank belongs to. +_TENSOR_MODEL_PARALLEL_GROUP = None +# Inter-layer model parallel group that the current rank belongs to. +_PIPELINE_MODEL_PARALLEL_GROUP = None +# Model parallel group (both intra- and pipeline) that the current rank belongs to. +_MODEL_PARALLEL_GROUP = None +# Embedding group. +_EMBEDDING_GROUP = None +# Position embedding group. +_POSITION_EMBEDDING_GROUP = None +# Data parallel group that the current rank belongs to. +_DATA_PARALLEL_GROUP = None +_DATA_PARALLEL_GROUP_GLOO = None +# FP8 amax reduction group. +_AMAX_REDUCTION_GROUP = None + +_VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None +_VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None +_PIPELINE_MODEL_PARALLEL_SPLIT_RANK = None + +# These values enable us to change the mpu sizes on the fly. +_MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None +_MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None +_MPU_TENSOR_MODEL_PARALLEL_RANK = None +_MPU_PIPELINE_MODEL_PARALLEL_RANK = None + +# A list of ranks that have a copy of the embedding. +_EMBEDDING_GLOBAL_RANKS = None + +# A list of ranks that have a copy of the position embedding. +_POSITION_EMBEDDING_GLOBAL_RANKS = None + +# A list of global ranks for each pipeline group to ease calculation of the source +# rank when broadcasting from the first or last pipeline stage. +_PIPELINE_GLOBAL_RANKS = None + +# For DeepSpeed's sequence parallel +_SEQUENCE_PARALLEL_GROUP = None +_SEQUENCE_PARALLEL_WORLD_SIZE = None +_SEQUENCE_PARALLEL_RANK = None + +# This group includes processes for both data and sequence parallelisms. +# We use this group to reduce gradients and shard parameters and optimizer stages for ZeRO. +_SEQUENCE_DATA_PARALLEL_GROUP = None +_SEQUENCE_DATA_PARALLEL_WORLD_SIZE = None +_SEQUENCE_DATA_PARALLEL_RANK = None + +# A list of global ranks for each data parallel group to ease calculation of the source +# rank when broadcasting weights from src to all other data parallel ranks +_DATA_PARALLEL_GLOBAL_RANKS = None + +# Memory buffers to avoid dynamic memory allocation +_GLOBAL_MEMORY_BUFFER = None + + +def initialize_model_parallel( + tensor_model_parallel_size: int = 1, + pipeline_model_parallel_size: int = 1, + sequence_parallel_size: int = 1, + virtual_pipeline_model_parallel_size: Optional[int] = None, + pipeline_model_parallel_split_rank: Optional[int] = None, + use_fp8: bool = False, + use_distributed_optimizer: bool = False, +) -> None: + """Initialize model data parallel groups. + + Arguments: + tensor_model_parallel_size (int, default = 1): + The number of GPUs to split individual tensors across. + + pipeline_model_parallel_size (int, default = 1): + The number of tensor parallel GPU groups to split the + Transformer layers across. For example, if + tensor_model_parallel_size is 4 and + pipeline_model_parallel_size is 2, the model will be split + into 2 groups of 4 GPUs. + + virtual_pipeline_model_parallel_size (int, optional): + The number of stages that each pipeline group will have, + interleaving as necessary. If None, no interleaving is + performed. 
For example, if tensor_model_parallel_size is 1, + pipeline_model_parallel_size is 4, + virtual_pipeline_model_parallel_size is 2, and there are + 16 transformer layers in the model, the model will be + split into 8 stages with two layers each and each GPU + would get 2 stages as such (layer number starting with 1): + + GPU 0: [1, 2] [9, 10] + GPU 1: [3, 4] [11, 12] + GPU 2: [5, 6] [13, 14] + GPU 3: [7, 8] [15, 16] + + pipeline_model_parallel_split_rank (int, optional): + For models with both an encoder and decoder, the rank in + pipeline to switch between encoder and decoder (i.e. the + first rank of the decoder). This allows the user to set + the pipeline parallel size of the encoder and decoder + independently. For example, if + pipeline_model_parallel_size is 8 and + pipeline_model_parallel_split_rank is 3, then ranks 0-2 + will be the encoder and ranks 3-7 will be the decoder. + + use_fp8 (bool, default = False): + Construct GPU groups needed for FP8 training, namely for + amax reduction across the product of the data-parallel and + tensor-parallel groups. + + Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we + use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize + the model pipeline. The present function will + create 8 tensor model-parallel groups, 4 pipeline model-parallel groups + and 8 data-parallel groups as: + 8 data_parallel groups: + [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15] + 8 tensor model-parallel groups: + [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15] + 4 pipeline model-parallel groups: + [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15] + Note that for efficiency, the caller should make sure adjacent ranks + are on the same DGX box. For example if we are using 2 DGX-1 boxes + with a total of 16 GPUs, rank 0 to 7 belong to the first box and + ranks 8 to 15 belong to the second box. + + """ + # Get world size and rank. Ensure some consistencies. 
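+    # Worked example of the sizing below (illustrative numbers): with
+    # world_size=16, tensor_model_parallel_size=2, pipeline_model_parallel_size=4
+    # and sequence_parallel_size=1, data_parallel_size = 16 // (2 * 4 * 1) = 2,
+    # giving 8 tensor, 4 pipeline and 8 data-parallel groups, matching the
+    # example in the docstring above.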
+ assert torch.distributed.is_initialized() + world_size: int = torch.distributed.get_world_size() + + if world_size % (tensor_model_parallel_size * pipeline_model_parallel_size) != 0: + raise RuntimeError( + f"world_size ({world_size}) is not divisible by tensor_model_parallel_size " + f"({tensor_model_parallel_size}) x pipeline_model_parallel_size ({pipeline_model_parallel_size})" + ) + + enable_ds_sequence_parallel = sequence_parallel_size > 1 + if enable_ds_sequence_parallel: + assert tensor_model_parallel_size == 1 and pipeline_model_parallel_size == 1, \ + 'DeepSpeed\'s sequence parallel does not work with tensor parallel or pipeline parallel' + + if world_size % sequence_parallel_size != 0: + raise RuntimeError( + f"world_size ({world_size}) is not divisible by sequence_parallel_size {sequence_parallel_size})" + ) + + data_parallel_size: int = world_size // (tensor_model_parallel_size * pipeline_model_parallel_size * sequence_parallel_size) + sequence_data_parallel_size: int = sequence_parallel_size * data_parallel_size + + num_tensor_model_parallel_groups: int = world_size // tensor_model_parallel_size + num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size + num_data_parallel_groups: int = world_size // data_parallel_size + num_sequence_parallel_groups: int = world_size // sequence_parallel_size + num_sequence_data_parallel_groups: int = world_size // sequence_parallel_size // data_parallel_size + + if virtual_pipeline_model_parallel_size is not None: + if not pipeline_model_parallel_size > 2: + raise RuntimeError("pipeline-model-parallel size should be greater than 2 with " "interleaved schedule") + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = 0 + _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = virtual_pipeline_model_parallel_size + + if pipeline_model_parallel_split_rank is not None: + global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK + _PIPELINE_MODEL_PARALLEL_SPLIT_RANK = pipeline_model_parallel_split_rank + + rank = torch.distributed.get_rank() + + # Build the data-parallel groups. + global _DATA_PARALLEL_GROUP + global _DATA_PARALLEL_GROUP_GLOO + global _DATA_PARALLEL_GLOBAL_RANKS + assert _DATA_PARALLEL_GROUP is None, 'data parallel group is already initialized' + all_data_parallel_group_ranks = [] + for i in range(pipeline_model_parallel_size): + start_rank = i * num_pipeline_model_parallel_groups + end_rank = (i + 1) * num_pipeline_model_parallel_groups + + if sequence_parallel_size > 1: + tp_or_sp_size = sequence_parallel_size + else: + tp_or_sp_size = tensor_model_parallel_size + + for j in range(tp_or_sp_size): + ranks = range(start_rank + j, end_rank, tp_or_sp_size) + all_data_parallel_group_ranks.append(list(ranks)) + group = torch.distributed.new_group(ranks) + if use_distributed_optimizer: + group_gloo = torch.distributed.new_group(ranks, backend="gloo") + else: + group_gloo = None + if rank in ranks: + _DATA_PARALLEL_GROUP = group + _DATA_PARALLEL_GROUP_GLOO = group_gloo + _DATA_PARALLEL_GLOBAL_RANKS = ranks + + # Build the sequence parallel groups. + global _SEQUENCE_PARALLEL_GROUP + assert _SEQUENCE_PARALLEL_GROUP is None, \ + 'sequence parallel group is already initialized' + for i in range(num_sequence_parallel_groups): + ranks = range(i * sequence_parallel_size, + (i + 1) * sequence_parallel_size) + group = torch.distributed.new_group(ranks) + if rank in ranks: + _SEQUENCE_PARALLEL_GROUP = group + + # Build the sequence data parallel groups. 
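+    # When DeepSpeed sequence parallelism is enabled, this group spans
+    # sequence_parallel_size * data_parallel_size ranks and (per the note at
+    # the top of this file) is the group ZeRO uses to reduce gradients and
+    # shard parameters/optimizer state; otherwise it simply aliases the data
+    # parallel group.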
+ global _SEQUENCE_DATA_PARALLEL_GROUP + assert _SEQUENCE_DATA_PARALLEL_GROUP is None, \ + 'sequence data parallel group is already initialized' + all_data_sequence_parallel_group_ranks = [] + if enable_ds_sequence_parallel: + for i in range(num_sequence_data_parallel_groups): + ranks = range(i * sequence_data_parallel_size, + (i + 1) * sequence_data_parallel_size) + group = torch.distributed.new_group(ranks) + all_data_sequence_parallel_group_ranks.append(list(ranks)) + if rank in ranks: + _SEQUENCE_DATA_PARALLEL_GROUP = group + else: + _SEQUENCE_DATA_PARALLEL_GROUP = _DATA_PARALLEL_GROUP + + # Build the model-parallel groups. + global _MODEL_PARALLEL_GROUP + assert _MODEL_PARALLEL_GROUP is None, 'model parallel group is already initialized' + num_model_parallel_groups = sequence_data_parallel_size if enable_ds_sequence_parallel else data_parallel_size + model_parallel_group_ranks = all_data_sequence_parallel_group_ranks if enable_ds_sequence_parallel else all_data_parallel_group_ranks + for i in range(num_model_parallel_groups): + ranks = [parallel_group_ranks[i] for parallel_group_ranks in model_parallel_group_ranks] + group = torch.distributed.new_group(ranks) + if rank in ranks: + _MODEL_PARALLEL_GROUP = group + + # Build the tensor model-parallel groups. + global _TENSOR_MODEL_PARALLEL_GROUP + assert _TENSOR_MODEL_PARALLEL_GROUP is None, 'tensor model parallel group is already initialized' + for i in range(num_tensor_model_parallel_groups): + ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size) + group = torch.distributed.new_group(ranks) + if rank in ranks: + _TENSOR_MODEL_PARALLEL_GROUP = group + + + # Build the pipeline model-parallel groups and embedding groups + # (first and last rank in each pipeline model-parallel group). + global _PIPELINE_MODEL_PARALLEL_GROUP + global _PIPELINE_GLOBAL_RANKS + assert _PIPELINE_MODEL_PARALLEL_GROUP is None, 'pipeline model parallel group is already initialized' + global _EMBEDDING_GROUP + global _EMBEDDING_GLOBAL_RANKS + assert _EMBEDDING_GROUP is None, 'embedding group is already initialized' + global _POSITION_EMBEDDING_GROUP + global _POSITION_EMBEDDING_GLOBAL_RANKS + assert _POSITION_EMBEDDING_GROUP is None, 'position embedding group is already initialized' + for i in range(num_pipeline_model_parallel_groups): + ranks = range(i, world_size, num_pipeline_model_parallel_groups) + group = torch.distributed.new_group(ranks) + if rank in ranks: + _PIPELINE_MODEL_PARALLEL_GROUP = group + _PIPELINE_GLOBAL_RANKS = ranks + # Setup embedding group (to exchange gradients between + # first and last stages). 
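# For example, with pipeline ranks [0, 4, 8, 12] and
# pipeline_model_parallel_split_rank == 2, the block below selects
# embedding_ranks == [0, 8, 12] (first stage, first decoder stage, last stage)
# and position_embedding_ranks == [0, 8].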
+ if len(ranks) > 1: + embedding_ranks = [ranks[0], ranks[-1]] + position_embedding_ranks = [ranks[0]] + if pipeline_model_parallel_split_rank is not None: + if ranks[pipeline_model_parallel_split_rank] not in embedding_ranks: + embedding_ranks = [ranks[0], ranks[pipeline_model_parallel_split_rank], ranks[-1]] + if ranks[pipeline_model_parallel_split_rank] not in position_embedding_ranks: + position_embedding_ranks = [ranks[0], ranks[pipeline_model_parallel_split_rank]] + else: + embedding_ranks = ranks + position_embedding_ranks = ranks + + group = torch.distributed.new_group(embedding_ranks) + if rank in embedding_ranks: + _EMBEDDING_GROUP = group + if rank in ranks: + _EMBEDDING_GLOBAL_RANKS = embedding_ranks + + group = torch.distributed.new_group(position_embedding_ranks) + if rank in position_embedding_ranks: + _POSITION_EMBEDDING_GROUP = group + if rank in ranks: + _POSITION_EMBEDDING_GLOBAL_RANKS = position_embedding_ranks + + # Build the FP8 groups. + global _AMAX_REDUCTION_GROUP + assert _AMAX_REDUCTION_GROUP is None, \ + 'FP8 amax reduction group is already initialized' + if use_fp8: + amax_group_size: int = tensor_model_parallel_size * data_parallel_size + num_amax_groups: int = world_size // amax_group_size + for i in range(num_amax_groups): + start_rank = i * amax_group_size + end_rank = (i + 1) * amax_group_size + ranks = range(start_rank, end_rank) + group = torch.distributed.new_group(ranks) + if rank in ranks: + _AMAX_REDUCTION_GROUP = group + + # Initialize global memory buffer + # This isn't really "parallel state" but there isn't another good place to + # put this. If we end up with a more generic initialization of megatron-core + # we could stick it there + _set_global_memory_buffer() + + +def is_unitialized(): + """Useful for code segments that may be accessed with or without mpu initialization""" + return _DATA_PARALLEL_GROUP is None + + +def model_parallel_is_initialized(): + """Check if model and data parallel groups are initialized.""" + if _TENSOR_MODEL_PARALLEL_GROUP is None or _PIPELINE_MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None: + return False + return True + +def sequence_parallel_is_initialized(): + """Check if sequence and data parallel groups are initialized.""" + if _SEQUENCE_PARALLEL_GROUP is None or \ + _DATA_PARALLEL_GROUP is None: + return False + return True + +def sequence_data_parallel_is_initialized(): + """Check if sequence data parallel groups are initialized.""" + if _SEQUENCE_DATA_PARALLEL_GROUP is None: + return False + return True + +def get_model_parallel_group(): + """Get the model parallel group the caller rank belongs to.""" + assert _MODEL_PARALLEL_GROUP is not None, 'model parallel group is not initialized' + return _MODEL_PARALLEL_GROUP + + +def get_tensor_model_parallel_group(check_initialized=True): + """Get the tensor model parallel group the caller rank belongs to.""" + if check_initialized: + assert _TENSOR_MODEL_PARALLEL_GROUP is not None, 'tensor model parallel group is not initialized' + return _TENSOR_MODEL_PARALLEL_GROUP + + +def get_pipeline_model_parallel_group(): + """Get the pipeline model parallel group the caller rank belongs to.""" + assert _PIPELINE_MODEL_PARALLEL_GROUP is not None, 'pipeline_model parallel group is not initialized' + return _PIPELINE_MODEL_PARALLEL_GROUP + +def get_sequence_parallel_group(): + """Get the sequence parallel group the caller rank belongs to.""" + assert _SEQUENCE_PARALLEL_GROUP is not None, \ + 'sequence parallel group is not initialized' + return 
_SEQUENCE_PARALLEL_GROUP + + +def get_sequence_data_parallel_group(): + """Get the sequence parallel group the caller rank belongs to.""" + assert _SEQUENCE_DATA_PARALLEL_GROUP is not None, \ + 'sequence data parallel group is not initialized' + return _SEQUENCE_DATA_PARALLEL_GROUP + + +def get_data_parallel_group(): + """Get the data parallel group the caller rank belongs to.""" + assert _DATA_PARALLEL_GROUP is not None, 'data parallel group is not initialized' + return _DATA_PARALLEL_GROUP + + +def get_data_parallel_group_gloo(): + """Get the data parallel group-gloo the caller rank belongs to.""" + assert _DATA_PARALLEL_GROUP_GLOO is not None, \ + 'data parallel group-gloo is not initialized' + return _DATA_PARALLEL_GROUP_GLOO + + +def get_embedding_group(): + """Get the embedding group the caller rank belongs to.""" + assert _EMBEDDING_GROUP is not None, 'embedding group is not initialized' + return _EMBEDDING_GROUP + + +def get_position_embedding_group(): + """Get the position embedding group the caller rank belongs to.""" + assert _POSITION_EMBEDDING_GROUP is not None, 'position embedding group is not initialized' + return _POSITION_EMBEDDING_GROUP + + +def get_amax_reduction_group(): + """Get the FP8 amax reduction group the caller rank belongs to.""" + assert _AMAX_REDUCTION_GROUP is not None, \ + 'FP8 amax reduction group is not initialized' + return _AMAX_REDUCTION_GROUP + + +def set_tensor_model_parallel_world_size(world_size): + """Set the tensor model parallel size""" + global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE + _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = world_size + +def set_sequence_parallel_world_size(world_size): + """Set the sequence parallel size""" + global _SEQUENCE_PARALLEL_WORLD_SIZE + _SEQUENCE_PARALLEL_WORLD_SIZE = world_size + +def set_sequence_data_parallel_world_size(world_size): + """Set the sequence parallel size""" + global _SEQUENCE_DATA_PARALLEL_WORLD_SIZE + _SEQUENCE_DATA_PARALLEL_WORLD_SIZE = world_size + +def set_pipeline_model_parallel_world_size(world_size): + """Set the pipeline model parallel size""" + global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size + +def set_virtual_pipeline_model_parallel_world_size(world_size): + """Set the pipeline model parallel size""" + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size + +def set_virtual_pipeline_model_parallel_world_size(world_size): + """Set the virtual pipeline model parallel size""" + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size + + +def get_tensor_model_parallel_world_size(): + """Return world size for the tensor model parallel group.""" + global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE + if _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE is not None: + return _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE + return torch.distributed.get_world_size(group=get_tensor_model_parallel_group()) + +def get_model_parallel_world_size(): + assert get_pipeline_model_parallel_world_size() == 1, "legacy get_model_parallel_world_size is only supported if PP is disabled" + return get_tensor_model_parallel_world_size() + +def get_sequence_parallel_world_size(): + """Return world size for the sequence parallel group.""" + global _SEQUENCE_PARALLEL_WORLD_SIZE + if _SEQUENCE_PARALLEL_WORLD_SIZE is not None: + return _SEQUENCE_PARALLEL_WORLD_SIZE + return torch.distributed.get_world_size(group=get_sequence_parallel_group()) + +def 
get_sequence_data_parallel_world_size(): + """Return world size for the sequence parallel group.""" + global _SEQUENCE_DATA_PARALLEL_WORLD_SIZE + if _SEQUENCE_DATA_PARALLEL_WORLD_SIZE is not None: + return _SEQUENCE_DATA_PARALLEL_WORLD_SIZE + return torch.distributed.get_world_size(group=get_sequence_data_parallel_group()) + +def get_pipeline_model_parallel_world_size(): + """Return world size for the pipeline model parallel group.""" + global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + if _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE is not None: + return _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + return torch.distributed.get_world_size(group=get_pipeline_model_parallel_group()) + + +def set_tensor_model_parallel_rank(rank): + """Set tensor model parallel rank.""" + global _MPU_TENSOR_MODEL_PARALLEL_RANK + _MPU_TENSOR_MODEL_PARALLEL_RANK = rank + + +def get_model_parallel_rank(): + assert get_pipeline_model_parallel_world_size() == 1, "legacy get_model_parallel_rank is only supported if PP is disabled" + return get_tensor_model_parallel_rank() + + +def set_sequence_parallel_rank(rank): + """Set sequence parallel rank.""" + global _SEQUENCE_PARALLEL_RANK + _SEQUENCE_PARALLEL_RANK = rank + + +def set_sequence_data_parallel_rank(rank): + """Set sequence parallel rank.""" + global _SEQUENCE_DATA_PARALLEL_RANK + _SEQUENCE_DATA_PARALLEL_RANK = rank + + +def set_pipeline_model_parallel_rank(rank): + """Set pipeline model parallel rank.""" + global _MPU_PIPELINE_MODEL_PARALLEL_RANK + _MPU_PIPELINE_MODEL_PARALLEL_RANK = rank + + +def set_pipeline_model_parallel_split_rank(rank): + """Set pipeline model parallel split rank.""" + global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK + _PIPELINE_MODEL_PARALLEL_SPLIT_RANK = rank + + +def get_tensor_model_parallel_rank(): + """Return my rank for the tensor model parallel group.""" + global _MPU_TENSOR_MODEL_PARALLEL_RANK + if _MPU_TENSOR_MODEL_PARALLEL_RANK is not None: + return _MPU_TENSOR_MODEL_PARALLEL_RANK + return torch.distributed.get_rank(group=get_tensor_model_parallel_group()) + + +def get_pipeline_model_parallel_rank(): + """Return my rank for the pipeline model parallel group.""" + global _MPU_PIPELINE_MODEL_PARALLEL_RANK + if _MPU_PIPELINE_MODEL_PARALLEL_RANK is not None: + return _MPU_PIPELINE_MODEL_PARALLEL_RANK + return torch.distributed.get_rank(group=get_pipeline_model_parallel_group()) + + +def get_pipeline_model_parallel_split_rank(): + """Return pipeline model parallel split rank.""" + global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK + return _PIPELINE_MODEL_PARALLEL_SPLIT_RANK + + +def get_sequence_parallel_rank(): + """Return my rank for the sequence parallel group.""" + global _SEQUENCE_PARALLEL_RANK + if _SEQUENCE_PARALLEL_RANK is not None: + return _SEQUENCE_PARALLEL_RANK + return torch.distributed.get_rank(group=get_sequence_parallel_group()) + + +def get_sequence_data_parallel_rank(): + """Return my rank for the sequence data parallel group.""" + global _SEQUENCE_DATA_PARALLEL_RANK + if _SEQUENCE_DATA_PARALLEL_RANK is not None: + return _SEQUENCE_DATA_PARALLEL_RANK + return torch.distributed.get_rank(group=get_sequence_data_parallel_group()) + + +def is_pipeline_first_stage(ignore_virtual=False): + """Return True if in the first pipeline model-parallel stage, False otherwise.""" + if not ignore_virtual: + if ( + get_virtual_pipeline_model_parallel_world_size() is not None + and get_virtual_pipeline_model_parallel_rank() != 0 + ): + return False + return get_pipeline_model_parallel_rank() == 0 + + +def is_pipeline_last_stage(ignore_virtual=False): + 
"""Return True if in the last pipeline model-parallel stage, False otherwise.""" + if not ignore_virtual: + virtual_pipeline_model_parallel_world_size = get_virtual_pipeline_model_parallel_world_size() + if virtual_pipeline_model_parallel_world_size is not None and get_virtual_pipeline_model_parallel_rank() != ( + virtual_pipeline_model_parallel_world_size - 1 + ): + return False + return get_pipeline_model_parallel_rank() == (get_pipeline_model_parallel_world_size() - 1) + + +def is_rank_in_embedding_group(ignore_virtual=False): + """Return true if current rank is in embedding group, False otherwise.""" + rank = torch.distributed.get_rank() + global _EMBEDDING_GLOBAL_RANKS + if ignore_virtual: + return rank in _EMBEDDING_GLOBAL_RANKS + if rank in _EMBEDDING_GLOBAL_RANKS: + if rank == _EMBEDDING_GLOBAL_RANKS[0]: + return is_pipeline_first_stage(ignore_virtual=False) + elif rank == _EMBEDDING_GLOBAL_RANKS[-1]: + return is_pipeline_last_stage(ignore_virtual=False) + else: + return True + return False + + +def is_rank_in_position_embedding_group(): + """Return true if current rank is in position embedding group, False otherwise.""" + rank = torch.distributed.get_rank() + global _POSITION_EMBEDDING_GLOBAL_RANKS + return rank in _POSITION_EMBEDDING_GLOBAL_RANKS + + +def is_pipeline_stage_before_split(rank=None): + """Return True if pipeline stage executes encoder block for a model + with both encoder and decoder.""" + if get_pipeline_model_parallel_world_size() == 1: + return True + if rank is None: + rank = get_pipeline_model_parallel_rank() + global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK + if _PIPELINE_MODEL_PARALLEL_SPLIT_RANK is None: + return True + if rank < _PIPELINE_MODEL_PARALLEL_SPLIT_RANK: + return True + return False + + +def is_pipeline_stage_after_split(rank=None): + """Return True if pipeline stage executes decoder block for a model + with both encoder and decoder.""" + if get_pipeline_model_parallel_world_size() == 1: + return True + if rank is None: + rank = get_pipeline_model_parallel_rank() + global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK + if _PIPELINE_MODEL_PARALLEL_SPLIT_RANK is None: + return True + if rank >= _PIPELINE_MODEL_PARALLEL_SPLIT_RANK: + return True + return False + + +def is_pipeline_stage_at_split(): + """Return true if pipeline stage executes decoder block and next + stage executes encoder block for a model with both encoder and + decoder.""" + rank = get_pipeline_model_parallel_rank() + return is_pipeline_stage_before_split(rank) and is_pipeline_stage_after_split(rank + 1) + + +def get_virtual_pipeline_model_parallel_rank(): + """Return the virtual pipeline-parallel rank.""" + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK + return _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK + + +def set_virtual_pipeline_model_parallel_rank(rank): + """Set the virtual pipeline-parallel rank.""" + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK + _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = rank + + +def get_virtual_pipeline_model_parallel_world_size(): + """Return the virtual pipeline-parallel world size.""" + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + return _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + + +def set_virtual_pipeline_model_parallel_world_size(world_size): + """Set the virtual pipeline-parallel world size""" + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size + + +def get_tensor_model_parallel_src_rank(): + """Calculate the global rank corresponding to the first local rank + in the tensor model parallel group.""" 
+ global_rank = torch.distributed.get_rank() + local_world_size = get_tensor_model_parallel_world_size() + return (global_rank // local_world_size) * local_world_size + + +def get_sequence_parallel_src_rank(): + """Calculate the global rank corresponding to the first local rank + in the sequence parallel group.""" + global_rank = torch.distributed.get_rank() + local_world_size = get_sequence_parallel_world_size() + return (global_rank // local_world_size) * local_world_size + + +def get_data_parallel_src_rank(): + """Calculate the global rank corresponding to the first local rank + in the data parallel group.""" + assert _DATA_PARALLEL_GLOBAL_RANKS is not None, "Data parallel group is not initialized" + return _DATA_PARALLEL_GLOBAL_RANKS[0] + + +def get_pipeline_model_parallel_first_rank(): + """Return the global rank of the first process in the pipeline for the + current tensor parallel group""" + assert _PIPELINE_GLOBAL_RANKS is not None, "Pipeline parallel group is not initialized" + return _PIPELINE_GLOBAL_RANKS[0] + + +def get_pipeline_model_parallel_last_rank(): + """Return the global rank of the last process in the pipeline for the + current tensor parallel group""" + assert _PIPELINE_GLOBAL_RANKS is not None, "Pipeline parallel group is not initialized" + last_rank_local = get_pipeline_model_parallel_world_size() - 1 + return _PIPELINE_GLOBAL_RANKS[last_rank_local] + + +def get_pipeline_model_parallel_next_rank(): + """Return the global rank that follows the caller in the pipeline""" + assert _PIPELINE_GLOBAL_RANKS is not None, "Pipeline parallel group is not initialized" + rank_in_pipeline = get_pipeline_model_parallel_rank() + world_size = get_pipeline_model_parallel_world_size() + return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline + 1) % world_size] + + +def get_pipeline_model_parallel_prev_rank(): + """Return the global rank that preceeds the caller in the pipeline""" + assert _PIPELINE_GLOBAL_RANKS is not None, "Pipeline parallel group is not initialized" + rank_in_pipeline = get_pipeline_model_parallel_rank() + world_size = get_pipeline_model_parallel_world_size() + return _PIPELINE_GLOBAL_RANKS[(rank_in_pipeline - 1) % world_size] + + +def get_data_parallel_world_size(): + """Return world size for the data parallel group.""" + return torch.distributed.get_world_size(group=get_data_parallel_group()) + + +def get_data_parallel_rank(): + """Return my rank for the data parallel group.""" + return torch.distributed.get_rank(group=get_data_parallel_group()) + + +def _set_global_memory_buffer(): + """Initialize global buffer""" + global _GLOBAL_MEMORY_BUFFER + assert _GLOBAL_MEMORY_BUFFER is None, 'global memory buffer is already initialized' + _GLOBAL_MEMORY_BUFFER = GlobalMemoryBuffer() + + +def get_global_memory_buffer(): + """Return the global GlobalMemoryBuffer object""" + assert _GLOBAL_MEMORY_BUFFER is not None, 'global memory buffer is not initialized' + return _GLOBAL_MEMORY_BUFFER + + +def destroy_global_memory_buffer(): + """Sets the global memory buffer to None""" + global _GLOBAL_MEMORY_BUFFER + _GLOBAL_MEMORY_BUFFER = None + + +def destroy_model_parallel(): + """Set the groups to none.""" + global _MODEL_PARALLEL_GROUP + _MODEL_PARALLEL_GROUP = None + global _TENSOR_MODEL_PARALLEL_GROUP + _TENSOR_MODEL_PARALLEL_GROUP = None + global _PIPELINE_MODEL_PARALLEL_GROUP + _PIPELINE_MODEL_PARALLEL_GROUP = None + global _DATA_PARALLEL_GROUP + _DATA_PARALLEL_GROUP = None + global _SEQUENCE_PARALLEL_GROUP + _SEQUENCE_PARALLEL_GROUP = None + global _SEQUENCE_DATA_PARALLEL_GROUP + 
_SEQUENCE_DATA_PARALLEL_GROUP = None + global _EMBEDDING_GROUP + _EMBEDDING_GROUP = None + global _POSITION_EMBEDDING_GROUP + _POSITION_EMBEDDING_GROUP = None + global _AMAX_REDUCTION_GROUP + _AMAX_REDUCTION_GROUP = None + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK + _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = None + global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None + global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE + _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = None + global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE + _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = None + global _MPU_TENSOR_MODEL_PARALLEL_RANK + _MPU_TENSOR_MODEL_PARALLEL_RANK = None + global _MPU_PIPELINE_MODEL_PARALLEL_RANK + _MPU_PIPELINE_MODEL_PARALLEL_RANK = None + global _GLOBAL_MEMORY_BUFFER + _GLOBAL_MEMORY_BUFFER = None diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/requirements.txt b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f957179eb5ff5561f6d4bad51445aa08a651f790 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/requirements.txt @@ -0,0 +1,6 @@ +pybind11 +torch +regex +einops +datasets +sentencepiece diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__init__.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..12c1ab95bb68d756c0d819d77a8a179970c4788e --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__init__.py @@ -0,0 +1,70 @@ +from .cross_entropy import vocab_parallel_cross_entropy +from .data import broadcast_data + +from .layers import ( + ColumnParallelLinear, + RowParallelLinear, + VocabParallelEmbedding, + set_tensor_model_parallel_attributes, + set_defaults_if_not_set_tensor_model_parallel_attributes, + copy_tensor_model_parallel_attributes, + param_is_not_tensor_parallel_duplicate, + linear_with_grad_accumulation_and_async_allreduce + +) + +from .mappings import ( + copy_to_tensor_model_parallel_region, + gather_from_tensor_model_parallel_region, + gather_from_sequence_parallel_region, + scatter_to_tensor_model_parallel_region, + scatter_to_sequence_parallel_region, +) + +from .random import ( + checkpoint, + get_cuda_rng_tracker, + model_parallel_cuda_manual_seed, + model_parallel_reconfigure_tp_seed, + init_checkpointed_activations_memory_buffer, + reset_checkpointed_activations_memory_buffer, +) + +from .utils import ( + split_tensor_along_last_dim, + split_tensor_into_1d_equal_chunks, + gather_split_1d_tensor, +) + +__all__ = [ + # cross_entropy.py + "vocab_parallel_cross_entropy", + # data.py + "broadcast_data", + #layers.py + "ColumnParallelLinear", + "RowParallelLinear", + "VocabParallelEmbedding", + "set_tensor_model_parallel_attributes", + "set_defaults_if_not_set_tensor_model_parallel_attributes", + "copy_tensor_model_parallel_attributes", + "param_is_not_tensor_parallel_duplicate", + "linear_with_grad_accumulation_and_async_allreduce", + # mappings.py + "copy_to_tensor_model_parallel_region", + "gather_from_tensor_model_parallel_region", + "gather_from_sequence_parallel_region", +# "reduce_from_tensor_model_parallel_region", + "scatter_to_tensor_model_parallel_region", + "scatter_to_sequence_parallel_region", + # random.py + "checkpoint", + "get_cuda_rng_tracker", + "model_parallel_cuda_manual_seed", + 
"init_checkpointed_activations_memory_buffer", + "reset_checkpointed_activations_memory_buffer", + # utils.py + "split_tensor_along_last_dim", + "split_tensor_into_1d_equal_chunks", + "gather_split_1d_tensor", +] diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__pycache__/random.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf6a8a717e09c597bb08ea774f0971c190646c14 Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__pycache__/random.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__pycache__/utils.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e67813f6052cc4b4bbb72529217e06da9dfead28 Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/__pycache__/utils.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/cross_entropy.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..9147dbbaddd1b9e3a86956991c05a16e99513a8f --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/cross_entropy.py @@ -0,0 +1,143 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import torch + +from megatron.core.parallel_state import ( + get_tensor_model_parallel_group, + get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size +) + +from .utils import VocabUtility + + +class _VocabParallelCrossEntropy(torch.autograd.Function): + + @staticmethod + def forward(ctx, vocab_parallel_logits, target, label_smoothing=0.0): + + # Maximum value along vocab dimension across all GPUs. + logits_max = torch.max(vocab_parallel_logits, dim=-1)[0] + torch.distributed.all_reduce(logits_max, + op=torch.distributed.ReduceOp.MAX, + group=get_tensor_model_parallel_group()) + # Subtract the maximum value. + vocab_parallel_logits = vocab_parallel_logits - logits_max.unsqueeze(dim=-1) + + # Get the partition's vocab indecies + get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size + partition_vocab_size = vocab_parallel_logits.size()[-1] + rank = get_tensor_model_parallel_rank() + world_size = get_tensor_model_parallel_world_size() + vocab_start_index, vocab_end_index = get_vocab_range( + partition_vocab_size, rank, world_size) + + # Create a mask of valid vocab ids (1 means it needs to be masked). + target_mask = (target < vocab_start_index) | (target >= vocab_end_index) + masked_target = target.clone() - vocab_start_index + masked_target[target_mask] = 0 + + # Get predicted-logits = logits[target]. + # For Simplicity, we convert logits to a 2-D tensor with size + # [*, partition-vocab-size] and target to a 1-D tensor of size [*]. 
+ logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size) + masked_target_1d = masked_target.view(-1) + arange_1d = torch.arange(start=0, end=logits_2d.size()[0], + device=logits_2d.device) + predicted_logits_1d = logits_2d[arange_1d, masked_target_1d] + predicted_logits_1d = predicted_logits_1d.clone().contiguous() + predicted_logits = predicted_logits_1d.view_as(target) + predicted_logits[target_mask] = 0.0 + # All reduce is needed to get the chunks from other GPUs. + torch.distributed.all_reduce(predicted_logits, + op=torch.distributed.ReduceOp.SUM, + group=get_tensor_model_parallel_group()) + + # Sum of exponential of logits along vocab dimension across all GPUs. + exp_logits = vocab_parallel_logits + torch.exp(vocab_parallel_logits, out=exp_logits) + sum_exp_logits = exp_logits.sum(dim=-1) + torch.distributed.all_reduce(sum_exp_logits, + op=torch.distributed.ReduceOp.SUM, + group=get_tensor_model_parallel_group()) + + # Loss = log(sum(exp(logits))) - predicted-logit. + loss = torch.log(sum_exp_logits) - predicted_logits + + # Normalize and optionally smooth logits + exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1)) + + vocab_size = exp_logits.size(-1) + if label_smoothing > 0: + """ + We'd like to assign 1 / (K - 1) probability mass to every index that is not the ground truth. + = (1 - alpha) * y_gt + alpha * mean(y_{i for i != gt}) + = (1 - alpha) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i + = ((K - 1) * (1 - alpha) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i != gt} y_i + = (K * (1 - alpha) - 1) / (K - 1)) * y_gt + (alpha / (K - 1)) * \sum_{i} y_i + = (1 - (alpha * K) / (K - 1)) * y_gt + ( (alpha * K) / (K - 1) ) * \sum_{i} y_i / K + From: https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/losses/smoothed_cross_entropy.py + """ + assert 1.0 > label_smoothing > 0.0 + smoothing = label_smoothing * vocab_size / (vocab_size - 1) + + # Exp logits at this point are normalized probabilities. So we can just take the log to get log-probs. + log_probs = torch.log(exp_logits) + mean_log_probs = log_probs.mean(dim=-1) + loss = (1.0 - smoothing) * loss - smoothing * mean_log_probs + + ctx.label_smoothing, ctx.vocab_size = label_smoothing, vocab_size + ctx.save_for_backward(exp_logits, target_mask, masked_target_1d) + + # Store softmax, target-mask and masked-target for backward pass. + ctx.save_for_backward(exp_logits, target_mask, masked_target_1d) + + return loss + + @staticmethod + def backward(ctx, grad_output): + + # Retreive tensors from the forward path. + softmax, target_mask, masked_target_1d = ctx.saved_tensors + label_smoothing, vocab_size = ctx.label_smoothing, ctx.vocab_size + + # All the inputs have softmax as thier gradient. + grad_input = softmax + # For simplicity, work with the 2D gradient. + partition_vocab_size = softmax.size()[-1] + grad_2d = grad_input.view(-1, partition_vocab_size) + + # Add the gradient from matching classes. + arange_1d = torch.arange(start=0, end=grad_2d.size()[0], + device=grad_2d.device) + + softmax_update = 1.0 - target_mask.view(-1).float() + + if label_smoothing > 0: + smoothing = label_smoothing * vocab_size / (vocab_size - 1) + grad_2d[arange_1d, masked_target_1d] -= (1.0 - smoothing) * softmax_update + average_grad = 1 / vocab_size + grad_2d[arange_1d, :] -= smoothing * average_grad + else: + grad_2d[arange_1d, masked_target_1d] -= softmax_update + + # Finally elementwise multiplication with the output gradients. 
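# (Without label smoothing, the gradient assembled above is softmax - one_hot(target) on this
# rank's vocab shard, with the one-hot subtraction skipped wherever the target lies outside
# the local vocab range; the line below scales it elementwise by the incoming grad_output.)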
+ grad_input.mul_(grad_output.unsqueeze(dim=-1)) + + return grad_input, None, None + + +def vocab_parallel_cross_entropy(vocab_parallel_logits, target, label_smoothing=0.0): + """ + Performs cross entropy loss when logits are split across tensor parallel ranks + + Arguments: + vocab_parallel_logits: logits split across tensor parallel ranks + dimension is [sequence_length, batch_size, hidden_size] + + target: correct vocab ids of dimseion [sequence_length, micro_batch_size] + + lobal_smoothing: smoothing factor, must be in range [0.0, 1.0) + default is no smoothing (=0.0) + """ + return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target, label_smoothing) diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/layers.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..b310c4295893f086aca29458ffc61296c79112ed --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/layers.py @@ -0,0 +1,886 @@ +# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +# Parts of the code here are adapted from PyTorch +# repo: https://github.com/pytorch/pytorch + +import math +import os +from typing import Optional, Callable +import warnings +from packaging import version + +import torch +import torch.nn.functional as F +import torch.nn.init as init +from torch.nn.parameter import Parameter + +from torch.cuda.amp import custom_fwd, custom_bwd + +from megatron.core.model_parallel_config import ModelParallelConfig + +from megatron.core.parallel_state import ( + get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + get_tensor_model_parallel_group, + get_global_memory_buffer, + is_pipeline_first_stage, +) +from megatron import get_args +from megatron.global_vars import get_num_microbatches +from .mappings import ( + copy_to_tensor_model_parallel_region, + gather_from_tensor_model_parallel_region, + gather_from_sequence_parallel_region, + reduce_from_tensor_model_parallel_region, + scatter_to_tensor_model_parallel_region, + reduce_scatter_to_sequence_parallel_region, +) + +from .random import get_cuda_rng_tracker +from .utils import ( + divide, + split_tensor_along_last_dim, + VocabUtility, +) + +import deepspeed.runtime.activation_checkpointing.checkpointing as ds_checkpointing +from deepspeed.accelerator import get_accelerator + +_grad_accum_fusion_available = True +try: + import fused_weight_gradient_mlp_cuda +except ImportError: + _grad_accum_fusion_available = False + +try: + import habana_frameworks.torch.hpex.experimental.transformer_engine as te +except ImportError: + if get_accelerator().device_name() == 'hpu' and get_args().transformer_impl == "transformer_engine": + raise RuntimeError( + "Device name is hpu and transformer implementation is transformer_engine" + "but couldn't import habana_frameworks.torch.hpex.experimental.transformer_engine" + ) + +_MODEL_PARALLEL_ATTRIBUTE_DEFAULTS = {'tensor_model_parallel': False, + 'partition_dim': -1, + 'partition_stride': 1} + +def param_is_not_tensor_parallel_duplicate(param): + return (hasattr(param, 'tensor_model_parallel') and + param.tensor_model_parallel) or ( + get_tensor_model_parallel_rank() == 0) + + +def set_tensor_model_parallel_attributes(tensor, is_parallel, dim, stride): + # Make sure the attributes are not set. 
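# (The attributes are tensor_model_parallel, partition_dim and partition_stride, per
# _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS above; e.g. a column-parallel weight ends up tagged
# with is_parallel=True, dim=0, stride=1 via _initialize_affine_weight_gpu.)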
+ for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS: + assert not hasattr(tensor, attribute) + # Set the attributes. + setattr(tensor, 'tensor_model_parallel', is_parallel) + setattr(tensor, 'partition_dim', dim) + setattr(tensor, 'partition_stride', stride) + + +def set_defaults_if_not_set_tensor_model_parallel_attributes(tensor): + def maybe_set(attribute, value): + if not hasattr(tensor, attribute): + setattr(tensor, attribute, value) + for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS: + maybe_set(attribute, _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS[attribute]) + + +def copy_tensor_model_parallel_attributes(destination_tensor, source_tensor): + def maybe_copy(attribute): + if hasattr(source_tensor, attribute): + setattr(destination_tensor, attribute, + getattr(source_tensor, attribute)) + for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS: + maybe_copy(attribute) + + +def _initialize_affine_weight_gpu(weight, init_method, + partition_dim, stride=1): + """Initialize affine weight for model parallel on GPU.""" + + set_tensor_model_parallel_attributes(tensor=weight, + is_parallel=True, + dim=partition_dim, + stride=stride) + + with get_cuda_rng_tracker().fork(): + init_method(weight) + + +def _initialize_affine_weight_cpu(weight, output_size, input_size, + per_partition_size, partition_dim, + init_method, stride=1, + return_master_weight=False, + *, params_dtype=torch.float32): + """Initialize affine weight for model parallel. + + Build the master weight on all processes and scatter + the relevant chunk.""" + + set_tensor_model_parallel_attributes(tensor=weight, + is_parallel=True, + dim=partition_dim, + stride=stride) + + # Initialize master weight + master_weight = torch.empty(output_size, input_size, + dtype=torch.float, + requires_grad=False) + init_method(master_weight) + master_weight = master_weight.to(dtype=params_dtype) + + # Split and copy + per_partition_per_stride_size = divide(per_partition_size, stride) + weight_list = torch.split(master_weight, per_partition_per_stride_size, + dim=partition_dim) + rank = get_tensor_model_parallel_rank() + world_size = get_tensor_model_parallel_world_size() + my_weight_list = weight_list[rank::world_size] + + with torch.no_grad(): + torch.cat(my_weight_list, dim=partition_dim, out=weight) + if return_master_weight: + return master_weight + return None + + +# This class encapsulates the behavior related to two mechanisms: hpu graph and amax measuring interval +class FP8ModuleRunner(): + def __init__(self, module, measure_interval: int=1, cache_fp8_weight_fwd=False): + self.module = module + self.measure_interval = measure_interval + self.cache_fp8_weight_fwd = cache_fp8_weight_fwd + self.run_cnt = 0 + + def _is_first_microbatch(self): + if not self.cache_fp8_weight_fwd: + return None + + return self.run_cnt % get_num_microbatches() in [1,2] + + def __call__(self, input, weight, bias=None): + self.run_cnt += 1 + measure = self.measure_interval == 1 or self.run_cnt % self.measure_interval == 1 + te.fp8.set_measurement_mode(manual=True, manual_value=measure) + + is_first_microbatch = self._is_first_microbatch() + + return self.module(input, weight, bias, is_first_microbatch=is_first_microbatch) + + +class VocabParallelEmbedding(torch.nn.Module): + """Embedding parallelized in the vocabulary dimension. + + This is mainly adapted from torch.nn.Embedding and all the default + values are kept. + Arguments: + num_embeddings: vocabulary size. + embedding_dim: size of hidden state. 
+ + Keyword Arguments: + config: A megatron.core.ModelParallelConfig object + """ + + def __init__(self, num_embeddings: int, embedding_dim: int, *, + init_method: Callable, + config: ModelParallelConfig): + super(VocabParallelEmbedding, self).__init__() + # Keep the input dimensions. + self.num_embeddings = num_embeddings + self.embedding_dim = embedding_dim + # Set the detauls for compatibility. + self.padding_idx = None + self.max_norm = None + self.norm_type = 2. + self.scale_grad_by_freq = False + self.sparse = False + self._weight = None + self.tensor_model_parallel_size = get_tensor_model_parallel_world_size() + # Divide the weight matrix along the vocaburaly dimension. + self.vocab_start_index, self.vocab_end_index = \ + VocabUtility.vocab_range_from_global_vocab_size( + self.num_embeddings, get_tensor_model_parallel_rank(), + self.tensor_model_parallel_size) + self.num_embeddings_per_partition = self.vocab_end_index - \ + self.vocab_start_index + + # Allocate weights and initialize. + args = get_args() + # only the first stage embedding runs this class' forward. The head's embedding does its own + # thing, so don't waste memory allocating LN weights. + self.layer_norm = None + if is_pipeline_first_stage() and args.embed_layernorm: + from megatron.model import LayerNorm + self.layer_norm = LayerNorm(embedding_dim, sequence_parallel=config.sequence_parallel) + + # Allocate weights and initialize. + if config.use_cpu_initialization: + self.weight = Parameter(torch.empty( + self.num_embeddings_per_partition, self.embedding_dim, + dtype=config.params_dtype)) + if config.perform_initialization: + _initialize_affine_weight_cpu( + self.weight, self.num_embeddings, self.embedding_dim, + self.num_embeddings_per_partition, 0, init_method, + params_dtype=config.params_dtype) + else: + self.weight = Parameter(torch.empty( + self.num_embeddings_per_partition, self.embedding_dim, + device=get_accelerator().current_device_name(), dtype=config.params_dtype)) + if config.perform_initialization: + _initialize_affine_weight_gpu(self.weight, init_method, + partition_dim=0, stride=1) + + def forward(self, input_): + if self.tensor_model_parallel_size > 1: + # Build the mask. + input_mask = (input_ < self.vocab_start_index) | \ + (input_ >= self.vocab_end_index) + # Mask the input. + masked_input = input_.clone() - self.vocab_start_index + masked_input[input_mask] = 0 + else: + masked_input = input_ + # Get the embeddings. + output_parallel = F.embedding(masked_input, self.weight, + self.padding_idx, self.max_norm, + self.norm_type, self.scale_grad_by_freq, + self.sparse) + # Mask the output embedding. + if self.tensor_model_parallel_size > 1: + output_parallel[input_mask, :] = 0.0 + # Reduce across all the model parallel GPUs. + output = reduce_from_tensor_model_parallel_region(output_parallel) + + if self.layer_norm is not None: + output = self.layer_norm(output) + + return output + + +class SequenceParallelPositionEmbedding(torch.nn.Module): + """Embedding parallelized in the sequence dimension. + + Arguments: + sequence_length: max sequence length. + embedding_dim: size of hidden state. 
+ """ + + def __init__(self, sequence_length, embedding_dim): + super(SequenceParallelPositionEmbedding, self).__init__() + sequence_parallel_size = get_tensor_model_parallel_world_size() + assert sequence_length % sequence_parallel_size == 0 + local_sequence_length = sequence_length // sequence_parallel_size + self.offset = local_sequence_length * get_tensor_model_parallel_rank() + self.local_embeddings = torch.nn.Embedding( + local_sequence_length, embedding_dim) + + def forward(self, position_ids): + return self.local_embeddings(position_ids - self.offset) + + +class LinearWithGradAccumulationAndAsyncCommunication(torch.autograd.Function): + """See linear_with_grad_accumulation_and_async_allreduce""" + + @staticmethod + @custom_fwd + def forward(ctx, input, weight, bias, gradient_accumulation_fusion, + async_grad_allreduce, sequence_parallel): + ctx.save_for_backward(input, weight) + ctx.use_bias = bias is not None + ctx.gradient_accumulation_fusion = gradient_accumulation_fusion + ctx.async_grad_allreduce = async_grad_allreduce + ctx.sequence_parallel = sequence_parallel + + if sequence_parallel: + world_size = get_tensor_model_parallel_world_size() + dim_size = list(input.size()) + dim_size[0] = dim_size[0] * world_size + + all_gather_buffer = \ + get_global_memory_buffer().get_tensor(dim_size, input.dtype, "mpu") + + if version.parse(torch.__version__) >= version.parse('1.13'): + torch.distributed.all_gather_into_tensor( + all_gather_buffer, + input, + group=get_tensor_model_parallel_group()) + else: + torch.distributed._all_gather_base( + all_gather_buffer, + input, + group=get_tensor_model_parallel_group()) + + total_input = all_gather_buffer + else: + total_input = input + + # output = torch.matmul(total_input, weight.t()) + # if bias is not None: + # output = output + bias + output = F.linear(total_input, weight, bias) + return output + + @staticmethod + @custom_bwd + def backward(ctx, grad_output): + input, weight = ctx.saved_tensors + use_bias = ctx.use_bias + + if ctx.sequence_parallel: + world_size = get_tensor_model_parallel_world_size() + dim_size = list(input.size()) + dim_size[0] = dim_size[0] * world_size + + all_gather_buffer = \ + get_global_memory_buffer().get_tensor(dim_size, input.dtype, "mpu") + + if version.parse(torch.__version__) >= version.parse('1.13'): + handle = torch.distributed.all_gather_into_tensor( + all_gather_buffer, + input, + group=get_tensor_model_parallel_group(), async_op=True) + else: + handle = torch.distributed._all_gather_base( + all_gather_buffer, + input, + group=get_tensor_model_parallel_group(), async_op=True) + + # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the + # gather is scheduled before the input gradient computation + total_input = all_gather_buffer + else: + total_input = input + grad_input = grad_output.matmul(weight) + + if ctx.sequence_parallel: + handle.wait() + + # Doing gather + slicing during the NeMo forward pass can make this tensor + # not be contiguous. 
PyTorch only checks if the tensor is contiguous, and only + # clones it if it's not contiguous: + # https://github.com/pytorch/pytorch/blob/c47cf9bc7f9e02f649ab4ed53fe4d35732c92ab6/torch/_refs/__init__.py#L2761 + grad_output = grad_output.contiguous() + # Convert the tensor shapes to 2D for execution compatibility + if len(grad_output.shape) == 3: + grad_output = grad_output.view(grad_output.shape[0] * grad_output.shape[1], + grad_output.shape[2]) + total_input = total_input.view(total_input.shape[0] * total_input.shape[1], + total_input.shape[2]) + else: + # Somehow when DeepSpeed MoE is used, grad_output could have 4 dimensions. + # TODO: May need further investigation + total_input = total_input.contiguous() + grad_output = grad_output.view(-1, grad_output.shape[-1]) + total_input = total_input.view(-1, total_input.shape[-1]) + + if ctx.async_grad_allreduce: + # Asynchronous all-reduce + handle = torch.distributed.all_reduce( + grad_input, group=get_tensor_model_parallel_group(), async_op=True) + # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the + # all-reduce is scheduled before the weight gradient computation + + if ctx.sequence_parallel: + assert not ctx.async_grad_allreduce + dim_size = list(input.size()) + sub_grad_input = torch.empty(dim_size, dtype=input.dtype, + device=get_accelerator().current_device_name(), + requires_grad=False) + # reduce_scatter + handle = torch.distributed._reduce_scatter_base(sub_grad_input, grad_input, + group=get_tensor_model_parallel_group(), + async_op=True) + # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the + # reduce scatter is scheduled before the weight gradient computation + + # TODO: temporary commented + # if ctx.gradient_accumulation_fusion: + # if weight.main_grad.dtype == torch.float32: + # fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(total_input, grad_output, weight.main_grad) + # elif weight.main_grad.dtype in (torch.float16, torch.bfloat16): + # fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(total_input, grad_output, weight.main_grad) + # else: + # raise RuntimeError("Unsupported gradient type for gradient accumulation fusion") + # grad_weight = None + # else: + # grad_weight = grad_output.t().matmul(total_input) + grad_weight = grad_output.t().matmul(total_input) + grad_bias = grad_output.sum(dim=0) if use_bias else None + + if ctx.sequence_parallel: + handle.wait() + return sub_grad_input, grad_weight, grad_bias, None, None, None + + if ctx.async_grad_allreduce: + handle.wait() + + return grad_input, grad_weight, grad_bias, None, None, None + +def linear_with_grad_accumulation_and_async_allreduce( + input: torch.Tensor, + weight: torch.Tensor, + bias: Optional[torch.Tensor], + gradient_accumulation_fusion: bool, + async_grad_allreduce: bool, + sequence_parallel: bool, +) -> torch.Tensor: + """Linear layer execution with asynchronous communication and + gradient accumulation fusion in backprop. + + This has the option to accumulate the result of backprop + calculation into an existing gradient buffer, preventing the need + to do an additional addition kernel after the gradient + calculation. + + Additionally, the tensor parallel all reduce of the input + gradients can be done asynchronously with the calculation of + the weight gradients. + + In the case of sequence parallelism, the reduce scatter of the + input gradients is done asynchronously with the calcluation of the + weight gradients. + + Use of this module requires that the environment variable + CUDA_DEVICE_MAX_CONNECTIONS=1. 
There are a few collective + operations, noted in the code, that should be scheduled before + compute kernels to overlap the communication with the computation, + which is necessary for a speedup but not for correctness so that + ordering isn't imposed by the scheduler. Setting + CUDA_DEVICE_MAX_CONNECTIONS=1 forces the kernels to be scheduled + in the order they are called. + + Arguments: + + input (torch.Tensor required): input like torch.nn.functional.linear + + weight (torch.Tensor required): weight like torch.nn.functional.linear + + bias (torch.Tensor optional): bias like torch.nn.functional.linear + + gradient_accumulation_fusion (bool required): Perform the gradient + accumulation fusion, requires the custom CUDA extension + fused_weight_gradient_mlp_cuda module. To use + gradient_accumulation_fusion you must install APEX with + --cpp_ext and --cuda_ext. For example: "pip install + --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext .\" + " Note that the extension requires CUDA>=11. Otherwise, you + must turn off gradient accumulation fusion." + + async_grad_allreduce (bool required): Do the allreduce of input + gradients asyncronously with the computation of weight + gradients. If sequence_parallel is True, this must be + False, as no all reduce is performed. + + sequence_parallel (bool required): Indicates that sequence + parallelism is used and thus in the forward pass the input is + all gathered, and the backward pass the input gradients are + reduce scattered. + """ + if not sequence_parallel: + return F.linear(input, weight, bias) + args = [ + input, + weight, + bias, + gradient_accumulation_fusion, + async_grad_allreduce, + sequence_parallel, + ] + + if not linear_with_grad_accumulation_and_async_allreduce.warned: + if get_accelerator().device_name() == "cuda" and os.environ.get('CUDA_DEVICE_MAX_CONNECTIONS') != "1": + if sequence_parallel: + warnings.warn( + "When using sequence parallelism it is recommended to set the " + "environment variable CUDA_DEVICE_MAX_CONNECTIONS to 1 for " + "maximum speedup") + linear_with_grad_accumulation_and_async_allreduce.warned = True + + if async_grad_allreduce: + warnings.warn( + "When using async grad allreduce it is recommended to set the " + "environment variable CUDA_DEVICE_MAX_CONNECTIONS to 1 for " + "maximum speedup") + linear_with_grad_accumulation_and_async_allreduce.warned = True + + return LinearWithGradAccumulationAndAsyncCommunication.apply(*args) + +linear_with_grad_accumulation_and_async_allreduce.warned = False + +class ColumnParallelLinear(torch.nn.Module): + """Linear layer with column parallelism. + + The linear layer is defined as Y = XA + b. A is parallelized along + its second dimension as A = [A_1, ..., A_p]. + + Arguments: + input_size: first dimension of matrix A. + output_size: second dimension of matrix A. + + Keyword Arguments + bias: If true, add bias + gather_output: If true, call all-gather on output and make Y available + to all GPUs, otherwise, every GPU will have its output + which is Y_i = XA_i + init_method: method to initialize weights. Note that bias is always set + to zero. + stride: For the strided linear layers. + keep_master_weight_for_test: This was added for testing and should be + set to False. It returns the master weights + used for initialization. + skip_bias_add: If True, do not add the bias term, instead + return it to be added by the caller. This + enables performance optimations where bias can + be fused with other elementwise operations. 
+ + skip_weight_param_allocation: If True, weight parameter is not allocated and must be passed + as a keyword argument `weight` during the forward pass. Note + that this does not affect bias, which will be allocated if + bias is True. Defaults to False. + + config: ModelParallelConfig object + + """ + + def __init__(self, input_size, output_size, *, + config: ModelParallelConfig, + init_method: Callable, + bias=True, gather_output=False, stride=1, + keep_master_weight_for_test=False, + skip_bias_add=False, + skip_weight_param_allocation: bool=False, + moe=False, enable_expert_tensor_parallelism=False): + torch.nn.Module.__init__(self) + + # Keep input parameters + self.input_size = input_size + self.output_size = output_size + self.gather_output = gather_output + # Divide the weight matrix along the last dimension. + if moe and (not enable_expert_tensor_parallelism): + world_size = 1 + self.is_expert_without_slicing = True + else: + world_size = get_tensor_model_parallel_world_size() + self.is_expert_without_slicing = False + self.output_size_per_partition = divide(output_size, world_size) + self.skip_bias_add = skip_bias_add + self.config = config + + args = get_args() + + # Parameters. + # Note: torch.nn.functional.linear performs XA^T + b and as a result + # we allocate the transpose. + # Initialize weight. + if not skip_weight_param_allocation: + if config.use_cpu_initialization: + self.weight = Parameter(torch.empty(self.output_size_per_partition, + self.input_size, + dtype=config.params_dtype)) + if config.perform_initialization: + self.master_weight = _initialize_affine_weight_cpu( + self.weight, self.output_size, self.input_size, + self.output_size_per_partition, 0, init_method, + stride=stride, return_master_weight=keep_master_weight_for_test) + else: + self.weight = Parameter(torch.empty( + self.output_size_per_partition, self.input_size, + device=get_accelerator().current_device_name(), dtype=config.params_dtype)) + if config.perform_initialization: + _initialize_affine_weight_gpu(self.weight, init_method, + partition_dim=0, stride=stride) + else: + self.weight = None + + if bias: + if config.use_cpu_initialization: + self.bias = Parameter(torch.empty( + self.output_size_per_partition, dtype=config.params_dtype)) + else: + self.bias = Parameter(torch.empty( + self.output_size_per_partition, + device=get_accelerator().current_device_name(), + dtype=config.params_dtype)) + set_tensor_model_parallel_attributes(self.bias, True, 0, stride) + if config.perform_initialization: + # Always initialize bias to zero. + with torch.no_grad(): + self.bias.zero_() + else: + self.register_parameter('bias', None) + + self.async_tensor_model_parallel_allreduce = ( + config.async_tensor_model_parallel_allreduce and + world_size > 1) + + self.sequence_parallel = config.sequence_parallel + if self.sequence_parallel and world_size <= 1: + warnings.warn( + f"`sequence_parallel` is set to `True`, but tensor model parallel size is {world_size}. " + f"Disabling sequence parallel." + ) + self.sequence_parallel = False + + if config.gradient_accumulation_fusion and not _grad_accum_fusion_available: + raise RuntimeError( + "ColumnParallelLinear was called with gradient_accumulation_fusion set " + "to True but the custom CUDA extension fused_weight_gradient_mlp_cuda " + "module is not found. To use gradient_accumulation_fusion you must " + "install APEX with --cpp_ext and --cuda_ext. 
For example: " + "pip install --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext .\" " + "Note that the extension requires CUDA>=11. Otherwise, you must turn off " + "gradient accumulation fusion." + ) + self.gradient_accumulation_fusion = config.gradient_accumulation_fusion + + if self.async_tensor_model_parallel_allreduce and self.sequence_parallel: + raise RuntimeError( + "`async_tensor_model_parallel_allreduce` and `sequence_parallel` " + "cannot be enabled at the same time." + ) + + self.output_parallel_linear = F.linear + if self.training and args.transformer_impl == "transformer_engine" \ + and get_accelerator().device_name() == "hpu": + linear_fp8 = te.Linear( + self.input_size, + self.output_size_per_partition, + skip_weight_param_allocation=True, + bias=bias, + minimize_memory=not args.cache_fp8_weight) + self.output_parallel_linear = FP8ModuleRunner(linear_fp8, args.fp8_interval, args.cache_fp8_weight_fwd) + + def forward(self, + input_: torch.Tensor, + weight: Optional[torch.Tensor] = None): + """Forward of ColumnParallelLinear + + Args: + input_: 3D tensor whose order of dimension is [sequence, batch, hidden] + + weight (optional): weight tensor to use, compulsory when + skip_weight_param_allocation is True. + + Returns: + - output + - bias + + """ + args = get_args() + + if weight is None: + if self.weight is None: + raise RuntimeError("weight was not supplied to ColumnParallelLinear forward pass " + "and skip_weight_param_allocation is True.") + weight = self.weight + else: + # Check the weight passed in is the correct shape + expected_shape = (self.output_size_per_partition, self.input_size) + if weight.shape != expected_shape: + raise RuntimeError(f"supplied weight's shape is {tuple(weight.shape)}, " + f"not {expected_shape} as expected") + + bias = self.bias if not self.skip_bias_add else None + + if self.async_tensor_model_parallel_allreduce or \ + self.sequence_parallel or \ + self.is_expert_without_slicing: # non-expert only tensor parallelism + input_parallel = input_ + else: + input_parallel = copy_to_tensor_model_parallel_region(input_) + # Matrix multiply. + if args.transformer_impl == "transformer_engine" and get_accelerator().device_name() == 'hpu': + gather_input = lambda x: x + if self.sequence_parallel: + gather_input = gather_from_sequence_parallel_region + output_parallel = self.output_parallel_linear(gather_input(input_parallel), self.weight, self.bias) + else: + output_parallel = linear_with_grad_accumulation_and_async_allreduce( + input=input_parallel, + weight=weight, + bias=bias, + gradient_accumulation_fusion=self.gradient_accumulation_fusion, + async_grad_allreduce=self.async_tensor_model_parallel_allreduce, + sequence_parallel=self.sequence_parallel + ) + if self.gather_output and not self.is_expert_without_slicing: + # All-gather across the partitions. + assert not self.sequence_parallel + output = gather_from_tensor_model_parallel_region(output_parallel) + else: + output = output_parallel + output_bias = self.bias if self.skip_bias_add else None + return output, output_bias + + +class RowParallelLinear(torch.nn.Module): + """Linear layer with row parallelism. + + The linear layer is defined as Y = XA + b. A is parallelized along + its first dimension and X along its second dimension as: + - - + | A_1 | + | . | + A = | . | X = [X_1, ..., X_p] + | . | + | A_p | + - - + Arguments: + input_size: first dimension of matrix A. + output_size: second dimension of matrix A. + + Keyword Arguments: + bias: If true, add bias. 
Note that bias is not parallelized. + input_is_parallel: If true, we assume that the input is already + split across the GPUs and we do not split + again. + init_method: method to initialize weights. Note that bias is always set + to zero. + stride: For the strided linear layers. + keep_master_weight_for_test: This was added for testing and should be + set to False. It returns the master weights + used for initialization. + skip_bias_add: If True, do not add the bias term, instead + return it to be added by the caller. This + enables performance optimations where bias can + be fused with other elementwise operations. + config: ModelParallelConfig object + + """ + + def __init__(self, input_size: int, output_size: int, *, + config: ModelParallelConfig, + init_method: Callable, + bias: bool = True, + input_is_parallel: bool = False, + stride: int = 1, + keep_master_weight_for_test: bool = False, + skip_bias_add: bool = False, + moe=False, enable_expert_tensor_parallelism=False): + torch.nn.Module.__init__(self) + + # Keep input parameters + self.input_size = input_size + self.output_size = output_size + self.input_is_parallel = input_is_parallel + # Divide the weight matrix along the last dimension. + if moe and (not enable_expert_tensor_parallelism): + world_size = 1 + else: + world_size = get_tensor_model_parallel_world_size() + self.is_expert_without_slicing = moe and world_size==1 + self.input_size_per_partition = divide(input_size, world_size) + self.skip_bias_add = skip_bias_add + self.config = config + self.gradient_accumulation_fusion = config.gradient_accumulation_fusion + self.sequence_parallel = config.sequence_parallel + if self.sequence_parallel and not self.input_is_parallel: + raise RuntimeError("To enable `sequence_parallel`, `input_is_parallel` must be `True`") + + args = get_args() + + # Parameters. + # Note: torch.nn.functional.linear performs XA^T + b and as a result + # we allocate the transpose. + # Initialize weight. + if config.use_cpu_initialization: + self.weight = Parameter(torch.empty(self.output_size, + self.input_size_per_partition, + dtype=config.params_dtype)) + if config.perform_initialization: + self.master_weight = _initialize_affine_weight_cpu( + self.weight, self.output_size, self.input_size, + self.input_size_per_partition, 1, init_method, + stride=stride, return_master_weight=keep_master_weight_for_test, + params_dtype=config.params_dtype) + else: + self.weight = Parameter(torch.empty( + self.output_size, self.input_size_per_partition, + device=get_accelerator().current_device_name(), dtype=config.params_dtype)) + if config.perform_initialization: + _initialize_affine_weight_gpu(self.weight, init_method, + partition_dim=1, stride=stride) + if bias: + if config.use_cpu_initialization: + self.bias = Parameter(torch.empty(self.output_size, + dtype=config.params_dtype)) + else: + self.bias = Parameter(torch.empty( + self.output_size, device=get_accelerator().current_device_name(), + dtype=config.params_dtype)) + setattr(self.bias, 'sequence_parallel', self.sequence_parallel) + + if config.perform_initialization: + # Always initialize bias to zero. 
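+ # The bias of a row-parallel layer is not sharded: every tensor-parallel
+ # rank holds the full `output_size` bias, so a deterministic zero init
+ # keeps the replicas identical; the `sequence_parallel` attribute set
+ # above marks it for sequence-parallel gradient handling.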
+ with torch.no_grad(): + self.bias.zero_() + else: + self.register_parameter('bias', None) + + if args.transformer_impl == "local" and args.normalization != "rmsnorm": + self.tmp = torch.zeros((self.output_size, self.input_size_per_partition), device=get_accelerator().current_device_name(), dtype=torch.int8) + + self.output_parallel_linear = F.linear + if self.training and args.transformer_impl == "transformer_engine" and get_accelerator().device_name() == 'hpu': + linear_fp8 = te.Linear( + self.input_size_per_partition, + self.output_size, + skip_weight_param_allocation=True, + bias=bias, + minimize_memory=not args.cache_fp8_weight) + self.output_parallel_linear = FP8ModuleRunner(linear_fp8, args.fp8_interval, args.cache_fp8_weight_fwd) + + + def forward(self, input_): + """Forward of RowParallelLinear + + Args: + input_: 3D tensor whose order of dimension is [sequence, batch, hidden] + + Returns: + - output + - bias + """ + # Set up backprop all-reduce. + if self.input_is_parallel or self.is_expert_without_slicing: + input_parallel = input_ + else: + assert not self.sequence_parallel + input_parallel = scatter_to_tensor_model_parallel_region(input_) + # Matrix multiply. + if get_args().transformer_impl == "transformer_engine" and get_accelerator().device_name() == 'hpu': + gather_input = lambda x: x + if self.sequence_parallel: + gather_input = gather_from_sequence_parallel_region + output_parallel = self.output_parallel_linear(gather_input(input_parallel), self.weight, self.bias) + else: + output_parallel = linear_with_grad_accumulation_and_async_allreduce( + input=input_parallel, + weight=self.weight, + bias=None, + gradient_accumulation_fusion=self.gradient_accumulation_fusion, + async_grad_allreduce=False, + sequence_parallel=False, + ) + + # All-reduce across all the partitions. + if self.sequence_parallel: + output_ = reduce_scatter_to_sequence_parallel_region(output_parallel) + elif self.is_expert_without_slicing: # non-expert only tensor-parallelism + output_ = output_parallel + else: + output_ = reduce_from_tensor_model_parallel_region(output_parallel) + if not self.skip_bias_add: + output = output_ + self.bias if self.bias is not None else output_ + output_bias = None + else: + output = output_ + output_bias = self.bias + return output, output_bias diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/mappings.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..ed01e691ee19ee93f6ab7a96aa2bbbb7fbaa10a3 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/mappings.py @@ -0,0 +1,290 @@ +# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import torch + +from megatron.core.parallel_state import ( + get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + get_tensor_model_parallel_group, +) +from .utils import split_tensor_along_last_dim +from deepspeed.accelerator import get_accelerator + + +def get_async_op(): + async_op = False + if get_accelerator().device_name() == "hpu": + async_op = True + return async_op + + +def _reduce(input_): + """All-reduce the input tensor across model parallel group.""" + + # Bypass the function if we are using only 1 GPU. + if get_tensor_model_parallel_world_size()==1: + return input_ + + # All-reduce. 
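+ # all_reduce runs in place on `input_`; on HPU get_async_op() returns True,
+ # so the collective is launched asynchronously and the returned work handle
+ # is not explicitly waited on here.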
+ torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group(), async_op=get_async_op()) + + return input_ + + +def _split_along_last_dim(input_): + """Split the tensor along its last dimension and keep the + corresponding slice.""" + + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + + # Split along last dimension. + input_list = split_tensor_along_last_dim(input_, world_size) + + # Note: torch.split does not create contiguous tensors by default. + rank = get_tensor_model_parallel_rank() + output = input_list[rank].contiguous() + + return output + + +def _split_along_first_dim(input_): + """Split the tensor along its first dimension and keep the + corresponding slice.""" + + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + + # Split along first dimension. + dim_size = input_.size()[0] + assert dim_size % world_size == 0, \ + "First dimension of the tensor should be divisible by tensor parallel size" + local_dim_size = dim_size // world_size + rank = get_tensor_model_parallel_rank() + dim_offset = rank * local_dim_size + + output = input_[dim_offset:dim_offset+local_dim_size].contiguous() + + return output + + +def _gather_along_last_dim(input_): + """Gather tensors and concatinate along the last dimension.""" + + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + + # Size and dimension. + last_dim = input_.dim() - 1 + rank = get_tensor_model_parallel_rank() + + tensor_list = [torch.empty_like(input_) for _ in range(world_size)] + tensor_list[rank] = input_ + torch.distributed.all_gather(tensor_list, input_, group=get_tensor_model_parallel_group(), async_op=get_async_op()) + + # Note: torch.cat already creates a contiguous tensor. + output = torch.cat(tensor_list, dim=last_dim).contiguous() + + return output + + +def _gather_along_first_dim(input_): + """Gather tensors and concatinate along the first dimension.""" + + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. + if world_size == 1: + return input_ + + dim_size = list(input_.size()) + dim_size[0] = dim_size[0] * world_size + + output = torch.empty(dim_size, dtype=input_.dtype, + device=get_accelerator().current_device_name()) + torch.distributed._all_gather_base(output, input_.contiguous(), + group=get_tensor_model_parallel_group(), + async_op=get_async_op()) + + return output + +def _reduce_scatter_along_first_dim(input_): + """Reduce-scatter the input tensor across model parallel group.""" + world_size = get_tensor_model_parallel_world_size() + # Bypass the function if we are using only 1 GPU. 
+ if world_size == 1: + return input_ + + dim_size = list(input_.size()) + assert dim_size[0] % world_size == 0, \ + "First dimension of the tensor should be divisible by tensor parallel size" + + dim_size[0] = dim_size[0] // world_size + + output = torch.empty(dim_size, dtype=input_.dtype, + device=get_accelerator().current_device_name()) + torch.distributed._reduce_scatter_base(output, input_.contiguous(), + group=get_tensor_model_parallel_group(), + async_op=get_async_op()) + return output + + +class _CopyToModelParallelRegion(torch.autograd.Function): + """Pass the input to the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + return input_ + + @staticmethod + def forward(ctx, input_): + return input_ + + @staticmethod + def backward(ctx, grad_output): + return _reduce(grad_output) + + +class _ReduceFromModelParallelRegion(torch.autograd.Function): + """All-reduce the input from the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + return _reduce(input_) + + @staticmethod + def forward(ctx, input_): + return _reduce(input_) + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +class _ScatterToModelParallelRegion(torch.autograd.Function): + """Split the input and keep only the corresponding chuck to the rank.""" + + @staticmethod + def symbolic(graph, input_): + return _split_along_last_dim(input_) + + @staticmethod + def forward(ctx, input_): + return _split_along_last_dim(input_) + + @staticmethod + def backward(ctx, grad_output): + return _gather_along_last_dim(grad_output) + + +class _GatherFromModelParallelRegion(torch.autograd.Function): + """Gather the input from model parallel region and concatinate.""" + + @staticmethod + def symbolic(graph, input_): + return _gather_along_last_dim(input_) + + @staticmethod + def forward(ctx, input_): + return _gather_along_last_dim(input_) + + @staticmethod + def backward(ctx, grad_output): + return _split_along_last_dim(grad_output) + + +class _ScatterToSequenceParallelRegion(torch.autograd.Function): + """Split the input and keep only the corresponding chuck to the rank.""" + + @staticmethod + def symbolic(graph, input_): + return _split_along_first_dim(input_) + + @staticmethod + def forward(ctx, input_): + return _split_along_first_dim(input_) + + @staticmethod + def backward(ctx, grad_output): + return _gather_along_first_dim(grad_output) + + +class _GatherFromSequenceParallelRegion(torch.autograd.Function): + """Gather the input from sequence parallel region and concatinate.""" + + @staticmethod + def symbolic(graph, input_, tensor_parallel_output_grad=True): + return _gather_along_first_dim(input_) + + @staticmethod + def forward(ctx, input_, tensor_parallel_output_grad=True): + ctx.tensor_parallel_output_grad = tensor_parallel_output_grad + return _gather_along_first_dim(input_) + + @staticmethod + def backward(ctx, grad_output): + tensor_parallel_output_grad = ctx.tensor_parallel_output_grad + + # If the computation graph after the gather operation is + # in the tensor parallel mode, output gradients need to reduce + # scattered and whereas if the computation is duplicated, + # output gradients need to be scattered. 
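+ # In other words: the forward all-gather along the first (sequence)
+ # dimension pairs with a reduce-scatter in backward when the gathered
+ # activation feeds tensor-parallel compute (each rank holds only a partial
+ # gradient), and with a plain split when the downstream compute is
+ # replicated (each rank already holds the full gradient and just needs its
+ # own slice).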
+ if tensor_parallel_output_grad: + return _reduce_scatter_along_first_dim(grad_output), None + else: + return _split_along_first_dim(grad_output), None + + +class _ReduceScatterToSequenceParallelRegion(torch.autograd.Function): + """Reduce scatter the input from the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + return _reduce_scatter_along_first_dim(input_) + + @staticmethod + def forward(ctx, input_): + return _reduce_scatter_along_first_dim(input_) + + @staticmethod + def backward(ctx, grad_output): + return _gather_along_first_dim(grad_output) + + +# ----------------- +# Helper functions. +# ----------------- + +def copy_to_tensor_model_parallel_region(input_): + return _CopyToModelParallelRegion.apply(input_) + + +def reduce_from_tensor_model_parallel_region(input_): + return _ReduceFromModelParallelRegion.apply(input_) + + +def scatter_to_tensor_model_parallel_region(input_): + return _ScatterToModelParallelRegion.apply(input_) + + +def gather_from_tensor_model_parallel_region(input_): + return _GatherFromModelParallelRegion.apply(input_) + + +def scatter_to_sequence_parallel_region(input_): + return _ScatterToSequenceParallelRegion.apply(input_) + + +def gather_from_sequence_parallel_region(input_, tensor_parallel_output_grad=True): + return _GatherFromSequenceParallelRegion.apply(input_, tensor_parallel_output_grad) + + +def reduce_scatter_to_sequence_parallel_region(input_): + return _ReduceScatterToSequenceParallelRegion.apply(input_) + diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/random.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/random.py new file mode 100644 index 0000000000000000000000000000000000000000..dd287115a59586ace9f8d569f19dc5559ee0f984 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/tensor_parallel/random.py @@ -0,0 +1,330 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +# Parts of the code here are adapted from PyTorch +# repo: https://github.com/pytorch/pytorch + +import contextlib + +import torch +from torch import _C +from deepspeed.accelerator import get_accelerator +from torch.utils.checkpoint import detach_variable + +from megatron import get_args +from megatron.memory import allocate_mem_buff +from megatron.core.parallel_state import ( + get_data_parallel_rank, + get_tensor_model_parallel_group, + get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, +) + +from .utils import ( + split_tensor_into_1d_equal_chunks, + gather_split_1d_tensor, +) + +from megatron.core.utils import safely_set_viewless_tensor_data + +import deepspeed + +# Default name for the model parallel rng tracker. +_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng' + +# Whether apply model parallelsim to checkpointed hidden states. 
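+ # Sizing illustration for init_checkpointed_activations_memory_buffer()
+ # below (hypothetical config): micro_batch_size=1,
+ # max_position_embeddings=2048, hidden_size=5120,
+ # tensor_model_parallel_size=4 gives per_layer = 2,621,440 elements; with
+ # num_layers=40 and checkpoint_num_layers=1 the buffer holds
+ # 40 * per_layer ~= 105M elements, i.e. roughly 200 MiB in fp16.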
+_CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = None + + +def init_checkpointed_activations_memory_buffer(): + """Initializ the memory buffer for the checkpointed activations.""" + args = get_args() + + per_layer = args.micro_batch_size * args.max_position_embeddings * \ + args.hidden_size // args.tensor_model_parallel_size + assert args.num_layers % args.checkpoint_num_layers == 0, \ + 'number of layers is not divisible by checkpoint-num-layers' + num_checkpointer_layers = args.num_layers // args.checkpoint_num_layers + numel = per_layer * num_checkpointer_layers + dtype = torch.half + if not args.fp16: + dtype = torch.float + + global _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER + assert _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is None, \ + 'checkpointed activations memory buffer is already allocated.' + _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = allocate_mem_buff( + 'checkpointed activations', numel, dtype, track_usage=False) + + +def reset_checkpointed_activations_memory_buffer(): + """Reset the memory used for checkpointing.""" + if _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is not None: + _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER.reset() + +def _set_cuda_rng_state(new_state, device=-1): + """Sets the random number generator state of the current GPU. + + Argumentss: + new_state (torch.ByteTensor): The desired state + This function is adapted from PyTorch repo (torch.cuda.set_rng_state) + with a single change: the input state is not cloned. Cloning caused + major performance issues for +4 GPU cases. + """ + if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState): + # older PyTorch + def cb(): + with get_accelerator().device(device): + _C._cuda_setRNGState(new_state) + else: + # newer PyTorch + if device == -1: + device = torch.device(get_accelerator().device_name()) + elif isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device(get_accelerator().device_name(), device) + + def cb(): + idx = device.index + if idx is None: + idx = get_accelerator().current_device() + default_generator = get_accelerator().default_generator(idx) + default_generator.set_state(new_state) + + get_accelerator().lazy_call(cb) + + + +class CudaRNGStatesTracker: + """Tracker for the cuda RNG states. + + Using the `add` method, a cuda rng state is initialized based on + the input `seed` and is assigned to `name`. Later, by forking the + rng state, we can perform operations and return to our starting + cuda state. + """ + + def __init__(self): + # Map from a string name to the cuda rng state. + self.states_ = {} + # Seeds are just for book keeping and ensure no seed is set twice. + self.seeds_ = set() + + def reset(self): + """Set to the initial state (no tracker).""" + self.states_ = {} + self.seeds_ = set() + + def get_states(self): + """Get rng states. Copy the dictionary so we have direct + pointers to the states, not just a pointer to the dictionary.""" + states = {} + for name in self.states_: + states[name] = self.states_[name] + return states + + def set_states(self, states): + """Set the rng states. For efficiency purposes, we do not check + the size of seed for compatibility.""" + self.states_ = states + + def add(self, name, seed): + """Track the rng state.""" + # Check seed is not already used. + if seed in self.seeds_: + raise Exception('seed {} already exists'.format(seed)) + self.seeds_.add(seed) + # Check that state is not already defined. 
+ if name in self.states_: + raise Exception('cuda rng state {} already exists'.format(name)) + # Get the current rng state. + orig_rng_state = get_accelerator().get_rng_state() + # Set the new state and store it. + get_accelerator().manual_seed(seed) + self.states_[name] = get_accelerator().get_rng_state() + # Reset rng state to what it was. + _set_cuda_rng_state(orig_rng_state) + + @contextlib.contextmanager + def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME): + """Fork the cuda rng state, perform operations, and exit with + the original state.""" + # Check if we have added the state + if name not in self.states_: + print(name, self.states_) + raise Exception('cuda rng state {} is not added'.format(name)) + # Store current rng state. + orig_cuda_rng_state = get_accelerator().get_rng_state() + # Set rng state to the desired one + _set_cuda_rng_state(self.states_[name]) + # Do the stuff we wanted to do. + try: + yield + finally: + # Update the current rng state for later use. + self.states_[name] = get_accelerator().get_rng_state() + # And set the state to the original state we started with. + _set_cuda_rng_state(orig_cuda_rng_state) + + +# RNG tracker object. +_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker() + +def get_cuda_rng_tracker(): + """Get cuda rng tracker.""" + if deepspeed.checkpointing.is_configured(): + return deepspeed.checkpointing.get_cuda_rng_tracker() + + return _CUDA_RNG_STATE_TRACKER + + +def model_parallel_cuda_manual_seed(seed): + """Initialize model parallel cuda seed. + + This function should be called after the model parallel is + initialized. Also, no torch.cuda.manual_seed should be called + after this function. Basically, this is replacement for that + function. + Two set of RNG states are tracked: + default state: This is for data parallelism and is the same among a + set of model parallel GPUs but different across + different model paralle groups. This is used for + example for dropout in the non-tensor-model-parallel regions. + tensor-model-parallel state: This state is different among a set of model + parallel GPUs, but the same across data parallel + groups. This is used for example for dropout in + model parallel regions. + """ + if deepspeed.checkpointing.is_configured(): + return deepspeed.checkpointing.model_parallel_cuda_manual_seed(seed) + + # 2718 is just for fun and any POSITIVE value will work. + offset = seed + 2718 + tensor_model_parallel_seed = offset + get_tensor_model_parallel_rank() + # Data parallel gets the original seed. + data_parallel_seed = seed + + if torch.distributed.get_rank() == 0: + print('> initializing model parallel cuda seeds on global rank {}, ' + 'model parallel rank {}, and data parallel rank {} with ' + 'model parallel seed: {} and data parallel seed: {}'.format( + torch.distributed.get_rank(), get_tensor_model_parallel_rank(), + get_data_parallel_rank(), tensor_model_parallel_seed, + data_parallel_seed), flush=True) + _CUDA_RNG_STATE_TRACKER.reset() + # Set the default state. + get_accelerator().manual_seed(data_parallel_seed) + # and model parallel state. 
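+ # The state added below is the one consumed via
+ # get_cuda_rng_tracker().fork() (e.g. around attention dropout), so ops run
+ # under the fork draw from the tensor-model-parallel stream while everything
+ # else uses the default, data-parallel seed set just above.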
+ _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, + tensor_model_parallel_seed) + + +def model_parallel_reconfigure_tp_seed(seed): + if deepspeed.checkpointing.is_configured(): + return deepspeed.checkpointing.model_parallel_reconfigure_tp_seed(seed) + + model_parallel_seed = seed + 2718 + get_tensor_model_parallel_rank() + with _CUDA_RNG_STATE_TRACKER.fork(): + get_accelerator().manual_seed(model_parallel_seed) + + +class CheckpointFunction(torch.autograd.Function): + """This function is adapted from torch.utils.checkpoint with + two main changes: + 1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state` + 2) the states in the model parallel tracker are also properly + tracked/set/reset. + """ + @staticmethod + def forward(ctx, run_function, distribute_saved_activations, *args): + ctx.run_function = run_function + ctx.distribute_saved_activations \ + = distribute_saved_activations + + # Copy the rng states. + ctx.fwd_cpu_rng_state = torch.get_rng_state() + ctx.fwd_cuda_rng_state = get_accelerator().get_rng_state() + ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() + + with torch.no_grad(): + outputs = run_function(*args) + + # Divide hidden states across model parallel group and only keep + # the chunk corresponding to the current rank. + if distribute_saved_activations: + ctx.input_0_shape = args[0].data.shape + safely_set_viewless_tensor_data( + args[0], + split_tensor_into_1d_equal_chunks(args[0].data, new_buffer=True)) + + # HACK: currently when DeepSpeed is used, we always set + # distribute_saved_activations to false, and use the following older + # activation checkpointing mechanisms + if _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is not None: + ctx.input_0_shape = args[0].data.shape + args[0].data = split_tensor_into_1d_equal_chunks(args[0].data) + args[0].data = _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER.add( + args[0].data) + + # Store everything. + ctx.save_for_backward(*args) + + return outputs + + @staticmethod + def backward(ctx, *args): + if not torch.autograd._is_checkpoint_valid(): + raise RuntimeError("Checkpointing is not compatible with .grad(), " + "please use .backward() if possible") + inputs = ctx.saved_tensors + if ctx.distribute_saved_activations: + safely_set_viewless_tensor_data( + inputs[0], + gather_split_1d_tensor(inputs[0].data).view(ctx.input_0_shape)) + # HACK: currently when DeepSpeed is used, we always set + # distribute_saved_activations to false, and use the following older + # activation checkpointing mechanisms + if _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is not None: + inputs[0].data = gather_split_1d_tensor(inputs[0].data) + inputs[0].data = inputs[0].data.view(ctx.input_0_shape) + + # Store the current states. + bwd_cpu_rng_state = torch.get_rng_state() + bwd_cuda_rng_state = get_accelerator().get_rng_state() + bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() + + # Set the states to what it used to be before the forward pass. + torch.set_rng_state(ctx.fwd_cpu_rng_state) + _set_cuda_rng_state(ctx.fwd_cuda_rng_state) + get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker) + + # Compute the forward pass. + detached_inputs = detach_variable(inputs) + with torch.enable_grad(): + outputs = ctx.run_function(*detached_inputs) + + # Set the states back to what it was at the start of this function. 
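+ # (The recomputation above was replayed under the forward-pass RNG states so
+ # dropout masks match the original forward; restoring the states captured at
+ # the top of backward() keeps any later RNG use unaffected by that replay.)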
+ torch.set_rng_state(bwd_cpu_rng_state) + _set_cuda_rng_state(bwd_cuda_rng_state) + get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker) + + if isinstance(outputs, torch.Tensor): + outputs = (outputs,) + elif len(outputs) == 2 and isinstance(outputs[1], torch.Tensor) and \ + torch.equal(outputs[1], torch.tensor(0).to(get_accelerator().device_name())): + # a hacky solution to overcome issue when running old script examples/pretrain_gpt_distributed.sh + outputs = (outputs[0],) + torch.autograd.backward(outputs, args) + grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else inp + for inp in detached_inputs) + return (None, None) + grads + + +def checkpoint(function, distribute_saved_activations, *args): + """Checkpoint a model or part of the model. + This has been directly copied from torch.utils.checkpoint.""" + if deepspeed.checkpointing.is_configured(): + return deepspeed.checkpointing.checkpoint(function, *args) + + return CheckpointFunction.apply(function, + distribute_saved_activations, *args) diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/attention.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..15818bddf1f67f20469b82146109cbaf72f0b659 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/attention.py @@ -0,0 +1,267 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +from abc import ABC, abstractmethod +from .enums import AttnMaskType +from .transformer_config import TransformerConfig +import torch + +from megatron.core import parallel_state, tensor_parallel +from megatron.core.transformer.core_attention import CoreAttention +from megatron.core.utils import divide + +from megatron.core.transformer.module import MegatronModule +from megatron.core.transformer.enums import AttnType, AttnMaskType +from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.transformer.custom_layers.transformer_engine import \ + TECoreAttention, TEColumnParallelLinear, TERowParallelLinear + +class Attention(MegatronModule, ABC): + """Attention layer abstract class. + + This layer only contains common modules required for the "self attn" and + "cross attn" specializations. + """ + + def __init__( + self, + config: TransformerConfig, + layer_number: int = 1, + attn_mask_type=AttnMaskType.padding, + ): + super().__init__(config=config) + + self.config = config + self.layer_number = layer_number + self.attn_mask_type = attn_mask_type + + self.projection_size = self.config.kv_channels * self.config.num_attention_heads + + # Per attention head and per partition values. + world_size = parallel_state.get_tensor_model_parallel_world_size() + self.hidden_size_per_attention_head = divide(self.projection_size, self.config.num_attention_heads) + self.num_attention_heads_per_partition = divide(self.config.num_attention_heads, world_size) + + self.core_attention = TECoreAttention( + config=self.config, + layer_number=self.layer_number, + attn_mask_type=self.attn_mask_type + ) + + self.checkpoint_core_attention = self.config.recompute_granularity == 'selective' + + # Output. 
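+ # The attention projections follow the usual Megatron tensor-parallel
+ # pattern: the QKV/Q/KV projections defined in the Self/CrossAttention
+ # subclasses are column-parallel (heads are split across ranks), while this
+ # output projection is row-parallel, consuming the per-rank head outputs and
+ # reducing the partial results across the tensor-parallel group.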
+ self.linear_proj = TERowParallelLinear( + self.projection_size, + self.config.hidden_size, + config=self.config, + init_method=self.config.output_layer_init_method, + bias=self.config.add_bias_linear, + skip_bias_add=True, + ) + + def _checkpointed_attention_forward(self, query, key, value, attention_mask): + """Forward method with selective activation checkpointing.""" + + def custom_forward(*inputs): + query = inputs[0] + key = inputs[1] + value = inputs[2] + attention_mask = inputs[3] + output_ = self.core_attention(query, key, value, attention_mask) + return output_ + + hidden_states = tensor_parallel.checkpoint( + custom_forward, False, query, key, value, attention_mask + ) + + return hidden_states + + def _allocate_memory(self, inference_max_sequence_len, batch_size): + return torch.empty( + inference_max_sequence_len, + batch_size, + self.num_attention_heads_per_partition, + self.hidden_size_per_attention_head, + dtype=self.params_dtype, + device=torch.cuda.current_device(), + ) + + @abstractmethod + def get_query_key_value_tensors(self, hidden_states, key_value_states): + """ + This method needs to be implemented based on whether the derived class + is "self-attn" or "cross-attn". + """ + + def forward(self, hidden_states, attention_mask, key_value_states=None, inference_params=None): + # hidden_states: [sq, b, h] + + # ================================================= + # Pre-allocate memory for key-values for inference. + # ================================================= + # @jcasper how should we do inference_params? + # can do 1. args, 2. add inference params to TransformerConfig + # 3. create another config object 4. something else? + if inference_params: + if self.layer_number not in inference_params.key_value_memory_dict: + inf_max_seq_len = inference_params.max_sequence_len + inf_max_batch_size = inference_params.max_batch_size + inference_key_memory = self._allocate_memory(inf_max_seq_len, inf_max_batch_size) + inference_value_memory = self._allocate_memory(inf_max_seq_len, inf_max_batch_size) + inference_params.key_value_memory_dict[self.layer_number] = ( + inference_key_memory, + inference_value_memory, + ) + else: + inference_key_memory, inference_value_memory = inference_params.key_value_memory_dict[ + self.layer_number + ] + + # ===================== + # Query, Key, and Value + # ===================== + # Get the query, key and value tensors based on the type of attention - + # self or cross attn. + query, key, value = self.get_query_key_value_tensors(hidden_states, key_value_states) + + # ================================== + # Adjust key and value for inference + # ================================== + + if inference_params: + batch_start = inference_params.batch_size_offset + batch_end = batch_start + key.size(1) + assert batch_end <= inference_key_memory.size(1) + sequence_start = inference_params.sequence_len_offset + sequence_end = sequence_start + key.size(0) + assert sequence_end <= inference_key_memory.size(0) + # Copy key and values. + inference_key_memory[sequence_start:sequence_end, batch_start:batch_end, ...] = key + inference_value_memory[sequence_start:sequence_end, batch_start:batch_end, ...] = value + key = inference_key_memory[:sequence_end, batch_start:batch_end, ...] + value = inference_value_memory[:sequence_end, batch_start:batch_end, ...] 
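+ # Illustrative walk-through of the cache update above (hypothetical sizes):
+ # with max_sequence_len=2048, max_batch_size=8, a 16-token prompt and
+ # batch_size_offset=0, sequence_len_offset=0, the fresh key/value of shape
+ # [16, 8, np, hn] is written into rows [0:16) of the preallocated
+ # [2048, 8, np, hn] buffers, and key/value are re-sliced to
+ # [:16, 0:8, ...] so core attention always sees the full cached prefix.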
+ + # ================================== + # core attention computation + # ================================== + + if self.checkpoint_core_attention: + core_attn_out = self._checkpointed_attention_forward(query, key, value, attention_mask) + else: + core_attn_out = self.core_attention(query, key, value, attention_mask) + + # ================= + # Output. [sq, b, h] + # ================= + + output, bias = self.linear_proj(core_attn_out) + + return output, bias + +class SelfAttention(Attention): + """Self-attention layer class + + Self-attention layer takes input with size [s, b, h] + and returns output of the same size. + """ + def __init__(self, + config: TransformerConfig, + layer_number: int = 1, + attn_mask_type=AttnMaskType.padding): + super().__init__( + config=config, + layer_number=layer_number, + attn_mask_type=attn_mask_type + ) + + self.linear_qkv = TEColumnParallelLinear( + self.config.hidden_size, + 3 * self.projection_size, + config=self.config, + init_method=self.config.init_method, + bias=self.config.add_bias_linear, + skip_bias_add=False + ) + + def get_query_key_value_tensors(self, hidden_states, key_value_states=None): + """ + Derives `query`, `key` and `value` tensors from `hidden_states`. + """ + # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)] + mixed_qkv, _ = self.linear_qkv(hidden_states) + + # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn] + new_tensor_shape = mixed_qkv.size()[:-1] + ( + self.num_attention_heads_per_partition, + 3 * self.hidden_size_per_attention_head, + ) + mixed_qkv = mixed_qkv.view(*new_tensor_shape) + + # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn] + (query, key, value) = tensor_parallel.split_tensor_along_last_dim(mixed_qkv, 3) + + return query, key, value + +class CrossAttention(Attention): + """Cross-attention layer class + + Cross-attention layer takes input with size [s, b, h] and context with size + [s, b, h] and returns output of the same size. + """ + def __init__(self, + config: TransformerConfig, + layer_number: int = 1, + attn_mask_type=AttnMaskType.padding): + super().__init__( + config=config, + layer_number=layer_number, + attn_mask_type=attn_mask_type + ) + + self.linear_q = TEColumnParallelLinear( + self.config.hidden_size, + self.projection_size, + config=self.config, + init_method=self.config.init_method, + bias=self.config.add_bias_linear, + skip_bias_add=False + ) + + self.linear_kv = TEColumnParallelLinear( + self.config.hidden_size, + 2 * self.projection_size, + config=self.config, + init_method=self.config.init_method, + bias=self.config.add_bias_linear, + skip_bias_add=False + ) + + def get_query_key_value_tensors(self, hidden_states, key_value_states): + """ + Derives `query` tensor from `hidden_states`, and `key`/`value` tensors + from `key_value_states`. 
+ """ + # Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)] + mixed_kv, _ = self.linear_kv(key_value_states) + + # [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn] + new_tensor_shape = mixed_kv.size()[:-1] + ( + self.num_attention_heads_per_partition, + 2 * self.hidden_size_per_attention_head, + ) + mixed_kv = mixed_kv.view(*new_tensor_shape) + + # [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn] + (key, value) = tensor_parallel.split_tensor_along_last_dim(mixed_kv, 2) + + # Attention head [sq, b, h] --> [sq, b, hp] + query, _ = self.linear_q(hidden_states) + + # [sq, b, hp] --> [sq, b, np, hn] + new_tensor_shape = query.size()[:-1] + ( + self.num_attention_heads_per_partition, + self.hidden_size_per_attention_head, + ) + query = query.view(*new_tensor_shape) + + return query, key, value diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/core_attention.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/core_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..aa5795a794c27f27028990554923629113243e0c --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/core_attention.py @@ -0,0 +1,146 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + + +import math + +import torch +from torch import Tensor + +from megatron.core import parallel_state, tensor_parallel +from megatron.core.utils import divide +from megatron.core.transformer.module import MegatronModule +from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.transformer.enums import AttnMaskType +from megatron.core.transformer.utils import attention_mask_func +from megatron.core.fusions.fused_softmax import FusedScaleMaskSoftmax + + +class CoreAttention(MegatronModule): + """ + Region where selective activation recomputation is applied. + This region is memory intensive but less compute intensive which + makes activation checkpointing more efficient for LLMs (20B+). + See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details. + + We use the following notation: + h: hidden size + n: number of attention heads + p: number of tensor model parallel partitions + b: batch size + s: sequence length + """ + + def __init__(self, config: TransformerConfig, layer_number: int = 1, attn_mask_type=AttnMaskType.padding): + super().__init__(config=config) + + self.config: TransformerConfig = config + + self.layer_number = max(1, layer_number) + self.attn_mask_type = attn_mask_type + + projection_size = self.config.kv_channels * config.num_attention_heads + + # Per attention head and per partition values. 
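+ # For illustration (hypothetical config): kv_channels=128 and
+ # num_attention_heads=32 give projection_size=4096; with a tensor-parallel
+ # world size of 8 this yields hidden_size_per_partition=512,
+ # hidden_size_per_attention_head=128 and 4 attention heads per partition.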
+ world_size = parallel_state.get_tensor_model_parallel_world_size() + self.hidden_size_per_partition = divide(projection_size, world_size) + self.hidden_size_per_attention_head = divide(projection_size, config.num_attention_heads) + self.num_attention_heads_per_partition = divide(config.num_attention_heads, world_size) + + coeff = None + self.norm_factor = math.sqrt(self.hidden_size_per_attention_head) + if self.config.apply_query_key_layer_scaling: + coeff = self.layer_number + self.norm_factor *= coeff + + self.scale_mask_softmax = FusedScaleMaskSoftmax( + input_in_fp16=self.config.fp16, + input_in_bf16=self.config.bf16, + attn_mask_type=self.attn_mask_type, + scaled_masked_softmax_fusion=self.config.masked_softmax_fusion, + mask_func=attention_mask_func, + softmax_in_fp32=self.config.attention_softmax_in_fp32, + scale=coeff, + ) + + # Dropout. Note that for a single iteration, this layer will generate + # different outputs on different number of parallel partitions but + # on average it should not be partition dependent. + self.attention_dropout = torch.nn.Dropout(self.config.attention_dropout) + + def forward(self, query_layer: Tensor, key_layer: Tensor, value_layer: Tensor, attention_mask: Tensor): + + # =================================== + # Raw attention scores. [b, n/p, s, s] + # =================================== + + # [b, np, sq, sk] + output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0)) + + # [sq, b, np, hn] -> [sq, b * np, hn] + query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1) + # [sk, b, np, hn] -> [sk, b * np, hn] + key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1) + + # preallocting input tensor: [b * np, sq, sk] + matmul_input_buffer = parallel_state.get_global_memory_buffer().get_tensor( + (output_size[0] * output_size[1], output_size[2], output_size[3]), query_layer.dtype, "mpu" + ) + + # Raw attention scores. [b * np, sq, sk] + matmul_result = torch.baddbmm( + matmul_input_buffer, + query_layer.transpose(0, 1), # [b * np, sq, hn] + key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] + beta=0.0, + alpha=(1.0 / self.norm_factor), + ) + + # change view to [b, np, sq, sk] + attention_scores = matmul_result.view(*output_size) + + # =========================== + # Attention probs and dropout + # =========================== + + # attention scores and attention mask [b, np, sq, sk] + attention_probs: Tensor = self.scale_mask_softmax(attention_scores, attention_mask) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + + if not self.config.sequence_parallel: + with tensor_parallel.get_cuda_rng_tracker().fork(): + attention_probs = self.attention_dropout(attention_probs) + else: + attention_probs = self.attention_dropout(attention_probs) + + # ========================= + # Context layer. [sq, b, hp] + # ========================= + + # value_layer -> context layer. 
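+ # Together with the scaled baddbmm above (alpha = 1/norm_factor) and the
+ # masked softmax, this bmm completes the per-head
+ # softmax(Q K^T / norm_factor) V product in batched [b*np, ...] form.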
+ # [sk, b, np, hn] --> [b, np, sq, hn] + + # context layer shape: [b, np, sq, hn] + output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3)) + + # change view [sk, b * np, hn] + value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1) + + # change view [b * np, sq, sk] + attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) + + # matmul: [b * np, sq, hn] + context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) + + # change view [b, np, sq, hn] + context_layer = context_layer.view(*output_size) + + # [b, np, sq, hn] --> [sq, b, np, hn] + context_layer = context_layer.permute(2, 0, 1, 3).contiguous() + + # [sq, b, np, hn] --> [sq, b, hp] + new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) + context_layer = context_layer.view(*new_context_layer_shape) + + return context_layer diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/module.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/module.py new file mode 100644 index 0000000000000000000000000000000000000000..43d1bccb6faa4fc8c12efcc379c7b220a49af42f --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/module.py @@ -0,0 +1,118 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +"""Megatron Module""" + +import torch +from torch.autograd import Variable +from torch.nn.parameter import Parameter + +from megatron.core import parallel_state, tensor_parallel +from megatron.core.transformer.transformer_config import TransformerConfig + + +_FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor) +_HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor) +_BF16_TYPES = (torch.BFloat16Tensor, torch.cuda.BFloat16Tensor) + + +def param_is_not_shared(param): + return not hasattr(param, 'shared') or not param.shared + + +class MegatronModule(torch.nn.Module): + """Megatron specific extensions of torch Module with support + for pipelining.""" + + # def __init__(self, config: TransformerConfig, share_word_embeddings=True): + def __init__(self, config: TransformerConfig): + super().__init__() + self.config = config + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + """Use this function to override the state dict for + saving checkpoints.""" + return self.state_dict(prefix=prefix, keep_vars=keep_vars) + + +def conversion_helper(val, conversion): + """Apply conversion to val. 
Recursively apply conversion if `val` + #is a nested tuple/list structure.""" + if not isinstance(val, (tuple, list)): + return conversion(val) + rtn = [conversion_helper(v, conversion) for v in val] + if isinstance(val, tuple): + rtn = tuple(rtn) + return rtn + + +def fp32_to_float16(val, float16_convertor): + """Convert fp32 `val` to fp16/bf16""" + + def half_conversion(val): + val_typecheck = val + if isinstance(val_typecheck, (Parameter, Variable)): + val_typecheck = val.data + if isinstance(val_typecheck, _FLOAT_TYPES): + val = float16_convertor(val) + return val + + return conversion_helper(val, half_conversion) + + +def float16_to_fp32(val): + """Convert fp16/bf16 `val` to fp32""" + + def float_conversion(val): + val_typecheck = val + if isinstance(val_typecheck, (Parameter, Variable)): + val_typecheck = val.data + if isinstance(val_typecheck, (_BF16_TYPES, _HALF_TYPES)): + val = val.float() + return val + + return conversion_helper(val, float_conversion) + + +class Float16Module(MegatronModule): + def __init__(self, config: TransformerConfig, module: torch.nn.Module): + super(Float16Module, self).__init__(config) + self.config = config + self.fp16 = config.fp16 + self.bf16 = config.bf16 + + if self.fp16: + self.add_module('module', module.half()) + + def float16_convertor(val): + return val.half() + + elif self.bf16: + self.add_module('module', module.bfloat16()) + + def float16_convertor(val): + return val.bfloat16() + + else: + raise Exception('Either config.fp16 or config.bf16 should be True.') + + self.float16_convertor = float16_convertor + + def set_input_tensor(self, input_tensor): + return self.module.set_input_tensor(input_tensor) + + def forward(self, *inputs, **kwargs): + if parallel_state.is_pipeline_first_stage(): + inputs = fp32_to_float16(inputs, self.float16_convertor) + outputs = self.module(*inputs, **kwargs) + if parallel_state.is_pipeline_last_stage(): + outputs = float16_to_fp32(outputs) + return outputs + + def state_dict(self, destination=None, prefix='', keep_vars=False): + return self.module.state_dict(prefix=prefix, keep_vars=keep_vars) + + def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): + return self.module.state_dict_for_save_checkpoint(prefix=prefix, keep_vars=keep_vars) + + def load_state_dict(self, state_dict, strict=True): + self.module.load_state_dict(state_dict, strict=strict) diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/transformer_layer.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/transformer_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..19804e4c6030d44f7e7c259756a865191fb70633 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/transformer/transformer_layer.py @@ -0,0 +1,122 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +import torch + +from megatron.core.transformer.module import MegatronModule +from megatron.core.transformer.transformer_config import TransformerConfig +from megatron.core.transformer.enums import AttnType, AttnMaskType +from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add +from megatron.core.transformer.attention import SelfAttention +from megatron.core.transformer.mlp import MLP +from megatron.core.utils import make_viewless_tensor +from megatron.core.transformer.custom_layers.transformer_engine import TELayerNorm + +class TransformerLayer(MegatronModule): + """A single transformer layer. 
+ + Transformer layer takes input with size [s, b, h] and returns an + output of the same size. + """ + + def __init__( + self, config: TransformerConfig, layer_number: int = 1, self_attn_mask_type=AttnMaskType.padding, + ): + super().__init__(config=config) + self.config: TransformerConfig = config + + self.layer_number = layer_number + self.self_attn_mask_type = self_attn_mask_type + + # Layernorm on the input data. + # TODO: add pytorch only layernorm + self.input_layernorm = TELayerNorm( + hidden_size=self.config.hidden_size, + eps=self.config.layernorm_epsilon, + persist_layer_norm=self.config.persist_layer_norm, + sequence_parallel=self.config.sequence_parallel, + zero_centered_gamma=self.config.layernorm_zero_centered_gamma, + ) + + # Self attention. + self.self_attention = SelfAttention( + config=self.config, + layer_number=layer_number, + attn_mask_type=self_attn_mask_type, + ) + + # Layernorm on the attention output + self.post_self_attn_layernorm = TELayerNorm( + hidden_size=self.config.hidden_size, + eps=self.config.layernorm_epsilon, + persist_layer_norm=self.config.persist_layer_norm, + sequence_parallel=self.config.sequence_parallel, + zero_centered_gamma=self.config.layernorm_zero_centered_gamma, + ) + + # MLP + self.mlp = MLP(config=self.config) + + # @jcasper how should we handle nvfuser? + # Set bias+dropout+add fusion grad_enable execution handler. + # TORCH_MAJOR = int(torch.__version__.split('.')[0]) + # TORCH_MINOR = int(torch.__version__.split('.')[1]) + # use_nvfuser = TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10) + # self.bias_dropout_add_exec_handler = nullcontext if use_nvfuser else torch.enable_grad + self.bias_dropout_add_exec_handler = torch.enable_grad + + self.bias_dropout_add_func = get_bias_dropout_add( + self.training, + self.config.bias_dropout_fusion + ) + + # TODO: decide how to do inference_params + def forward( + self, hidden_states, attention_mask, encoder_output=None, enc_dec_attn_mask=None, inference_params=None + ): + # hidden_states: [s, b, h] + + # Layer norm at the beginning of the transformer layer. + layernorm_output = self.input_layernorm(hidden_states) + # Self attention. + attention_output_with_bias = self.self_attention( + layernorm_output, attention_mask, inference_params=inference_params + ) + + # Residual connection. + if self.config.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = hidden_states + + # bias_dropout_add fusion returning fp32 instead of bf16 + with self.bias_dropout_add_exec_handler(): + layernorm_input = self.bias_dropout_add_func( + attention_output_with_bias, residual, self.config.hidden_dropout + ) + + # Layer norm post the self attention. + layernorm_output = self.post_self_attn_layernorm(layernorm_input) + + # MLP. + mlp_output_with_bias = self.mlp(layernorm_output) + + # Second residual connection. + if self.config.apply_residual_connection_post_layernorm: + residual = layernorm_output + else: + residual = layernorm_input + + with self.bias_dropout_add_exec_handler(): + output = self.bias_dropout_add_func( + mlp_output_with_bias, residual, self.config.hidden_dropout + ) + + # Jit compiled function creates 'view' tensor. This tensor + # potentially gets saved in the MPU checkpoint function context, + # which rejects view tensors. While making a viewless tensor here + # won't result in memory savings (like the data loader, or + # p2p_communication), it serves to document the origin of this + # 'view' tensor. 
+ output = make_viewless_tensor(inp=output, requires_grad=output.requires_grad, keep_graph=True) + + return output diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/utils.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e672c376de2161b7f160e321bc354b5dca15c161 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/core/utils.py @@ -0,0 +1,171 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +"""Utility functions used throughout Megatron core""" +from functools import reduce +import math +import operator + +import torch + +from megatron.core import parallel_state +from megatron import get_args + +from deepspeed import get_accelerator + + +def ensure_divisibility(numerator, denominator): + """Ensure that numerator is divisible by the denominator.""" + assert numerator % denominator == 0, "{} is not divisible by {}".format( + numerator, denominator + ) + + +def divide(numerator, denominator): + """Ensure that numerator is divisible by the denominator and return + the division value.""" + ensure_divisibility(numerator, denominator) + return numerator // denominator + +def get_attr_wrapped_model(model, attr, allow_none=True): + """Get an attribute from a wrapped model""" + if isinstance(model, list): + raise RuntimeError("_get_attr_wrapped_model given a list of models") + + if allow_none: + def condition(model, attr): + return not hasattr(model, attr) + else: + def condition(model, attr): + return getattr(model, attr, None) is None + + while condition(model, attr): + if not hasattr(model, "module"): + raise RuntimeError(f"_get_attr_wrapped_model couldn't find attribute {attr}") + + model = model.module + return getattr(model, attr) + +def get_model_type(model): + return get_attr_wrapped_model(model, 'model_type') + +def get_model_config(model): + args = get_args() + if args.deepspeed: + return get_attr_wrapped_model(model.module, 'config', allow_none=False) + return get_attr_wrapped_model(model, 'config', allow_none=False) + +class GlobalMemoryBuffer: + """Global buffer to avoid dynamic memory allocations. + Caller should ensure that buffers of the same name + are not used concurrently.""" + + def __init__(self): + self.buffer = {} + + def get_tensor(self, tensor_shape, dtype, name): + required_len = reduce(operator.mul, tensor_shape, 1) + if self.buffer.get((name, dtype), None) is None or \ + self.buffer[(name, dtype)].numel() < required_len: + self.buffer[(name, dtype)] = \ + torch.empty(required_len, + dtype=dtype, + device=get_accelerator().current_device_name(), + requires_grad=False) + + return self.buffer[(name, dtype)][0:required_len].view(*tensor_shape) + +def _kernel_make_viewless_tensor(inp, requires_grad): + '''Make a viewless tensor. + + View tensors have the undesirable side-affect of retaining a reference + to the originally-viewed tensor, even after manually setting the '.data' + field. This method creates a new tensor that links to the old tensor's + data, without linking the viewed tensor, referenced via the '._base' + field. + ''' + out = torch.empty( + (1,), + dtype = inp.dtype, + device = inp.device, + requires_grad = requires_grad, + ) + out.data = inp.data + return out + +class MakeViewlessTensor(torch.autograd.Function): + ''' + Autograd function to make a viewless tensor. 
+ + This function should be used in cases where the computation graph needs + to be propagated, but we only want a viewless tensor (e.g., + ParallelTransformer's hidden_states). Call this function by passing + 'keep_graph = True' to 'make_viewless_tensor()'. + ''' + @staticmethod + def forward(ctx, inp, requires_grad): + return _kernel_make_viewless_tensor(inp, requires_grad) + @staticmethod + def backward(ctx, grad_output): + return grad_output, None + +def make_viewless_tensor(inp, requires_grad, keep_graph): + ''' + Entry-point for creating viewless tensors. + + This method should be used, rather than calling 'MakeViewlessTensor' + or '_kernel_make_viewless_tensor' directly. This method acts as a + switch for determining if an autograd function or a regular method + should be used to create the tensor. + ''' + + # return tensor as-is, if not a 'view' + if inp._base is None: + return inp + + # create viewless tensor + if keep_graph: + return MakeViewlessTensor.apply(inp, requires_grad) + else: + return _kernel_make_viewless_tensor(inp, requires_grad) + +def assert_viewless_tensor(tensor, extra_msg = None): + '''Assert that a tensor is not a view (i.e., its '._base' field is + not set).''' + if isinstance(tensor, list): + [ assert_viewless_tensor(t) for t in tensor ] + return tensor + if not isinstance(tensor, torch.Tensor): + return tensor + assert tensor._base is None, ( + "Ensure tensor._base is None before setting tensor.data or storing " + "tensor to memory buffer. Otherwise, a memory leak will occur (and " + "likely accumulate over iterations). %s" + ) % extra_msg + return tensor + +def safely_set_viewless_tensor_data(tensor, new_data_tensor): + '''Safely set tensor's '.data' field. + + Check first that the tensor is viewless (i.e., '._base' not set). If not, + raise an exception. + ''' + assert_viewless_tensor(tensor, extra_msg = "FYI, tensor._base has shape %s, and new_data_tensor has shape %s." 
% ("--" if tensor._base is None else tensor._base.shape, new_data_tensor.shape)) + tensor.data = new_data_tensor + +def init_method_normal(sigma): + """Init method based on N(0, sigma).""" + + def init_(tensor): + return torch.nn.init.normal_(tensor, mean=0.0, std=sigma) + + return init_ + + +def scaled_init_method_normal(sigma, num_layers): + """Init method based on N(0, sigma/sqrt(2*num_layers).""" + std = sigma / math.sqrt(2.0 * num_layers) + + def init_(tensor): + return torch.nn.init.normal_(tensor, mean=0.0, std=std) + + return init_ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/__init__.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a7c3da3f2e913d9be178c99b4288b2ec20977e8 Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/blendable_dataset.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/blendable_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abe7b05c6eeebff0e8e241766707bef190155135 Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/blendable_dataset.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/dataset_utils.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/dataset_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2af15f7307ebfcb3fb010b7c4afa1702e2398fe5 Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/dataset_utils.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/indexed_dataset.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/indexed_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea287989dc224f6ec3a3e66292c2255bf9e6e4a6 Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/indexed_dataset.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/vit_dataset.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/vit_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e44ad19c5f6001a64d23d40c7d99bb7226de53a Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/__pycache__/vit_dataset.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/gpt_dataset.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/gpt_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..dc4844bcc14e3dacf4930521562f7975110e10f9 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/gpt_dataset.py @@ -0,0 +1,664 @@ +# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+ +"""GPT style dataset.""" + +import hashlib +import os +import time + +import numpy as np +import torch +from deepspeed.accelerator import get_accelerator +from megatron import print_rank_0, is_rank_0, get_args +from megatron.core import mpu +from megatron.data.blendable_dataset import BlendableDataset +from megatron.data.dataset_utils import get_datasets_weights_and_num_samples +from megatron.data.dataset_utils import get_train_valid_test_split_ +from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset + + +def build_train_valid_test_datasets(data_prefix, data_impl, splits_string, + train_valid_test_num_samples, + seq_length, seed, skip_warmup, + train_data_prefix=None, + valid_data_prefix=None, + test_data_prefix=None, + return_doc_ids=False, *, + data_cache_path=None, + use_seq_len_plus_one_tokens=True): + """Build train, valid, and test datasets.""" + + if data_prefix: + print_rank_0("Single data path provided for train, valid & test") + + # Single dataset. + if len(data_prefix) == 1: + return _build_train_valid_test_datasets(data_prefix[0], + data_impl, splits_string, + train_valid_test_num_samples, + seq_length, seed, skip_warmup, + data_cache_path=data_cache_path, + use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens) + + # Blending dataset. + # Parse the values. + output = get_datasets_weights_and_num_samples(data_prefix, + train_valid_test_num_samples) + prefixes, weights, datasets_train_valid_test_num_samples = output + train_num_samples, valid_num_samples, test_num_samples = map( + sum, + zip(*datasets_train_valid_test_num_samples) + ) + + # Build individual datasets. + train_datasets = [] + valid_datasets = [] + test_datasets = [] + for i in range(len(prefixes)): + train_ds, valid_ds, test_ds = _build_train_valid_test_datasets( + prefixes[i], data_impl, splits_string, + datasets_train_valid_test_num_samples[i], + seq_length, seed, skip_warmup, + return_doc_ids, + data_cache_path=data_cache_path, + use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens) + if train_ds: + train_datasets.append(train_ds) + if valid_ds: + valid_datasets.append(valid_ds) + if test_ds: + test_datasets.append(test_ds) + + # Blend. + blending_train_dataset = None + if train_datasets: + blending_train_dataset = BlendableDataset(train_datasets, weights, train_num_samples, + data_cache_path=data_cache_path) + blending_valid_dataset = None + if valid_datasets: + blending_valid_dataset = BlendableDataset(valid_datasets, weights, valid_num_samples, + data_cache_path=data_cache_path) + blending_test_dataset = None + if test_datasets: + blending_test_dataset = BlendableDataset(test_datasets, weights, test_num_samples, + data_cache_path=data_cache_path) + + return (blending_train_dataset, blending_valid_dataset, + blending_test_dataset) + + else: + print_rank_0("Separate data paths provided for train, valid & test. Split string will be ignored.") + + train_dataset, valid_dataset, test_dataset = None, None, None + # Single dataset. 
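+        # For example (hypothetical paths), train_data_prefix, valid_data_prefix
+        # and test_data_prefix could point at three independent corpora such as
+        #   /data/train_text_document, /data/valid_text_document, /data/test_text_document
+        # Each prefix is built into its own dataset below, and the
+        # train/valid/test splits_string is ignored in this branch.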
+ if train_data_prefix is not None: + train_dataset = build_dataset("train", train_data_prefix, data_impl, + splits_string, + train_valid_test_num_samples[0], + seq_length, seed, skip_warmup, + data_cache_path=data_cache_path, + use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens) + + if valid_data_prefix is not None: + valid_dataset = build_dataset("valid", valid_data_prefix, data_impl, + splits_string, + train_valid_test_num_samples[1], + seq_length, seed, False, + data_cache_path=data_cache_path, + use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens) + + + if test_data_prefix is not None: + test_dataset = build_dataset("test", test_data_prefix, data_impl, + splits_string, + train_valid_test_num_samples[2], + seq_length, seed, False, + data_cache_path=data_cache_path, + use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens) + + return (train_dataset, valid_dataset, test_dataset) + + +def _build_train_valid_test_datasets(data_prefix, data_impl, splits_string, + train_valid_test_num_samples, + seq_length, seed, skip_warmup, + return_doc_ids=False, *, + data_cache_path=None, + use_seq_len_plus_one_tokens): + """Build train, valid, and test datasets.""" + + # Indexed dataset. + indexed_dataset = get_indexed_dataset_(data_prefix, + data_impl, + skip_warmup) + + total_num_of_documents = indexed_dataset.sizes.shape[0] + splits = get_train_valid_test_split_(splits_string, total_num_of_documents) + + # Print stats about the splits. + print_rank_0(' > dataset split:') + + def print_split_stats(name, index): + print_rank_0(' {}:'.format(name)) + print_rank_0(' document indices in [{}, {}) total of {} ' + 'documents'.format(splits[index], splits[index + 1], + splits[index + 1] - splits[index])) + print_split_stats('train', 0) + print_split_stats('validation', 1) + print_split_stats('test', 2) + + def build_dataset(index, name): + dataset = None + if splits[index + 1] > splits[index]: + documents = np.arange(start=splits[index], stop=splits[index + 1], + step=1, dtype=np.int32) + dataset = GPTDataset(name, data_prefix, documents, indexed_dataset, + splits_string, + train_valid_test_num_samples[index], + seq_length, seed, + return_doc_ids, + data_cache_path=data_cache_path, + use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens) + return dataset + + train_dataset = build_dataset(0, 'train') + valid_dataset = build_dataset(1, 'valid') + test_dataset = build_dataset(2, 'test') + + return (train_dataset, valid_dataset, test_dataset) + + +def build_dataset(dataset_name, data_prefix, data_impl, + splits_string, num_samples, + seq_length, seed, skip_warmup, + *, + data_cache_path=None, + use_seq_len_plus_one_tokens=True): + dataset = None + if len(data_prefix) == 1: + dataset = _build_dataset(dataset_name, data_prefix[0], data_impl, + splits_string, num_samples, seq_length, + seed, skip_warmup, + data_cache_path=data_cache_path, + use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens) + else: + # Blending dataset. + # Parse the values. + output = get_datasets_weights_and_num_samples(data_prefix, num_samples) + prefixes, weights, dataset_num_samples = output + num_samples = sum(dataset_num_samples) + + # Build individual datasets. 
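+        # For a blended prefix such as
+        #   ['0.3', '/data/corpusA_text_document', '0.7', '/data/corpusB_text_document']
+        # (hypothetical weights and paths), get_datasets_weights_and_num_samples()
+        # above yields prefixes ['/data/corpusA_text_document', '/data/corpusB_text_document'],
+        # weights [0.3, 0.7], and per-dataset sample counts that add up to the
+        # blended num_samples; each prefix becomes its own dataset below and the
+        # weighted mix is served through BlendableDataset.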
+ datasets = [] + for i in range(len(prefixes)): + ds = _build_dataset(dataset_name, prefixes[i], data_impl, + splits_string, dataset_num_samples[i], + seq_length, seed, skip_warmup, + data_cache_path=data_cache_path, + use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens) + if ds: + datasets.append(ds) + + if datasets: + dataset = BlendableDataset(datasets, weights, num_samples, + data_cache_path=data_cache_path) + + return dataset + + +def _build_dataset(dataset_name, data_prefix, data_impl, splits_string, + num_samples, seq_length, seed, skip_warmup, + *, + data_cache_path=None, + use_seq_len_plus_one_tokens=True): + """ + Build dataset. This method is called when individual + train, valid, test datasets are provided + """ + + # Indexed dataset. + indexed_dataset = get_indexed_dataset_(data_prefix, + data_impl, + skip_warmup) + + total_num_of_documents = indexed_dataset.sizes.shape[0] + + print_rank_0(' {}:'.format(dataset_name)) + print_rank_0(' document indices in [0, {}) total of {} ' + 'documents'.format(total_num_of_documents, total_num_of_documents)) + + documents = np.arange(start=0, stop=total_num_of_documents, + step=1, dtype=np.int32) + + dataset = GPTDataset(dataset_name, data_prefix, documents, indexed_dataset, + splits_string, num_samples, seq_length, seed, + data_cache_path=data_cache_path, use_seq_len_plus_one_tokens=use_seq_len_plus_one_tokens) + + return dataset + + +def get_indexed_dataset_(data_prefix, data_impl, skip_warmup): + """Build indexed dataset.""" + print_rank_0(' > building dataset index ...') + + start_time = time.time() + indexed_dataset = make_indexed_dataset(data_prefix, + data_impl, + skip_warmup) + print_rank_0(' > finished creating indexed dataset in {:4f} ' + 'seconds'.format(time.time() - start_time)) + print_rank_0(' number of documents: {}'.format( + indexed_dataset.sizes.shape[0])) + + return indexed_dataset + + +class GPTDataset(torch.utils.data.Dataset): + + def __init__(self, name, data_prefix, documents, indexed_dataset, + splits_string, num_samples, seq_length, seed, + return_doc_ids=False, *, + data_cache_path=None, + use_seq_len_plus_one_tokens): + + self.name = name + self.indexed_dataset = indexed_dataset + self.return_doc_ids = return_doc_ids + self.seq_length = seq_length + self.add_extra_token = 0 + if use_seq_len_plus_one_tokens: + self.add_extra_token = 1 + + # Checks + assert np.min(documents) >= 0 + assert np.max(documents) < indexed_dataset.sizes.shape[0] + + # Build index mappings. + self.doc_idx, self.sample_idx, self.shuffle_idx, self.desc, self.desc_hash = \ + _build_index_mappings(self.name, data_prefix, + documents, self.indexed_dataset.sizes, + splits_string, num_samples, seq_length, seed, + data_cache_path=data_cache_path, add_extra_token=self.add_extra_token) + + + def __len__(self): + # -1 is due to data structure used to retieve the index: + # sample i --> [sample_idx[i], sample_idx[i+1]) + return self.sample_idx.shape[0] - 1 + + def __getitem__(self, idx): + args = get_args() + dummy_sample = idx < 0 + idx = np.abs(idx) + orig_idx = idx + # Get the shuffled index. + idx = self.shuffle_idx[idx] + # Start and end documents and offsets. + doc_index_f = self.sample_idx[idx][0] + doc_index_l = self.sample_idx[idx + 1][0] + offset_f = self.sample_idx[idx][1] + offset_l = self.sample_idx[idx + 1][1] + # If we are within the same document, just extract the chunk. 
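+        # Illustrative example (hypothetical values): if
+        #   self.sample_idx[idx]     == [5, 100]
+        #   self.sample_idx[idx + 1] == [5, 612]
+        # the sample lies entirely inside document self.doc_idx[5]; with
+        # seq_length == 512 and add_extra_token == 1 the branch below reads the
+        # 513 tokens at positions [100, 613) of that document. When the two
+        # entries point at different documents, the else branch stitches the
+        # tail of the first document, any full documents in between, and the
+        # head of the last one into a single contiguous sample.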
+ doc_ids = [] + if doc_index_f == doc_index_l: + doc_ids.append(self.doc_idx[doc_index_f]) + sample = self.indexed_dataset.get(self.doc_idx[doc_index_f], + offset=offset_f, + length=offset_l - offset_f + self.add_extra_token) + else: + # Otherwise, get the rest of the initial document. + doc_ids.append(self.doc_idx[doc_index_f]) + sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f], + offset=offset_f)] + # Loop over all in between documents and add the entire document. + for i in range(doc_index_f + 1, doc_index_l): + doc_ids.append(self.doc_idx[i]) + sample_list.append(self.indexed_dataset.get(self.doc_idx[i])) + # And finally add the relevant portion of last document. + doc_ids.append(self.doc_idx[doc_index_l]) + sample_list.append(self.indexed_dataset.get( + self.doc_idx[doc_index_l], + length=offset_l + self.add_extra_token)) + sample = np.concatenate(sample_list) + + text_name = 'text' + if args.use_dataset_only: + text_name = 'input_ids' + sample_dict = {text_name: np.array(sample, dtype=np.int64)} + if args.return_data_index: + sample_dict.update({'index': np.array([orig_idx], dtype=np.int64)}) + + if self.return_doc_ids: # for retro preprocessing + sample_dict.update({'doc_ids': np.array(doc_ids, dtype=np.int64)}) + + if args.use_dataset_only: + sample_dict.update({'labels': np.array(sample, dtype=np.int64)}) + + if len(sample) != (self.seq_length + self.add_extra_token): + sample = np.array(sample, dtype=np.int64) + sample = np.pad(sample, (0, self.seq_length + self.add_extra_token - len(sample)), mode='constant', constant_values=-1) + + if args.return_data_index: + return {'text': np.array(sample, dtype=np.int64), + 'index': np.array([orig_idx], dtype=np.int64)} + elif self.return_doc_ids: # for retro preprocessing + return {'text': np.array(sample, dtype=np.int64), + 'doc_ids': np.array(doc_ids, dtype=np.int64)} + else: + return {'text': np.array(sample, dtype=np.int64), + 'dummy_sample': np.array(int(dummy_sample), dtype=np.int64)} + + return sample_dict + +def _build_index_mappings(name, data_prefix, documents, sizes, + splits_string, num_samples, seq_length, seed, + *, + data_cache_path, add_extra_token): + """Build doc-idx, sample-idx, and shuffle-idx. + doc-idx: is an array (ordered) of documents to be used in training. + sample-idx: is the start document index and document offset for each + training sample. + shuffle-idx: maps the sample index into a random index into sample-idx. + """ + args = get_args() + # Number of tokens in each epoch and number of required epochs. + tokens_per_epoch = _num_tokens(documents, sizes) + num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples, add_extra_token) + if num_samples < 0: + print_num_samples = tokens_per_epoch // seq_length + else: + print_num_samples = num_samples + if args.train_data_exact_num_epochs is not None and name == 'train': + num_epochs = args.train_data_exact_num_epochs + + # rng state + np_rng = np.random.RandomState(seed=seed) + + # Filename of the index mappings. 
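+    # The description built below is hashed (md5) and the hash names every
+    # cache artifact, e.g. with a hypothetical hash 3fa2c1:
+    #   3fa2c1.dsc              the human-readable description
+    #   3fa2c1_doc_idx.npy      document order over all epochs
+    #   3fa2c1_sample_idx.npy   (document index, offset) pair per sample
+    #   3fa2c1_shuffle_idx.npy  random permutation over samples
+    # so changing the prefix, split, seed, epoch count, or sequence length
+    # produces a fresh set of index files rather than reusing stale ones.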
+ desc = "GPT Dataset\n\n" + desc += f"Data prefix {data_prefix}\n" + desc += f"Dataset name {name}\n" + desc += f"Number of samples {print_num_samples}\n" + desc += f"Number of epochs {num_epochs}\n" + desc += f"Sequence length {seq_length}\n" + desc += f"Random seed {seed}\n" + desc += f"Split {splits_string}\n" + desc_hash = hashlib.md5(desc.encode('utf-8')).hexdigest() + desc_filename = desc_hash + ".dsc" + doc_idx_filename = desc_hash + '_doc_idx.npy' + sample_idx_filename = desc_hash + '_sample_idx.npy' + shuffle_idx_filename = desc_hash + '_shuffle_idx.npy' + + if name == 'train': + # force to use certain index files + if args.train_desc_path is not None: + desc_filename = args.train_desc_path + if args.train_doc_idx_path is not None: + doc_idx_filename = args.train_doc_idx_path + if args.train_sample_idx_path is not None: + sample_idx_filename = args.train_sample_idx_path + if args.train_shuffle_idx_path is not None: + shuffle_idx_filename = args.train_shuffle_idx_path + + # Look for cache in main data dir first to avoid unnecessary + # duplication, then look in data-cache-path if specified, + # If nothing is found, use the last path looked in + build_indices = True + prefixes = [os.path.join(os.path.dirname(data_prefix), 'index-cache')] + if data_cache_path is not None: + prefixes.append(data_cache_path) + for prefix in prefixes: + idx_path = { + 'desc': os.path.join(prefix, desc_filename), + 'doc': os.path.join(prefix, doc_idx_filename), + 'sample': os.path.join(prefix, sample_idx_filename), + 'shuffle': os.path.join(prefix, shuffle_idx_filename) + } + for f in idx_path.values(): + if not os.path.isfile(f): + break + else: + # Found our files! + build_indices = False + break + data_cache_dir = os.path.dirname(idx_path['desc']) + data_cache_success = True + + # Build the indexed mapping if not exist. + if build_indices and is_rank_0(): + print_rank_0(' > WARNING: could not find index map files, building ' + 'the indices on rank 0 ...') + + # For the last epoch, decide whether include the entire epoch + # in the global shuffle or not. + + # If we need only one epoch, then separating last epoch does + # not mean anything. + if num_epochs == 1: + separate_last_epoch = False + print(' > only one epoch required, setting ' + 'separate_last_epoch to False', flush=True) + + else: + # Get the number of samples for the last epoch + assert num_samples >= 0, 'number of samples should be non-negative' + num_samples_from_epochs_minus_one = ( + (num_epochs - 1) * tokens_per_epoch - add_extra_token) // seq_length + last_epoch_num_samples = num_samples - \ + num_samples_from_epochs_minus_one + assert last_epoch_num_samples >= 0, \ + 'last epoch number of samples should be non-negative.' + num_samples_per_epoch = (tokens_per_epoch - add_extra_token) // seq_length + assert last_epoch_num_samples <= (num_samples_per_epoch + 1), \ + 'last epoch number of samples exceeded max value.' + # If we have less than 80% of the samples for the last epoch, + # seperate out the epoch and treat it differently. + # Note: the 80% number is just based on common sense and can + # be adjusted if needed. 
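+            # Worked example with made-up numbers: if num_samples_per_epoch is
+            # 1000 and only 150 samples are still needed from the final epoch,
+            # then 150 < 0.8 * 1000, so the last epoch is shuffled on its own
+            # and only 150 of its samples are consumed; if 900 were still
+            # needed, the last epoch would join the global shuffle instead.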
+            separate_last_epoch = (last_epoch_num_samples <
+                                   int(0.80 * num_samples_per_epoch))
+            if separate_last_epoch:
+                string = ' > last epoch number of samples ({}) is smaller '\
+                         'than 80% of number of samples per epoch ({}), '\
+                         'setting separate_last_epoch to True'
+            else:
+                string = ' > last epoch number of samples ({}) is larger '\
+                         'than 80% of number of samples per epoch ({}), '\
+                         'setting separate_last_epoch to False'
+            print(string.format(last_epoch_num_samples,
+                                num_samples_per_epoch), flush=True)
+
+
+        try:
+            os.makedirs(data_cache_dir, exist_ok=True)
+
+            # description
+            with open(idx_path['desc'], 'wt') as fd:
+                fd.write(desc)
+
+            # doc-idx.
+            start_time = time.time()
+            doc_idx = _build_doc_idx(documents, num_epochs, np_rng,
+                                     separate_last_epoch)
+            np.save(idx_path['doc'], doc_idx, allow_pickle=True)
+            print_rank_0(' > elapsed time to build and save doc-idx mapping '
+                         '(seconds): {:4f}'.format(time.time() - start_time))
+            # sample-idx.
+            start_time = time.time()
+            # Use C++ implementation for speed.
+            # First compile and then import.
+            from megatron.data import helpers
+            assert doc_idx.dtype == np.int32
+            assert sizes.dtype == np.int32
+            sample_idx = helpers.build_sample_idx(sizes, doc_idx, seq_length,
+                                                  num_epochs, tokens_per_epoch,
+                                                  num_samples < 0, add_extra_token)
+            np.save(idx_path['sample'], sample_idx, allow_pickle=True)
+            print_rank_0(' > elapsed time to build and save sample-idx mapping '
+                         '(seconds): {:4f}'.format(time.time() - start_time))
+            # shuffle-idx.
+            start_time = time.time()
+            # -1 is due to data structure used to retrieve the index:
+            # sample i --> [sample_idx[i], sample_idx[i+1])
+            if separate_last_epoch:
+                num_samples_ = num_samples_from_epochs_minus_one
+            else:
+                num_samples_ = sample_idx.shape[0] - 1
+            shuffle_idx = _build_shuffle_idx(num_samples_,
+                                             sample_idx.shape[0] - 1, np_rng)
+            np.save(idx_path['shuffle'], shuffle_idx, allow_pickle=True)
+            print_rank_0(' > elapsed time to build and save shuffle-idx mapping'
+                         ' (seconds): {:4f}'.format(time.time() - start_time))
+        except OSError:
+            print(f'There was an error trying to create the data cache directory ({data_cache_dir})')
+            print('or a file in it. This defaults to a directory "index-cache" within the directory')
+            print('the data files are in and can be set with the --data-cache-path argument. Please')
+            print('ensure you have write access to this directory or specify one that you do have')
+            print('write access to.')
+            data_cache_success = False
+
+    counts = get_accelerator().LongTensor([data_cache_success])
+    torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
+    torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
+    if counts[0].item() != (
+        torch.distributed.get_world_size() //
+        torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group()) //
+        torch.distributed.get_world_size(group=mpu.get_sequence_parallel_group())):
+        print_rank_0("Data index creation unsuccessful, exiting.")
+        exit()
+
+    # Load mappings.
+ start_time = time.time() + print_rank_0(f" > loading doc-idx mapping from {idx_path['doc']}") + doc_idx = np.load(idx_path['doc'], allow_pickle=True, mmap_mode='r') + + print_rank_0(f" > loading sample-idx mapping from {idx_path['sample']}") + sample_idx = np.load(idx_path['sample'], allow_pickle=True, mmap_mode='r') + + print_rank_0(f" > loading shuffle-idx mapping from {idx_path['shuffle']}") + shuffle_idx = np.load(idx_path['shuffle'], allow_pickle=True, mmap_mode='r') + + print_rank_0(' loaded indexed file in {:3.3f} seconds'.format( + time.time() - start_time)) + print_rank_0(' total number of samples: {}'.format( + sample_idx.shape[0])) + print_rank_0(' total number of epochs: {}'.format(num_epochs)) + + return doc_idx, sample_idx, shuffle_idx, desc, desc_hash + + +def _num_tokens(documents, sizes): + """Total number of tokens in the dataset.""" + return np.sum(sizes[documents]) + + +def _num_epochs(tokens_per_epoch, seq_length, num_samples, add_extra_token): + """Based on number of samples and sequence lenght, calculate how many + epochs will be needed.""" + num_epochs = 0 + total_tokens = 0 + while True: + num_epochs += 1 + total_tokens += tokens_per_epoch + # -1 is because we need to retrieve seq_length + 1 token each time + # but the last token will overlap with the first token of the next + # sample except for the last sample. + if ((total_tokens - add_extra_token) // seq_length) >= num_samples: + return num_epochs + + +def _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch): + """Build an array with length = number-of-epochs * number-of-dcuments. + Each index is mapped to a corresponding document.""" + if not separate_last_epoch or num_epochs == 1: + doc_idx = np.mgrid[0:num_epochs, 0:len(documents)][1] + doc_idx[:] = documents + doc_idx = doc_idx.reshape(-1) + doc_idx = doc_idx.astype(np.int32) + np_rng.shuffle(doc_idx) + return doc_idx + + doc_idx_first = _build_doc_idx(documents, num_epochs-1, np_rng, False) + doc_idx_last = _build_doc_idx(documents, 1, np_rng, False) + return np.concatenate((doc_idx_first, doc_idx_last)) + + +def _build_sample_idx(sizes, doc_idx, seq_length, + num_epochs, tokens_per_epoch, + keep_last_sequence, add_extra_token): + """Sample index mapping is a 2D array with sizes + [number-of-samples + 1, 2] where [..., 0] contains + the index into `doc_idx` and [..., 1] is the + starting offset in that document.""" + + # Total number of samples. For -1 see comments in `_num_epochs`. + if keep_last_sequence: + import math + num_samples = math.ceil((num_epochs * tokens_per_epoch - add_extra_token) / seq_length) + else: + num_samples = (num_epochs * tokens_per_epoch - add_extra_token) // seq_length + sample_idx = np.zeros([num_samples + 1, 2], dtype=np.int32) + + # Index into sample_idx. + sample_index = 0 + # Index into doc_idx. + doc_idx_index = 0 + # Begining offset for each document. + doc_offset = 0 + # Start with first document and no offset. + sample_idx[sample_index][0] = doc_idx_index + sample_idx[sample_index][1] = doc_offset + sample_index += 1 + while sample_index <= num_samples: + # Start with a fresh sequence. + remaining_seq_length = seq_length + add_extra_token + while remaining_seq_length != 0: + # Get the document length. + doc_id = doc_idx[doc_idx_index] + doc_length = sizes[doc_id] - doc_offset + # And add it to the current sequence. + remaining_seq_length -= doc_length + # If we have more than a full sequence, adjust offset and set + # remaining length to zero so we return from the while loop. 
+ # Note that -1 here is for the same reason we have -1 in + # `_num_epochs` calculations. + if remaining_seq_length <= 0: + doc_offset += (remaining_seq_length + doc_length - add_extra_token) + remaining_seq_length = 0 + else: + # Otherwise, start from the begining of the next document. + if doc_idx_index == (len(doc_idx) - 1): + assert sample_index == num_samples, F"sample_index={sample_index} and num_samples={num_samples} should be the same" + doc_offset = sizes[doc_idx[doc_idx_index]] - add_extra_token + break + doc_idx_index += 1 + doc_offset = 0 + # Record the sequence. + sample_idx[sample_index][0] = doc_idx_index + sample_idx[sample_index][1] = doc_offset + sample_index += 1 + + return sample_idx + + +def _build_shuffle_idx(num_samples, total_size, np_rng): + """Build the range [0, size) and shuffle.""" + print(' > building shuffle index with split [0, {}) and [{}, {}) ' + '...'.format(num_samples, num_samples, total_size), flush=True) + + dtype_ = np.uint32 + if total_size >= (np.iinfo(np.uint32).max - 1): + dtype_ = np.int64 + + shuffle_idx_first = np.arange(start=0, stop=num_samples, + step=1, dtype=dtype_) + np_rng.shuffle(shuffle_idx_first) + if num_samples == total_size: + return shuffle_idx_first + + shuffle_idx_last = np.arange(start=num_samples, stop=total_size, + step=1, dtype=dtype_) + np_rng.shuffle(shuffle_idx_last) + + return np.concatenate((shuffle_idx_first, shuffle_idx_last)) diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/ict_dataset.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/ict_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..6dac35ff9d413898146df5c1cc8553719e142105 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/ict_dataset.py @@ -0,0 +1,156 @@ +import itertools +import random + +import numpy as np +from torch.utils.data import Dataset + +from megatron import get_tokenizer +from megatron import get_args +from megatron.data.dataset_utils import get_indexed_dataset_ +from megatron.data.realm_dataset_utils import get_block_samples_mapping + +def make_attention_mask(source_block, target_block): + """ + Returns a 2-dimensional (2-D) attention mask + :param source_block: 1-D array + :param target_block: 1-D array + """ + mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1) + mask = mask.astype(np.int64) + # (source_length, target_length) + return mask + +def get_ict_dataset(use_titles=True, query_in_block_prob=1): + """Get a dataset which uses block samples mappings to get ICT/block indexing data (via get_block()) + rather than for training, since it is only built with a single epoch sample mapping. 
+ """ + args = get_args() + block_dataset = get_indexed_dataset_(args.data_path, 'mmap', True) + titles_dataset = get_indexed_dataset_(args.titles_data_path, 'mmap', True) + + kwargs = dict( + name='full', + block_dataset=block_dataset, + title_dataset=titles_dataset, + data_prefix=args.data_path, + num_epochs=1, + max_num_samples=None, + max_seq_length=args.seq_length, + seed=1, + query_in_block_prob=query_in_block_prob, + use_titles=use_titles, + use_one_sent_docs=args.use_one_sent_docs + ) + dataset = ICTDataset(**kwargs) + return dataset + + +class ICTDataset(Dataset): + """Dataset containing sentences and their blocks for an inverse cloze task.""" + def __init__(self, name, block_dataset, title_dataset, data_prefix, + num_epochs, max_num_samples, max_seq_length, query_in_block_prob, + seed, use_titles=True, use_one_sent_docs=False, binary_head=False): + self.name = name + self.seed = seed + self.max_seq_length = max_seq_length + self.query_in_block_prob = query_in_block_prob + self.block_dataset = block_dataset + self.title_dataset = title_dataset + self.rng = random.Random(self.seed) + self.use_titles = use_titles + self.use_one_sent_docs = use_one_sent_docs + + self.samples_mapping = get_block_samples_mapping( + block_dataset, title_dataset, data_prefix, num_epochs, + max_num_samples, max_seq_length, seed, name, use_one_sent_docs) + self.tokenizer = get_tokenizer() + self.vocab_id_list = list(self.tokenizer.inv_vocab.keys()) + self.vocab_id_to_token_list = self.tokenizer.inv_vocab + self.cls_id = self.tokenizer.cls + self.sep_id = self.tokenizer.sep + self.mask_id = self.tokenizer.mask + self.pad_id = self.tokenizer.pad + + def __len__(self): + return len(self.samples_mapping) + + def __getitem__(self, idx): + """Get an ICT example of a pseudo-query and the block of text from which it was extracted""" + sample_data = self.samples_mapping[idx] + start_idx, end_idx, doc_idx, block_idx = sample_data.as_tuple() + + if self.use_titles: + title = self.title_dataset[int(doc_idx)] + title_pad_offset = 3 + len(title) + else: + title = None + title_pad_offset = 2 + block = [self.block_dataset[i] for i in range(start_idx, end_idx)] + assert len(block) > 1 or self.use_one_sent_docs or self.query_in_block_prob == 1 + + # randint() is inclusive for Python rng + rand_sent_idx = self.rng.randint(0, len(block) - 1) + + # keep the query in the context query_in_block_prob fraction of the time. + if self.rng.random() < self.query_in_block_prob: + query = block[rand_sent_idx].copy() + else: + query = block.pop(rand_sent_idx) + + # still need to truncate because blocks are concluded when + # the sentence lengths have exceeded max_seq_length. 
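+        # Token budget, as applied by concat_and_pad_tokens() below: the query
+        # becomes [CLS] query [SEP] (2 extra ids), and the context becomes
+        # [CLS] title [SEP] block [SEP] (3 + len(title) extra ids), which is
+        # exactly the title_pad_offset computed above; hence the query keeps at
+        # most max_seq_length - 2 tokens and the block at most
+        # max_seq_length - title_pad_offset tokens.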
+ query = query[:self.max_seq_length - 2] + block = list(itertools.chain(*block))[:self.max_seq_length - title_pad_offset] + + query_tokens, query_pad_mask = self.concat_and_pad_tokens(query) + context_tokens, context_pad_mask = self.concat_and_pad_tokens(block, title) + + query_mask = make_attention_mask(query_tokens, query_tokens) + context_mask = make_attention_mask(context_tokens, context_tokens) + + block_data = sample_data.as_array() + + sample = { + 'query_tokens': query_tokens, + 'query_mask': query_mask, + 'query_pad_mask': query_pad_mask, + 'context_tokens': context_tokens, + 'context_mask': context_mask, + 'context_pad_mask': context_pad_mask, + 'block_data': block_data, + } + + return sample + + def get_block(self, start_idx, end_idx, doc_idx): + """Get the IDs for an evidence block plus the title of the corresponding document""" + block = [self.block_dataset[i] for i in range(start_idx, end_idx)] + title = self.title_dataset[int(doc_idx)] + + block = list(itertools.chain(*block))[:self.max_seq_length - (3 + len(title))] + block_tokens, block_pad_mask = self.concat_and_pad_tokens(block, title) + + return block_tokens, block_pad_mask + + def get_null_block(self): + """Get empty block and title - used in REALM pretraining""" + block, title = [], [] + block_tokens, block_pad_mask = self.concat_and_pad_tokens(block, title) + + return block_tokens, block_pad_mask + + def concat_and_pad_tokens(self, tokens, title=None): + """Concat with special tokens and pad sequence to self.max_seq_length""" + tokens = list(tokens) + if title is None: + tokens = [self.cls_id] + tokens + [self.sep_id] + else: + title = list(title) + tokens = [self.cls_id] + title + [self.sep_id] + tokens + [self.sep_id] + assert len(tokens) <= self.max_seq_length + + num_pad = self.max_seq_length - len(tokens) + pad_mask = [1] * len(tokens) + [0] * num_pad + tokens += [self.pad_id] * num_pad + + return np.array(tokens), np.array(pad_mask) diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/orqa_wiki_dataset.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/orqa_wiki_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..4019cd764c204b34de0df28bfcc7969c3c19d937 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/orqa_wiki_dataset.py @@ -0,0 +1,193 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Wikipedia dataset from DPR code for ORQA.""" + +from abc import ABC +import csv +import numpy as np +import random +import torch +from torch.utils.data import Dataset + +from megatron import print_rank_0, get_args, get_tokenizer +from megatron.core import tensor_parallel +from megatron.data.biencoder_dataset_utils import make_attention_mask + +def get_open_retrieval_wiki_dataset(): + args = get_args() + tokenizer = get_tokenizer() + + dataset = OpenRetrievalEvidenceDataset('2018 Wikipedia from DPR codebase', + 'evidence', + args.evidence_data_path, + tokenizer, + args.retriever_seq_length) + return dataset + + +def get_open_retrieval_batch(data_iterator): + # Items and their type. + keys = ['row_id', 'context', 'context_mask', 'context_types', + 'context_pad_mask'] + datatype = torch.int64 + + # Broadcast data. + data = None if data_iterator is None else next(data_iterator) + data_b = tensor_parallel.broadcast_data(keys, data, datatype) + + # Unpack. 
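+    # data_b now holds the int64 fields listed in `keys`, broadcast from the
+    # source rank of the tensor-model-parallel group, so every rank in the
+    # group unpacks an identical batch below.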
+ row_id = data_b['row_id'].long() + context = data_b['context'].long() + + # TODO: make the context mask a binary one + context_mask = (data_b['context_mask'] < 0.5) + + context_types = data_b['context_types'].long() + context_pad_mask = data_b['context_pad_mask'].long() + + return row_id, context, context_mask, context_types, context_pad_mask + + +def build_tokens_types_paddings_from_text(row, tokenizer, max_seq_length): + """Build token types and paddings, trim if needed, and pad if needed.""" + + title_ids = tokenizer.tokenize(row['title']) + context_ids = tokenizer.tokenize(row['text']) + + # Appending the title of the context at front + extended_context_ids = title_ids + [tokenizer.sep_id] + context_ids + + context_ids, context_types, context_pad_mask = \ + build_tokens_types_paddings_from_ids(extended_context_ids, + max_seq_length, tokenizer.cls, tokenizer.sep, tokenizer.pad) + + return context_ids, context_types, context_pad_mask + + +# noinspection DuplicatedCode +def build_tokens_types_paddings_from_ids(text_ids, max_seq_length, + cls_id, sep_id, pad_id): + """Build token types and paddings, trim if needed, and pad if needed.""" + enc_ids = [] + tokentypes_enc = [] + + # [CLS]. + enc_ids.append(cls_id) + tokentypes_enc.append(0) + + # A. + len_src = len(text_ids) + enc_ids.extend(text_ids) + tokentypes_enc.extend([0] * len_src) + + # Cap the size. + if len(enc_ids) > max_seq_length - 1: + enc_ids = enc_ids[0: max_seq_length - 1] + tokentypes_enc = tokentypes_enc[0: max_seq_length - 1] + + # [SEP]. + enc_ids.append(sep_id) + tokentypes_enc.append(0) + + num_tokens_enc = len(enc_ids) + # Padding. + padding_length = max_seq_length - len(enc_ids) + if padding_length > 0: + enc_ids.extend([pad_id] * padding_length) + tokentypes_enc.extend([pad_id] * padding_length) + + pad_mask = ([1] * num_tokens_enc) + ([0] * padding_length) + pad_mask = np.array(pad_mask, dtype=np.int64) + + return enc_ids, tokentypes_enc, pad_mask + + +def build_sample(row_id, context_ids, context_types, context_pad_mask): + """Convert to numpy and return a sample consumed by the batch producer.""" + + context_ids = np.array(context_ids, dtype=np.int64) + context_types = np.array(context_types, dtype=np.int64) + context_mask = make_attention_mask(context_ids, context_ids) + + sample = ({ + 'row_id': row_id, + 'context': context_ids, + 'context_mask': context_mask, + 'context_types': context_types, + 'context_pad_mask': context_pad_mask + }) + return sample + + +class OpenRetrievalEvidenceDataset(ABC, Dataset): + """Open Retrieval Evidence dataset class.""" + + def __init__(self, task_name, dataset_name, datapath, tokenizer, + max_seq_length): + # Store inputs. + self.task_name = task_name + self.dataset_name = dataset_name + self.tokenizer = tokenizer + self.max_seq_length = max_seq_length + print_rank_0(' > building {} dataset for {}:'.format(self.task_name, + self.dataset_name)) + # Process the files. 
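+        # The evidence file is a tab-separated dump with a header row (skipped)
+        # followed by doc_id, doc_text, title columns, e.g. a hypothetical row:
+        #   42 <TAB> Aaron is a prophet in the Hebrew Bible ... <TAB> Aaron
+        # process_samples_from_single_path() turns each row into
+        # {'doc_id': 42, 'text': ..., 'title': 'Aaron'} and also fills id2text.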
+ print_rank_0(datapath) + self.samples, self.id2text = self.process_samples_from_single_path( + datapath) + + args = get_args() + if args.sample_rate < 1: # subsample + k = int(len(self.samples) * args.sample_rate) + self.samples = random.sample(self.samples, k) + + print_rank_0(' >> total number of samples: {}'.format( + len(self.samples))) + + def __len__(self): + return len(self.samples) + + def __getitem__(self, idx): + row = self.samples[idx] + + context_ids, context_types, context_pad_mask = \ + build_tokens_types_paddings_from_text(row, self.tokenizer, + self.max_seq_length) + + sample = build_sample(row['doc_id'], + context_ids, + context_types, + context_pad_mask) + return sample + + @staticmethod + def process_samples_from_single_path(filename): + print_rank_0(' > Processing {} ...'.format(filename)) + total = 0 + + rows = [] + id2text = {} + + with open(filename) as tsvfile: + reader = csv.reader(tsvfile, delimiter='\t') + next(reader, None) # skip the headers + for row in reader: + # file format: doc_id, doc_text, title + doc_id = int(row[0]) + text = row[1] + title = row[2] + + rows.append({'doc_id': doc_id, + 'text': text, + 'title': title}) + + assert doc_id not in id2text + id2text[doc_id] = (text, title) + + total += 1 + if total % 100000 == 0: + print_rank_0(' > processed {} rows so far ...'.format( + total)) + + print_rank_0(' >> processed {} samples.'.format(len(rows))) + return rows, id2text diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/realm_dataset_utils.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/realm_dataset_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bf3cd55683f07ada093a0e446cac5b72705a5f29 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/realm_dataset_utils.py @@ -0,0 +1,200 @@ +import os +import time + +import numpy as np +import torch + +from megatron import print_rank_0 +from megatron.core import mpu, tensor_parallel +from megatron.data.dataset_utils import create_masked_lm_predictions, pad_and_convert_to_numpy +from megatron import get_args, get_tokenizer, print_rank_0 +from deepspeed.accelerator import get_accelerator + + +def get_one_epoch_dataloader(dataset, micro_batch_size=None): + """Specifically one epoch to be used in an indexing job.""" + args = get_args() + + world_size = mpu.get_data_parallel_world_size() + rank = mpu.get_data_parallel_rank() + if micro_batch_size is None: + micro_batch_size = args.micro_batch_size + global_batch_size = micro_batch_size * world_size + num_workers = args.num_workers + + sampler = torch.utils.data.SequentialSampler(dataset) + # importantly, drop_last must be False to get all the data. + assert False, 'DistributedBatchSampler deprecated, change the implementation' + from megatron.data.samplers import DistributedBatchSampler + batch_sampler = DistributedBatchSampler(sampler, + batch_size=global_batch_size, + drop_last=False, + rank=rank, + world_size=world_size) + + return torch.utils.data.DataLoader(dataset, + batch_sampler=batch_sampler, + num_workers=num_workers, + pin_memory=True) + + +def get_ict_batch(data_iterator): + # Items and their type. + keys = ['query_tokens', 'query_pad_mask', + 'block_tokens', 'block_pad_mask', 'block_data'] + datatype = torch.int64 + + # Broadcast data. + if data_iterator is None: + data = None + else: + data = next(data_iterator) + data_b = tensor_parallel.broadcast_data(keys, data, datatype) + + # Unpack. 
+ query_tokens = data_b['query_tokens'].long() + query_pad_mask = data_b['query_pad_mask'].long() + block_tokens = data_b['block_tokens'].long() + block_pad_mask = data_b['block_pad_mask'].long() + block_indices = data_b['block_data'].long() + + return query_tokens, query_pad_mask,\ + block_tokens, block_pad_mask, block_indices + + +def join_str_list(str_list): + """Join a list of strings, handling spaces appropriately""" + result = "" + for s in str_list: + if s.startswith("##"): + result += s[2:] + else: + result += " " + s + return result + + +class BlockSampleData(object): + """A struct for fully describing a fixed-size block of data as used in REALM + + :param start_idx: for first sentence of the block + :param end_idx: for last sentence of the block (may be partially truncated in sample construction) + :param doc_idx: the index of the document from which the block comes in the original indexed dataset + :param block_idx: a unique integer identifier given to every block. + """ + def __init__(self, start_idx, end_idx, doc_idx, block_idx): + self.start_idx = start_idx + self.end_idx = end_idx + self.doc_idx = doc_idx + self.block_idx = block_idx + + def as_array(self): + return np.array([self.start_idx, self.end_idx, self.doc_idx, self.block_idx]).astype(np.int64) + + def as_tuple(self): + return self.start_idx, self.end_idx, self.doc_idx, self.block_idx + + +class BlockSamplesMapping(object): + def __init__(self, mapping_array): + # make sure that the array is compatible with BlockSampleData + assert mapping_array.shape[1] == 4 + self.mapping_array = mapping_array + + def __len__(self): + return self.mapping_array.shape[0] + + def __getitem__(self, idx): + """Get the data associated with an indexed sample.""" + sample_data = BlockSampleData(*self.mapping_array[idx]) + return sample_data + + +def get_block_samples_mapping(block_dataset, title_dataset, data_prefix, num_epochs, + max_num_samples, max_seq_length, seed, name, use_one_sent_docs=False): + """Get samples mapping for a dataset over fixed size blocks. This function also requires + a dataset of the titles for the source documents since their lengths must be taken into account. + + :return: samples_mapping (BlockSamplesMapping) + """ + + if not num_epochs: + if not max_num_samples: + raise ValueError("Need to specify either max_num_samples " + "or num_epochs") + num_epochs = np.iinfo(np.int32).max - 1 + if not max_num_samples: + max_num_samples = np.iinfo(np.int64).max - 1 + + # Filename of the index mapping + indexmap_filename = data_prefix + indexmap_filename += '_{}_indexmap'.format(name) + if num_epochs != (np.iinfo(np.int32).max - 1): + indexmap_filename += '_{}ep'.format(num_epochs) + if max_num_samples != (np.iinfo(np.int64).max - 1): + indexmap_filename += '_{}mns'.format(max_num_samples) + indexmap_filename += '_{}msl'.format(max_seq_length) + indexmap_filename += '_{}s'.format(seed) + if use_one_sent_docs: + indexmap_filename += '_1sentok' + indexmap_filename += '.npy' + + # Build the indexed mapping if not exist. + if mpu.get_data_parallel_rank() == 0 and \ + not os.path.isfile(indexmap_filename): + print(' > WARNING: could not find index map file {}, building ' + 'the indices on rank 0 ...'.format(indexmap_filename)) + + # Make sure the types match the helpers input types. 
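+        # helpers.build_blocks_mapping() returns a [num_samples, 4] array whose
+        # rows are (start_idx, end_idx, doc_idx, block_idx), the layout expected
+        # by BlockSampleData above; max_seq_length - 3 leaves room for the [CLS]
+        # and two [SEP] ids added when a block is later joined with its title.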
+ assert block_dataset.doc_idx.dtype == np.int64 + assert block_dataset.sizes.dtype == np.int32 + + # Build samples mapping + verbose = torch.distributed.get_rank() == 0 + start_time = time.time() + print_rank_0(' > building samples index mapping for {} ...'.format( + name)) + + from megatron.data import helpers + mapping_array = helpers.build_blocks_mapping( + block_dataset.doc_idx, + block_dataset.sizes, + title_dataset.sizes, + num_epochs, + max_num_samples, + max_seq_length - 3, # account for added tokens + seed, + verbose, + use_one_sent_docs) + + + print_rank_0(' > done building samples index mapping') + np.save(indexmap_filename, mapping_array, allow_pickle=True) + print_rank_0(' > saved the index mapping in {}'.format( + indexmap_filename)) + # Make sure all the ranks have built the mapping + print_rank_0(' > elapsed time to build and save samples mapping ' + '(seconds): {:4f}'.format( + time.time() - start_time)) + + # This should be a barrier but nccl barrier assumes + # device_index=rank which is not the case for model + # parallel case + counts = get_accelerator().LongTensor([1]) + torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group()) + assert counts[0].item() == torch.distributed.get_world_size( + group=mpu.get_data_parallel_group()) + + # Load indexed dataset. + print_rank_0(' > loading indexed mapping from {}'.format( + indexmap_filename)) + start_time = time.time() + + mapping_array = np.load(indexmap_filename, allow_pickle=True, mmap_mode='r') + samples_mapping = BlockSamplesMapping(mapping_array) + + print_rank_0(' loaded indexed file in {:3.3f} seconds'.format( + time.time() - start_time)) + print_rank_0(' total number of samples: {}'.format( + mapping_array.shape[0])) + + return samples_mapping diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/test/test_preprocess_data.sh b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/test/test_preprocess_data.sh new file mode 100644 index 0000000000000000000000000000000000000000..d121c85958ff35e37431befdceabb831c8cd2705 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/data/test/test_preprocess_data.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +IMPL=cached +python ../preprocess_data.py \ + --input test_samples.json \ + --vocab vocab.txt \ + --dataset-impl ${IMPL} \ + --output-prefix test_samples_${IMPL} \ + --workers 1 \ + --log-interval 2 diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/dist_signal_handler.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/dist_signal_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..a60204f004a3f149da0bc059cd875b6ec390c0c4 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/dist_signal_handler.py @@ -0,0 +1,81 @@ +import signal + +import torch + + +def get_world_size(): + if torch.distributed.is_available() and torch.distributed.is_initialized(): + world_size = torch.distributed.get_world_size() + else: + world_size = 1 + return world_size + + +def get_device(local_rank=None): + backend = torch.distributed.get_backend() + if backend == 'nccl': + if local_rank is None: + device = torch.device('cuda') + else: + device = torch.device(f'cuda:{local_rank}') + elif backend == 'gloo': + device = torch.device('cpu') + else: + raise RuntimeError + return device + + +def all_gather_item(item, dtype, group=None, async_op=False, local_rank=None): + if not torch.distributed.is_available() or \ + not torch.distributed.is_initialized(): + return [item] + + device 
= get_device(local_rank) + + if group is not None: + group_size = group.size() + else: + group_size = get_world_size() + + tensor = torch.tensor([item], device=device, dtype=dtype) + output_tensors = [ + torch.zeros(1, dtype=tensor.dtype, device=tensor.device) + for _ in range(group_size) + ] + torch.distributed.all_gather(output_tensors, tensor, group, async_op) + output = [elem.item() for elem in output_tensors] + return output + + +class DistributedSignalHandler: + def __init__(self, sig=signal.SIGTERM): + self.sig = sig + + def signals_received(self): + all_received = all_gather_item( + self._signal_received, dtype=torch.int32 + ) + return all_received + + def __enter__(self): + self._signal_received = False + self.released = False + self.original_handler = signal.getsignal(self.sig) + + def handler(signum, frame): + self._signal_received = True + + signal.signal(self.sig, handler) + + return self + + def __exit__(self, type, value, tb): + self.release() + + def release(self): + if self.released: + return False + + signal.signal(self.sig, self.original_handler) + self.released = True + return True diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/enums.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/enums.py new file mode 100644 index 0000000000000000000000000000000000000000..d9050462aaf5d542e0f6a669cb293d13664a9b7f --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/enums.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum + +class LayerType(enum.Enum): + encoder = 1 + decoder = 2 + +class AttnType(enum.Enum): + self_attn = 1 + cross_attn = 2 + +class AttnMaskType(enum.Enum): + padding = 1 + causal = 2 + prefix = 3 + +class PositionEmbeddingType(enum.Enum): + rotary = 1 + absolute = 2 + alibi = 3 diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/__init__.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6886c17e8b640a1723783d398166c5171f2a7740 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/__init__.py @@ -0,0 +1,121 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import os +import pathlib +import subprocess + +import torch +from torch.utils import cpp_extension + +# Setting this param to a list has a problem of generating different +# compilation commands (with diferent order of architectures) and +# leading to recompilation of fused kernels. 
Set it to empty string +# to avoid recompilation and assign arch flags explicity in +# extra_cuda_cflags below +os.environ["TORCH_CUDA_ARCH_LIST"] = "" + + +def load(args): + + # Check if cuda 11 is installed for compute capability 8.0 + cc_flag = [] + if torch.version.hip is None: + _, bare_metal_major, bare_metal_minor = _get_cuda_bare_metal_version( + cpp_extension.CUDA_HOME) + if int(bare_metal_major) >= 11: + cc_flag.append('-gencode') + cc_flag.append('arch=compute_80,code=sm_80') + if int(bare_metal_minor) >= 1: + cc_flag.append('-gencode') + cc_flag.append('arch=compute_86,code=sm_86') + if int(bare_metal_minor) >= 4: + cc_flag.append('-gencode') + cc_flag.append('arch=compute_87,code=sm_87') + if int(bare_metal_minor) >= 8: + cc_flag.append('-gencode') + cc_flag.append('arch=compute_89,code=sm_89') + if int(bare_metal_major) >= 12: + cc_flag.append('-gencode') + cc_flag.append('arch=compute_90,code=sm_90') + + # Build path + srcpath = pathlib.Path(__file__).parent.absolute() + buildpath = srcpath / 'build' + _create_build_dir(buildpath) + + # Helper function to build the kernels. + def _cpp_extention_load_helper(name, sources, extra_cuda_flags, extra_include_paths): + if torch.version.hip is not None: + extra_cuda_cflags=['-O3'] + extra_cuda_flags + cc_flag + else: + extra_cuda_cflags=['-O3', + '-gencode', 'arch=compute_70,code=sm_70', + '--use_fast_math'] + extra_cuda_flags + cc_flag + + return cpp_extension.load( + name=name, + sources=sources, + build_directory=buildpath, + extra_cflags=['-O3',], + extra_cuda_cflags=extra_cuda_cflags, + extra_include_paths=extra_include_paths, + verbose=(args.rank == 0) + ) + + # ============== + # Fused softmax. + # ============== + + if torch.version.hip is not None: + extra_include_paths=[os.path.abspath(srcpath)] + else: + extra_include_paths=[] + + if args.masked_softmax_fusion: + if torch.version.hip is not None: + extra_cuda_flags = ['-D__HIP_NO_HALF_OPERATORS__=1', + '-D__HIP_NO_HALF_CONVERSIONS__=1'] + else: + extra_cuda_flags = ['-U__CUDA_NO_HALF_OPERATORS__', + '-U__CUDA_NO_HALF_CONVERSIONS__', + '--expt-relaxed-constexpr', + '--expt-extended-lambda'] + + # Upper triangular softmax. + sources=[srcpath / 'scaled_upper_triang_masked_softmax.cpp', + srcpath / 'scaled_upper_triang_masked_softmax_cuda.cu'] + scaled_upper_triang_masked_softmax_cuda = _cpp_extention_load_helper( + "scaled_upper_triang_masked_softmax_cuda", + sources, extra_cuda_flags, extra_include_paths) + + # Masked softmax. 
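+        # Same pattern as above: each softmax variant pairs a .cpp binding with
+        # a .cu kernel file and is compiled into its own extension module via
+        # _cpp_extention_load_helper(), so the variants build and load
+        # independently of one another.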
+ sources=[srcpath / 'scaled_masked_softmax.cpp', + srcpath / 'scaled_masked_softmax_cuda.cu'] + scaled_masked_softmax_cuda = _cpp_extention_load_helper( + "scaled_masked_softmax_cuda", sources, extra_cuda_flags, extra_include_paths) + + # Softmax + sources=[srcpath / 'scaled_softmax.cpp', + srcpath / 'scaled_softmax_cuda.cu'] + scaled_softmax_cuda = _cpp_extention_load_helper( + "scaled_softmax_cuda", sources, extra_cuda_flags, extra_include_paths) + + +def _get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], + universal_newlines=True) + output = raw_output.split() + release_idx = output.index("release") + 1 + release = output[release_idx].split(".") + bare_metal_major = release[0] + bare_metal_minor = release[1][0] + + return raw_output, bare_metal_major, bare_metal_minor + + +def _create_build_dir(buildpath): + try: + os.mkdir(buildpath) + except OSError: + if not os.path.isdir(buildpath): + print(f"Creation of the build directory {buildpath} failed") diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/__pycache__/__init__.cpython-310.pyc b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..834df14217cc09a1f088e5782024213ecf0b9f8a Binary files /dev/null and b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/__pycache__/__init__.cpython-310.pyc differ diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/compat.h b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/compat.h new file mode 100644 index 0000000000000000000000000000000000000000..5495d7807762d8b4e3dbc11b28dba15f85bd8108 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/compat.h @@ -0,0 +1,17 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */ + +/*This code is copied fron NVIDIA apex: + * https://github.com/NVIDIA/apex + * with minor changes. */ + + + +#ifndef TORCH_CHECK +#define TORCH_CHECK AT_CHECK +#endif + +#ifdef VERSION_GE_1_3 +#define DATA_PTR data_ptr +#else +#define DATA_PTR data +#endif diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax.cpp b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4c8a8c2ee39bcc0f9d04b23b2bf19032d8327e44 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax.cpp @@ -0,0 +1,83 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#include +#include +#include + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_masked_softmax { + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + torch::Tensor const& mask, + float scale_factor); + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor); + +int get_batch_per_block_cuda( + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads); + +torch::Tensor fwd( + torch::Tensor const& input, + torch::Tensor const& mask, + float scale_factor) { + AT_ASSERTM(input.dim() == 4, "expected 4D tensor"); + AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) || + (input.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + AT_ASSERTM(mask.dim() == 4, "expected 4D tensor"); + + return fwd_cuda(input, mask, scale_factor); +} + +torch::Tensor bwd( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor) { + + AT_ASSERTM(output_grads.dim() == 4, "expected 3D tensor"); + AT_ASSERTM(softmax_results.dim() == 4, "expected 3D tensor"); + + AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) || + (output_grads.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) || + (softmax_results.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + + return bwd_cuda(output_grads, softmax_results, scale_factor); +} + +int get_batch_per_block( + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads) { + return get_batch_per_block_cuda(query_seq_len, key_seq_len, batches, attn_heads); +} + +} // end namespace scaled_masked_softmax +} // end namespace fused_softmax +} // end namespace multihead_attn + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", + &multihead_attn::fused_softmax::scaled_masked_softmax::fwd, + "Self Multihead Attention scaled, time masked softmax -- Forward."); + + m.def("backward", + &multihead_attn::fused_softmax::scaled_masked_softmax::bwd, + "Self Multihead Attention scaled, time masked softmax -- Backward."); + + m.def("get_batch_per_block", + &multihead_attn::fused_softmax::scaled_masked_softmax::get_batch_per_block, + "Return Batch per block size." + ); +} diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax.h b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax.h new file mode 100644 index 0000000000000000000000000000000000000000..21ebbd52284203a64f6c7acab82e36fdb6cd7f6f --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax.h @@ -0,0 +1,710 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace { + +template +__device__ __inline__ void copy_vector(Datatype *dst, const Datatype *src); + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16 *dst, const c10::BFloat16 *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16 *dst, const c10::BFloat16 *src) { *((float2*) dst) = *((float2*) src); } + +template <> +__device__ __inline__ void copy_vector(c10::Half *dst, const c10::Half *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(c10::Half *dst, const c10::Half *src) { *((float2*) dst) = *((float2*) src); } + +template <> +__device__ __inline__ void copy_vector(uint8_t *dst, const uint8_t *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(uint8_t *dst, const uint8_t *src) {*((half2*) dst) = *((half2*) src); } + +int log2_ceil(int value) { + int log2_value = 0; + while ((1 << log2_value) < value) ++log2_value; + return log2_value; +} + +template +struct Add { + __device__ __forceinline__ T operator()(T a, T b) const { + return a + b; + } +}; + +template +struct Max { + __device__ __forceinline__ T operator()(T a, T b) const { + return a < b ? b : a; + } +}; + +template +__device__ __forceinline__ T WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +template class ReduceOp> +__device__ __forceinline__ void warp_reduce(acc_t* sum) { + ReduceOp r; + #pragma unroll + for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) { + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE); + sum[i] = r(sum[i], b); + } + } +} + + +/* + * Extended softmax (from native aten pytorch) with following additional features + * 1) input scaling + */ +template +__global__ void scaled_softmax_warp_forward( + output_t *dst, + const input_t *src, + const acc_t scale, + int micro_batch_size, + int element_count) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_forward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4; + + // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, ) + // gridDim/blockIdx = (seq_len, attn_heads, batches) + int first_batch = (blockDim.y * (blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z))+ threadIdx.y) * WARP_BATCH; + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // there might be multiple batches per warp. 
compute the index within the batch + int local_idx = threadIdx.x; + + src += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + dst += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + + // load data from global memory + acc_t elements[WARP_BATCH][WARP_ITERATIONS]; + input_t temp_data[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : element_count; + + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + + if (element_index < batch_element_count) { + int itr_idx = i*element_count+it*WARP_SIZE; + copy_vector(temp_data, src + itr_idx); + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + elements[i][it + element] = (acc_t)temp_data[element] * scale; + } + } else { + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + elements[i][it + element] = -std::numeric_limits::infinity(); + } + } + } + } + + // compute max_value + acc_t max_value[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + max_value[i] = elements[i][0]; + #pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it]; + } + } + warp_reduce(max_value); + + acc_t sum[WARP_BATCH] { 0.0f }; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + elements[i][it] = std::exp((elements[i][it] - max_value[i])); + sum[i] += elements[i][it]; + } + } + warp_reduce(sum); + + // store result + output_t out[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < element_count) { + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + out[element] = elements[i][it + element] / sum[i]; + } + copy_vector(dst + i * element_count + it * WARP_SIZE, out); + } else { + break; + } + } + } +} + + +/* + * Extended softmax (from native aten pytorch) with following additional features + * 1) input scaling + * 2) Explicit masking + */ +template +__global__ void scaled_masked_softmax_warp_forward( + output_t *dst, + const input_t *src, + const uint8_t *mask, + const acc_t scale, + int micro_batch_size, + int element_count, + int pad_batches) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_forward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 
1 : 4; + + // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, ) + // gridDim/blockIdx = (seq_len, attn_heads, batches) + int first_batch = (blockDim.y * (blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z))+ threadIdx.y) * WARP_BATCH; + int pad_first_batch = 0; + if (pad_batches != 1) { // bert style + pad_first_batch = (blockDim.y * (blockIdx.x + gridDim.x * blockIdx.z) + threadIdx.y) * WARP_BATCH; + } else { // gpt2 style + pad_first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH; + } + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + src += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + dst += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + mask += pad_first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + + // load data from global memory + acc_t elements[WARP_BATCH][WARP_ITERATIONS]; + input_t temp_data[ELEMENTS_PER_LDG_STG]; + uint8_t temp_mask[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : element_count; + + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + + if (element_index < batch_element_count) { + int itr_idx = i*element_count+it*WARP_SIZE; + copy_vector(temp_data, src + itr_idx); + copy_vector(temp_mask, mask + itr_idx); + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (temp_mask[element] != 1) { + elements[i][it + element] = (acc_t)temp_data[element] * scale; + } else { + elements[i][it + element] = -10000.0; + } + } + } else { + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + elements[i][it + element] = -std::numeric_limits::infinity(); + } + } + } + } + + // compute max_value + acc_t max_value[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + max_value[i] = elements[i][0]; + #pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it]; + } + } + warp_reduce(max_value); + + // compute scale value to account for full mask + acc_t scale_value[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + scale_value[i] = (max_value[i] == -10000.0) ? 
0.0 : 1.0; + } + + acc_t sum[WARP_BATCH] { 0.0f }; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + elements[i][it] = std::exp((elements[i][it] - max_value[i])); + sum[i] += elements[i][it]; + } + } + warp_reduce(sum); + + // store result + output_t out[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < element_count) { + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + out[element] = elements[i][it + element] * scale_value[i] / sum[i]; + } + copy_vector(dst + i * element_count + it * WARP_SIZE, out); + } else { + break; + } + } + } +} + +template +__global__ void scaled_masked_softmax_warp_backward( + output_t *gradInput, + input_t *grad, + const input_t *output, + acc_t scale, + int micro_batch_size, + int element_count) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_backward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4; + + // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, ) + // gridDim/blockIdx = (seq_len, attn_heads, batches) + int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH; + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + // the first element to process by the current thread + int thread_offset = first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx; + grad += thread_offset; + output += thread_offset; + gradInput += thread_offset; + + // load data from global memory + acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f }; + acc_t output_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f }; + input_t temp_grad[ELEMENTS_PER_LDG_STG]; + input_t temp_output[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 
0 : element_count; + + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < batch_element_count) { + copy_vector(temp_grad, grad + i * element_count + it * WARP_SIZE); + copy_vector(temp_output, output + i * element_count + it * WARP_SIZE); + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + output_reg[i][it + element] = (acc_t)temp_output[element]; + } + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + grad_reg[i][it + element] = (acc_t)temp_grad[element] * output_reg[i][it + element]; + } + } + } + } + + acc_t sum[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + sum[i] = grad_reg[i][0]; + #pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + sum[i] += grad_reg[i][it]; + } + } + warp_reduce(sum); + + // store result + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < element_count) { + // compute gradients + output_t out[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + out[element] = (output_t)(scale * (grad_reg[i][it + element] - output_reg[i][it + element] * sum[i])); + } + copy_vector(gradInput + i * element_count + it * WARP_SIZE, out); + } + } + } +} +} // end of anonymous namespace + +int get_batch_per_block(int query_seq_len, int key_seq_len, int batches, int attn_heads){ + int log2_elements = log2_ceil(key_seq_len); + const int next_power_of_two = 1 << log2_elements; + + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1; + + constexpr int threads_per_block = 128; + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + + return batches_per_block; +} + +template +void dispatch_scaled_softmax_forward( + output_t *dst, + const input_t *src, + const input_t scale, + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads) +{ + TORCH_INTERNAL_ASSERT(key_seq_len >= 0 && key_seq_len <= 4096 ); + if (key_seq_len == 0) { + return; + } else { + int log2_elements = log2_ceil(key_seq_len); + const int next_power_of_two = 1 << log2_elements; + int batch_count = batches * attn_heads * query_seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward. + int batches_per_warp = (next_power_of_two <= 128) ? 
2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + TORCH_INTERNAL_ASSERT(query_seq_len%batches_per_block == 0); + dim3 blocks(query_seq_len/batches_per_block, attn_heads, batches); + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 1: // 2 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 2: // 4 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 3: // 8 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 4: // 16 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 5: // 32 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 6: // 64 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 7: // 128 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 8: // 256 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 9: // 512 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 10: // 1024 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 11: // 2048 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + case 12: // 4096 + scaled_softmax_warp_forward + <<>>(dst, src, scale, batch_count, key_seq_len); + break; + default: + break; + } + } +} + +template +void dispatch_scaled_masked_softmax_forward( + output_t *dst, + const input_t *src, + const uint8_t *mask, + const input_t scale, + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads, + int pad_batches) +{ + TORCH_INTERNAL_ASSERT(key_seq_len >= 0 && key_seq_len <= 4096 ); + if (key_seq_len == 0) { + return; + } else { + int log2_elements = log2_ceil(key_seq_len); + const int next_power_of_two = 1 << log2_elements; + int batch_count = batches * attn_heads * query_seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward. + int batches_per_warp = (next_power_of_two <= 128) ? 
2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + TORCH_INTERNAL_ASSERT(query_seq_len%batches_per_block == 0); + dim3 blocks(query_seq_len/batches_per_block, attn_heads, batches); + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 1: // 2 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 2: // 4 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 3: // 8 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 4: // 16 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 5: // 32 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 6: // 64 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 7: // 128 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 8: // 256 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 9: // 512 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 10: // 1024 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 11: // 2048 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + case 12: // 4096 + scaled_masked_softmax_warp_forward + <<>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches); + break; + default: + break; + } + } +} + +template +void dispatch_scaled_masked_softmax_backward( + output_t *grad_input, + input_t *grad, + const input_t *output, + const acc_t scale, + int query_seq_len, + int key_seq_len, + int batches, + int attn_heads) +{ + TORCH_INTERNAL_ASSERT( key_seq_len >= 0 && key_seq_len <= 4096 ); + if (key_seq_len == 0) { + return; + } else { + int log2_elements = log2_ceil(key_seq_len); + const int next_power_of_two = 1 << log2_elements; + int batch_count = batches * attn_heads * query_seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward. + int batches_per_warp = (next_power_of_two <= 128) ? 
2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + int blocks = batch_count/batches_per_block; + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 1: // 2 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 2: // 4 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 3: // 8 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 4: // 16 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 5: // 32 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 6: // 64 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 7: // 128 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 8: // 256 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 9: // 512 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 10: // 1024 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 11: // 2048 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + case 12: // 4096 + scaled_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, key_seq_len); + break; + + default: + break; + } + } +} diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax_cuda.cu b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..76086de2e7cee01027f19fc85fb3fe524a872c6c --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_masked_softmax_cuda.cu @@ -0,0 +1,109 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#include +#include +#include +#include +#ifndef __HIP_PLATFORM_HCC__ +#include +#endif +#include +#include +#include "scaled_masked_softmax.h" +#include "type_shim.h" + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_masked_softmax { + +int get_batch_per_block_cuda(int query_seq_len, int key_seq_len, int batches, int attn_heads){ + return get_batch_per_block(query_seq_len, key_seq_len, batches, attn_heads); +} + + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + torch::Tensor const& mask, + float scale_factor) +{ + // input is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] + const int batches = input.size(0); + const int pad_batches = mask.size(0); + const int attn_heads = input.size(1); + const int query_seq_len = input.size(2); + const int key_seq_len = input.size(3); + TORCH_INTERNAL_ASSERT(key_seq_len <= 4096); + TORCH_INTERNAL_ASSERT(query_seq_len > 1); + TORCH_INTERNAL_ASSERT(pad_batches == 1 || pad_batches == batches); + TORCH_INTERNAL_ASSERT(mask.size(1) == 1); + TORCH_INTERNAL_ASSERT(mask.size(2) == query_seq_len); + TORCH_INTERNAL_ASSERT(mask.size(3) == key_seq_len); + + // Output + auto act_options = input.options().requires_grad(false); + torch::Tensor softmax_results = + torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options); + + // Softmax Intermediate Result Ptr + void* input_ptr = static_cast(input.data_ptr()); + void* mask_ptr = static_cast(mask.data_ptr()); + void* softmax_results_ptr = static_cast(softmax_results.data_ptr()); + + DISPATCH_HALF_AND_BFLOAT( + input.scalar_type(), + "dispatch_scaled_masked_softmax_forward", + dispatch_scaled_masked_softmax_forward( + reinterpret_cast(softmax_results_ptr), + reinterpret_cast(input_ptr), + reinterpret_cast(mask_ptr), + scale_factor, + query_seq_len, + key_seq_len, + batches, + attn_heads, + pad_batches); + ); + return softmax_results; +} + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads_, + torch::Tensor const& softmax_results_, + float scale_factor) { + + auto output_grads = output_grads_.contiguous(); + auto softmax_results = softmax_results_.contiguous(); + + //output grads is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] + const int batches = output_grads.size(0); + const int attn_heads = output_grads.size(1); + const int query_seq_len = output_grads.size(2); + const int key_seq_len = output_grads.size(3); + + auto act_options = output_grads.options().requires_grad(false); + torch::Tensor input_grads = + torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options); + + void* output_grads_ptr = static_cast(output_grads.data_ptr()); + void* input_grads_ptr = static_cast(input_grads.data_ptr()); + + //Softmax Grad + DISPATCH_HALF_AND_BFLOAT( + output_grads_.scalar_type(), + "dispatch_scaled_masked_softmax_backward", + dispatch_scaled_masked_softmax_backward( + reinterpret_cast(input_grads_ptr), + reinterpret_cast(output_grads_ptr), + reinterpret_cast(softmax_results.data_ptr()), + scale_factor, + query_seq_len, + key_seq_len, + batches, + attn_heads); + ); + + return input_grads; +} +} +} +} diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_softmax.cpp b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_softmax.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e10cd77e7fb35247fac2b547d8bccc18c3dca58e --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_softmax.cpp 
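The masked-softmax sources above, and the unmasked scaled_softmax variant whose hunk follows, all implement the same row-wise computation. Since the flattened CUDA is hard to read, here is a minimal plain-C++ reference sketch of that math, added for illustration only and not part of the patch. The helper names ref_scaled_masked_softmax_fwd/bwd and the flat-buffer layout (rows = batches * attn_heads * query_seq_len, row length key_seq_len, mask == 1 meaning a padded position) are assumptions of this sketch; they mirror the -10000.0 mask fill and the fully-masked-row zeroing in scaled_masked_softmax_warp_forward.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Forward: y = softmax(scale * x) over each row of length k_len; positions with
// mask == 1 are filled with -10000.0 before the softmax, and a row that is
// entirely masked is forced to all zeros (the scale_value trick in the kernel).
std::vector<float> ref_scaled_masked_softmax_fwd(const std::vector<float>& x,
                                                 const std::vector<std::uint8_t>& mask,
                                                 int rows, int k_len, float scale) {
    std::vector<float> y(x.size(), 0.0f);
    for (int r = 0; r < rows; ++r) {
        const float* xr = x.data() + (std::size_t)r * k_len;
        const std::uint8_t* mr = mask.data() + (std::size_t)r * k_len;
        float* yr = y.data() + (std::size_t)r * k_len;
        std::vector<float> e(k_len);
        for (int j = 0; j < k_len; ++j)
            e[j] = (mr[j] != 1) ? scale * xr[j] : -10000.0f;
        float max_v = e[0];
        for (int j = 1; j < k_len; ++j) max_v = std::max(max_v, e[j]);
        float row_scale = (max_v == -10000.0f) ? 0.0f : 1.0f;  // fully masked row
        float sum = 0.0f;
        for (int j = 0; j < k_len; ++j) { e[j] = std::exp(e[j] - max_v); sum += e[j]; }
        for (int j = 0; j < k_len; ++j) yr[j] = e[j] * row_scale / sum;
    }
    return y;
}

// Backward: dx = scale * (dy * y - y * sum_j(dy_j * y_j)) per row, matching
// scaled_masked_softmax_warp_backward (y is the saved softmax output).
std::vector<float> ref_scaled_masked_softmax_bwd(const std::vector<float>& dy,
                                                 const std::vector<float>& y,
                                                 int rows, int k_len, float scale) {
    std::vector<float> dx(dy.size(), 0.0f);
    for (int r = 0; r < rows; ++r) {
        float dot = 0.0f;
        for (int j = 0; j < k_len; ++j) {
            std::size_t i = (std::size_t)r * k_len + j;
            dot += dy[i] * y[i];
        }
        for (int j = 0; j < k_len; ++j) {
            std::size_t i = (std::size_t)r * k_len + j;
            dx[i] = scale * (dy[i] * y[i] - y[i] * dot);
        }
    }
    return dx;
}

Up to fp16/bf16 rounding, this agrees with the unit tests added later in this patch (forward_torch_softmax), except for all-masked rows, which the fused kernel deliberately returns as zeros and the tests check separately.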
@@ -0,0 +1,61 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */ + +#include <cuda_fp16.h> +#include <torch/extension.h> +#include <vector> + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_softmax { + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + float scale_factor); + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor); + +torch::Tensor fwd( + torch::Tensor const& input, + float scale_factor) { + AT_ASSERTM(input.dim() == 4, "expected 4D tensor"); + AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) || + (input.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + + return fwd_cuda(input, scale_factor); +} + +torch::Tensor bwd( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor) { + + AT_ASSERTM(output_grads.dim() == 4, "expected 4D tensor"); + AT_ASSERTM(softmax_results.dim() == 4, "expected 4D tensor"); + + AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) || + (output_grads.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) || + (softmax_results.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + + return bwd_cuda(output_grads, softmax_results, scale_factor); +} + +} // end namespace scaled_softmax +} // end namespace fused_softmax +} // end namespace multihead_attn + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", + &multihead_attn::fused_softmax::scaled_softmax::fwd, + "Self Multihead Attention scaled, softmax -- Forward."); + m.def("backward", + &multihead_attn::fused_softmax::scaled_softmax::bwd, + "Self Multihead Attention scaled, softmax -- Backward."); +} + diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_softmax_cuda.cu b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_softmax_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..90e1c9f16eefbbeae4a4b1505149261afd0f1034 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_softmax_cuda.cu @@ -0,0 +1,92 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#include +#include +#include +#include +#ifndef __HIP_PLATFORM_HCC__ +#include +#endif +#include +#include +#include "scaled_masked_softmax.h" +#include "type_shim.h" + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_softmax { + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + float scale_factor) +{ + // input is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] + const int batches = input.size(0); + const int attn_heads = input.size(1); + const int query_seq_len = input.size(2); + const int key_seq_len = input.size(3); + TORCH_INTERNAL_ASSERT(key_seq_len <= 4096); + TORCH_INTERNAL_ASSERT(query_seq_len > 1); + + // Output + auto act_options = input.options().requires_grad(false); + torch::Tensor softmax_results = + torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options); + + // Softmax Intermediate Result Ptr + void* input_ptr = static_cast(input.data_ptr()); + void* softmax_results_ptr = static_cast(softmax_results.data_ptr()); + + DISPATCH_HALF_AND_BFLOAT( + input.scalar_type(), + "dispatch_scaled_softmax_forward", + dispatch_scaled_softmax_forward( + reinterpret_cast(softmax_results_ptr), + reinterpret_cast(input_ptr), + scale_factor, + query_seq_len, + key_seq_len, + batches, + attn_heads); + ); + return softmax_results; +} + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads_, + torch::Tensor const& softmax_results_, + float scale_factor) { + + auto output_grads = output_grads_.contiguous(); + auto softmax_results = softmax_results_.contiguous(); + + //output grads is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len] + const int batches = output_grads.size(0); + const int attn_heads = output_grads.size(1); + const int query_seq_len = output_grads.size(2); + const int key_seq_len = output_grads.size(3); + + void* output_grads_ptr = static_cast(output_grads.data_ptr()); + + //Softmax Grad + DISPATCH_HALF_AND_BFLOAT( + output_grads_.scalar_type(), + "dispatch_scaled_masked_softmax_backward", + dispatch_scaled_masked_softmax_backward( + reinterpret_cast(output_grads_ptr), + reinterpret_cast(output_grads_ptr), + reinterpret_cast(softmax_results.data_ptr()), + scale_factor, + query_seq_len, + key_seq_len, + batches, + attn_heads); + ); + + //backward pass is completely in-place + return output_grads; +} +} +} +} + diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_upper_triang_masked_softmax.cpp b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_upper_triang_masked_softmax.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ddfc8646a3dd109b31d633f109079e2c1af98e9d --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_upper_triang_masked_softmax.cpp @@ -0,0 +1,58 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#include +#include +#include + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_upper_triang_masked_softmax { + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + float scale_factor); + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor); + +torch::Tensor fwd(torch::Tensor const& input, float scale_factor) { + AT_ASSERTM(input.dim() == 3, "expected 3D tensor"); + AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) || + (input.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + + return fwd_cuda(input, scale_factor); +} + +torch::Tensor bwd( + torch::Tensor const& output_grads, + torch::Tensor const& softmax_results, + float scale_factor) { + + AT_ASSERTM(output_grads.dim() == 3, "expected 3D tensor"); + AT_ASSERTM(softmax_results.dim() == 3, "expected 3D tensor"); + + AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) || + (output_grads.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) || + (softmax_results.scalar_type() == at::ScalarType::BFloat16), + "Only fp16 and bf16 are supported"); + + return bwd_cuda(output_grads, softmax_results, scale_factor); +} + +} // end namespace scaled_upper_triang_masked_softmax +} // end namespace fused_softmax +} // end namespace multihead_attn + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", + &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::fwd, + "Self Multihead Attention scaled, time masked softmax -- Forward."); + m.def("backward", + &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::bwd, + "Self Multihead Attention scaled, time masked softmax -- Backward."); +} diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_upper_triang_masked_softmax.h b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_upper_triang_masked_softmax.h new file mode 100644 index 0000000000000000000000000000000000000000..5711f0fbf49f52af088defdae371720c6bcdb600 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_upper_triang_masked_softmax.h @@ -0,0 +1,524 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
*/ + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace { + +template +__device__ __inline__ void copy_vector(Datatype *dst, const Datatype *src); + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16 *dst, const c10::BFloat16 *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(c10::BFloat16 *dst, const c10::BFloat16 *src) { *((float2*) dst) = *((float2*) src); } + +template <> +__device__ __inline__ void copy_vector(c10::Half *dst, const c10::Half *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(c10::Half *dst, const c10::Half *src) { *((float2*) dst) = *((float2*) src); } + +template <> +__device__ __inline__ void copy_vector(uint8_t *dst, const uint8_t *src) { *dst = *src; } + +template <> +__device__ __inline__ void copy_vector(uint8_t *dst, const uint8_t *src) {*((half2*) dst) = *((half2*) src); } + +template +__device__ __inline__ void copy_zero_vector(Datatype *dst); + +template <> +__device__ __inline__ void copy_zero_vector(c10::BFloat16 *dst) { *dst = 0.0; } + +template <> +__device__ __inline__ void copy_zero_vector(c10::BFloat16 *dst) { *((float2*) dst) = make_float2(0.0f, 0.0f); } + +template <> +__device__ __inline__ void copy_zero_vector(c10::Half *dst) { *dst = 0.0; } + +template <> +__device__ __inline__ void copy_zero_vector(c10::Half *dst) { *((float2*) dst) = make_float2(0.0f, 0.0f); } + + +int log2_ceil(int value) { + int log2_value = 0; + while ((1 << log2_value) < value) ++log2_value; + return log2_value; +} + +template +struct Add { + __device__ __forceinline__ T operator()(T a, T b) const { + return a + b; + } +}; + +template +struct Max { + __device__ __forceinline__ T operator()(T a, T b) const { + return a < b ? b : a; + } +}; + +template +__device__ __forceinline__ T WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +template class ReduceOp> +__device__ __forceinline__ void warp_reduce(acc_t* sum) { + ReduceOp r; + #pragma unroll + for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) { + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE); + sum[i] = r(sum[i], b); + } + } +} + +/* + * Extended softmax (from native aten pytorch) with following additional features + * 1) input scaling + * 2) Implicit time (diagonal masking) + */ +template +__global__ void scaled_upper_triang_masked_softmax_warp_forward( + output_t *dst, + const input_t *src, + const acc_t scale, + int micro_batch_size, + int stride, + int element_count) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_forward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4; + + int first_batch = (blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * WARP_BATCH + blockIdx.x; + int local_seq = blockIdx.x + 1; + int warp_iteration_limit = (local_seq + ELEMENTS_PER_LDG_STG * WARP_SIZE - 1)/ WARP_SIZE; + + // micro_batch_size might not be a multiple of WARP_BATCH. 
Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + src += first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx; + dst += first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx; + + // load data from global memory + acc_t elements[WARP_BATCH][WARP_ITERATIONS]; + input_t temp_data[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : local_seq; + + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + + if (element_index < batch_element_count) { + copy_vector(temp_data, src + i*element_count*stride + it*WARP_SIZE); + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if ((element_index + element) < batch_element_count) { + elements[i][it+element] = (acc_t)temp_data[element] * scale; + } else { + elements[i][it + element] = -std::numeric_limits::infinity(); + } + } + } else { + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + elements[i][it + element] = -std::numeric_limits::infinity(); + } + } + } + } + + // compute max_value + acc_t max_value[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + max_value[i] = elements[i][0]; + #pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it]; + } + } + warp_reduce(max_value); + + acc_t sum[WARP_BATCH] { 0.0f }; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + if (it < warp_iteration_limit) { + elements[i][it] = std::exp((elements[i][it] - max_value[i])); + sum[i] += elements[i][it]; + } + } + } + warp_reduce(sum); + + // store result + output_t out[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + + if (element_index < local_seq) { + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (element_index + element < local_seq) { + out[element] = elements[i][it + element] / sum[i]; + } else { + out[element] = 0; + } + } + copy_vector(dst + i * element_count * stride + it * WARP_SIZE, out); + } else if (element_index < element_count) { + copy_zero_vector(dst + i * element_count * stride + it * WARP_SIZE); + } else { + break; + } + } + } +} + +template +__global__ void scaled_upper_triang_masked_softmax_warp_backward( + output_t *gradInput, + input_t *grad, + const input_t *output, + acc_t scale, + int micro_batch_size, + int stride, + int element_count) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + // warp_size of method warp_softmax_backward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 
2 : 1; + constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4; + + int first_batch = (blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * WARP_BATCH + blockIdx.x; + int local_seq = blockIdx.x + 1; + + // micro_batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = micro_batch_size - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x; + + // the first element to process by the current thread + int thread_offset = first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx; + grad += thread_offset; + output += thread_offset; + gradInput += thread_offset; + + // load data from global memory + acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f }; + acc_t output_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f }; + input_t temp_grad[ELEMENTS_PER_LDG_STG]; + input_t temp_output[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : local_seq; + + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < batch_element_count) { + copy_vector(temp_grad, grad + i * element_count * stride + it * WARP_SIZE); + copy_vector(temp_output, output + i * element_count * stride + it * WARP_SIZE); + + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (element_index + element < batch_element_count) { + output_reg[i][it + element] = (acc_t)temp_output[element]; + } + } + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + if (element_index + element < batch_element_count) { + grad_reg[i][it + element] = (acc_t)temp_grad[element] * output_reg[i][it + element]; + } + } + } + } + } + + acc_t sum[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + sum[i] = grad_reg[i][0]; + #pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + sum[i] += grad_reg[i][it]; + } + } + warp_reduce(sum); + + // store result + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) { + int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE; + if (element_index < element_count) { + // compute gradients + output_t out[ELEMENTS_PER_LDG_STG]; + #pragma unroll + for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) { + out[element] = (output_t)(scale * (grad_reg[i][it + element] - output_reg[i][it + element] * sum[i])); + } + copy_vector(gradInput + i * element_count * stride + it * WARP_SIZE, out); + } + } + } +} + +} // end of anonymous namespace + +template +void dispatch_scaled_upper_triang_masked_softmax_forward( + output_t *dst, + const input_t *src, + const input_t scale, + int softmax_elements, + int softmax_elements_stride, + int attn_batches) +{ + TORCH_INTERNAL_ASSERT(softmax_elements >= 0 && softmax_elements <= 16384 ); + if (softmax_elements == 0) { + return; + } else { + int log2_elements = log2_ceil(softmax_elements); + const int next_power_of_two = 1 << log2_elements; + int seq_len = softmax_elements; + int batch_count = attn_batches * seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. 
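    // Worked example of the launch geometry computed below (illustrative values,
    // assumed for this sketch rather than taken from the original source):
    // for softmax_elements = 2048, C10_WARP_SIZE = 32 and threads_per_block = 128,
    //   log2_elements     = 11, next_power_of_two = 2048
    //   warp_size         = min(2048, 32) = 32
    //   batches_per_warp  = 1            (next_power_of_two > 128)
    //   warps_per_block   = 128 / 32     = 4
    //   batches_per_block = 4 * 1        = 4
    // so attn_batches must be divisible by 4, the grid is
    // dim3(seq_len, attn_batches / 4, 1) and each block runs dim3(32, 4, 1) threads.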
+ int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward. + int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + TORCH_INTERNAL_ASSERT(attn_batches % batches_per_block == 0); + + int blocks_per_seq = attn_batches / batches_per_block; + dim3 blocks(seq_len, blocks_per_seq, 1); + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 1: // 2 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 2: // 4 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 3: // 8 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 4: // 16 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 5: // 32 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 6: // 64 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 7: // 128 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 8: // 256 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 9: // 512 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 10: // 1024 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 11: // 2048 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 12: // 4096 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 13: // 8192 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 14: // 16384 + scaled_upper_triang_masked_softmax_warp_forward + <<>>(dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + + default: + break; + } + } +} + +template +void dispatch_scaled_upper_triang_masked_softmax_backward( + output_t *grad_input, + input_t *grad, + const input_t *output, + const acc_t scale, + int softmax_elements, + int softmax_elements_stride, + int attn_batches) +{ + TORCH_INTERNAL_ASSERT( softmax_elements >= 0 && softmax_elements <= 16384 ); + if (softmax_elements == 0) { + return; + } else { + int 
log2_elements = log2_ceil(softmax_elements); + const int next_power_of_two = 1 << log2_elements; + int seq_len = softmax_elements; + int batch_count = attn_batches * seq_len; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward. + int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward. + int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + TORCH_INTERNAL_ASSERT(attn_batches % batches_per_block == 0); + + int blocks_per_seq = attn_batches / batches_per_block; + dim3 blocks(seq_len, blocks_per_seq, 1); + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 1: // 2 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 2: // 4 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 3: // 8 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 4: // 16 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 5: // 32 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 6: // 64 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 7: // 128 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 8: // 256 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 9: // 512 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 10: // 1024 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 11: // 2048 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 12: // 4096 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 13: // 8192 + scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + case 14: // 16384 + 
scaled_upper_triang_masked_softmax_warp_backward + <<>>(grad_input, grad, output, scale, batch_count, softmax_elements_stride, softmax_elements); + break; + default: + break; + } + } +} diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_upper_triang_masked_softmax_cuda.cu b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_upper_triang_masked_softmax_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..74c9f3d2163c7b82be65dc8d2b814c6d4681f4c1 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/scaled_upper_triang_masked_softmax_cuda.cu @@ -0,0 +1,86 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */ + +#include +#include +#include +#include +#ifndef __HIP_PLATFORM_HCC__ +#include +#endif +#include +#include +#include "scaled_upper_triang_masked_softmax.h" +#include "type_shim.h" + +namespace multihead_attn { +namespace fused_softmax { +namespace scaled_upper_triang_masked_softmax { + +torch::Tensor fwd_cuda( + torch::Tensor const& input, + float scale_factor) +{ + // input is a 3d tensor with dimensions [attn_batches, seq_len, seq_len] + const int attn_batches = input.size(0); + const int seq_len = input.size(1); + TORCH_INTERNAL_ASSERT(seq_len <= 16384); + + // Output + auto act_options = input.options().requires_grad(false); + torch::Tensor softmax_results = + torch::empty({attn_batches, seq_len, seq_len}, act_options); + + // Softmax Intermediate Result Ptr + void* input_ptr = static_cast(input.data_ptr()); + void* softmax_results_ptr = static_cast(softmax_results.data_ptr()); + + DISPATCH_HALF_AND_BFLOAT( + input.scalar_type(), + "dispatch_scaled_upper_triang_masked_softmax_forward", + dispatch_scaled_upper_triang_masked_softmax_forward( + reinterpret_cast(softmax_results_ptr), + reinterpret_cast(input_ptr), + scale_factor, + seq_len, + seq_len, + attn_batches); + ); + return softmax_results; +} + + +torch::Tensor bwd_cuda( + torch::Tensor const& output_grads_, + torch::Tensor const& softmax_results_, + float scale_factor) { + + auto output_grads = output_grads_.contiguous(); + auto softmax_results = softmax_results_.contiguous(); + + //output grads is a 3d tensor with dimensions [attn_batches, seq_len, seq_len] + const int attn_batches = output_grads.size(0); + const int seq_len = output_grads.size(1); + TORCH_INTERNAL_ASSERT(output_grads.size(1) == output_grads.size(2)); + + void* output_grads_ptr = static_cast(output_grads.data_ptr()); + + //Softmax Grad + DISPATCH_HALF_AND_BFLOAT( + output_grads_.scalar_type(), + "dispatch_scaled_upper_triang_masked_softmax_backward", + dispatch_scaled_upper_triang_masked_softmax_backward( + reinterpret_cast(output_grads_ptr), + reinterpret_cast(output_grads_ptr), + reinterpret_cast(softmax_results.data_ptr()), + scale_factor, + seq_len, + seq_len, + attn_batches); + ); + + //backward pass is completely in-place + return output_grads; +} +} +} +} diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/tests/__init__.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/tests/test_fused_kernels.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/tests/test_fused_kernels.py new file mode 100644 index 
0000000000000000000000000000000000000000..74024c5020f45ea3e80607fad38a45c4dab3453b --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/tests/test_fused_kernels.py @@ -0,0 +1,388 @@ +import math + +import torch +from torch.nn import LayerNorm + +from megatron.model.enums import AttnMaskType +from megatron.model.fused_layer_norm import MixedFusedLayerNorm +from megatron.model.fused_softmax import FusedScaleMaskSoftmax +from megatron.model.utils import attention_mask_func +from megatron.fused_kernels import load + +def test_load_fused_kernels(): + try: + import fused_layer_norm_cuda + import scaled_masked_softmax_cuda + import scaled_upper_triang_masked_softmax_cuda + import torch + + print("[Success] load_fused_kernels") + except ImportError as e: + print("[Fail] load_fused_kernels") + raise e + +def test_fused_softmax(): + bert = BertModel.from_pretrained("bert-base-cased").cuda().half() + tokenizer = BertTokenizer.from_pretrained("bert-base-cased") + test_text = ( + "Hello. How are you? I am fine thank you and you? yes Good. " + "hi hi hi hi hi hi hi hi hi hi hi hi hi" # 32 + ) + + tokens = tokenizer( + [test_text] * 4, + return_tensors="pt", + ) + + embedding_output = bert.embeddings( + input_ids=tokens["input_ids"].cuda(), + position_ids=None, + token_type_ids=tokens["token_type_ids"].cuda(), + inputs_embeds=None, + past_key_values_length=0, + ) + + # (bsz, 1, 1, seq_len) + mask = bert.get_extended_attention_mask( + attention_mask=tokens["attention_mask"].cuda(), + input_shape=tokens["input_ids"].shape, + device=bert.device, + ) + # (bsz, 1, seq_len, seq_len) + mask = mask.repeat(1, 1, mask.size()[-1], 1) + + attention = bert.encoder.layer[0].attention.self + key_layer = attention.transpose_for_scores(attention.key(embedding_output)) + query_layer = attention.transpose_for_scores(attention.query(embedding_output)) + + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores /= math.sqrt(key_layer.size()[-1]) + + fused_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + mask_func=attention_mask_func, + scale=None, + softmax_in_fp32=False, + attn_mask_type=AttnMaskType.padding, + scaled_masked_softmax_fusion=True, + ) + .cuda() + .half() + ) + + fused_softmax_output = fused_softmax( + attention_scores, + (mask != 0), + ) + + torch_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + mask_func=attention_mask_func, + scale=None, + softmax_in_fp32=False, + attn_mask_type=AttnMaskType.padding, + scaled_masked_softmax_fusion=False, + ) + .cuda() + .half() + ) + + torch_softmax_output = torch_softmax( + attention_scores, + (mask != 0), + ) + + test_result = (fused_softmax_output - torch_softmax_output).abs() + + while test_result.dim() != 1: + test_result = test_result.mean(dim=-1) + + diff = test_result.mean(dim=-1) + + if diff <= 1e-3: + print( + f"\n[Success] test_fused_softmax" + f"\n > mean_difference={diff}" + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}" + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) + else: + print( + f"\n[Fail] test_fused_softmax" + f"\n > mean_difference={diff}, " + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}, " + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) + + +def test_fused_upper_triangle_mask_softmax(): + gpt = GPT2Model.from_pretrained("gpt2").cuda().half() + tokenizer = GPT2Tokenizer.from_pretrained("gpt2") + test_text = ( + 
"Hello. How are you? I am fine thank you and you? yes Good. " + "hi hi hi hi hi hi hi" # 24 + ) + + tokens = tokenizer( + [test_text] * 4, + return_tensors="pt", + ) + + attention_mask = tokens["attention_mask"].cuda() + attention_mask = attention_mask.view(attention_mask.size(0), -1) + attention_mask = attention_mask[:, None, None, :] + attention_mask = (1.0 - attention_mask) * -10000.0 + attention_mask = attention_mask.repeat(1, 1, attention_mask.size()[-1], 1) + attn = gpt.h[0] + + hidden_states = gpt.wte(tokens["input_ids"].cuda()) + q, k, v = attn.attn.c_attn(hidden_states).split(768, dim=-1) + q = attn.attn._split_heads(q, attn.attn.num_heads, attn.attn.head_dim) + k = attn.attn._split_heads(k, attn.attn.num_heads, attn.attn.head_dim) + attn_weights = torch.matmul(q, k.transpose(-1, -2)) + + sq, sk = q.size(-2), k.size(-2) + causal_mask = attn.attn.bias[:, :, sk - sq : sk, :sk].bool() + total_mask = ~(causal_mask & (attention_mask == 0)) + """ + tensor([[[[False, True, True, ..., True, True, True], + [False, False, True, ..., True, True, True], + [False, False, False, ..., True, True, True], + ..., + [False, False, False, ..., False, True, True], + [False, False, False, ..., False, False, True], + [False, False, False, ..., False, False, False]]] + """ + + fused_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + mask_func=attention_mask_func, + scale=None, + softmax_in_fp32=False, + attn_mask_type=AttnMaskType.causal, + scaled_masked_softmax_fusion=True, + ) + .cuda() + .half() + ) + + fused_softmax_output = fused_softmax( + attn_weights, + total_mask, + ) + + torch_softmax = ( + FusedScaleMaskSoftmax( + input_in_fp16=True, + input_in_bf16=False, + mask_func=attention_mask_func, + scale=None, + softmax_in_fp32=False, + attn_mask_type=AttnMaskType.causal, + scaled_masked_softmax_fusion=False, + ) + .cuda() + .half() + ) + + torch_softmax_output = torch_softmax( + attn_weights, + total_mask, + ) + + test_result = (fused_softmax_output - torch_softmax_output).abs() + + while test_result.dim() != 1: + test_result = test_result.mean(dim=-1) + + diff = test_result.mean(dim=-1) + + if diff <= 1e-3: + print( + f"\n[Success] test_fused_upper_triangle_mask_softmax" + f"\n > mean_difference={diff}" + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}" + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) + else: + print( + f"\n[Fail] test_fused_upper_triangle_mask_softmax" + f"\n > mean_difference={diff}, " + f"\n > fused_values={fused_softmax_output[-1][-1][-1][:5].tolist()}, " + f"\n > torch_values={torch_softmax_output[-1][-1][-1][:5].tolist()}" + ) + + +def test_layer_norm(): + bert = BertModel.from_pretrained("bert-base-cased").cuda().half() + tokenizer = BertTokenizer.from_pretrained("bert-base-cased") + test_text = ( + "Hello. How are you? I am fine thank you and you? yes Good. 
" + "hi hi hi hi hi hi hi hi hi hi hi hi hi" # 32 + ) + + tokens = tokenizer( + [test_text] * 4, + return_tensors="pt", + ) + + # [bsz, seq_len, d_model] + embedding_output = ( + bert.embeddings( + input_ids=tokens["input_ids"].cuda(), + position_ids=None, + token_type_ids=tokens["token_type_ids"].cuda(), + inputs_embeds=None, + past_key_values_length=0, + ) + .cuda() + .half() + ) + + fused_layernorm_layer = ( + MixedFusedLayerNorm(normalized_shape=embedding_output.size(-1)).cuda().half() + ) + + torch_layernorm_layer = ( + LayerNorm(normalized_shape=embedding_output.size(-1)).cuda().half() + ) + + fused_output = fused_layernorm_layer(embedding_output) + torch_output = torch_layernorm_layer(embedding_output) + test_result = (fused_output - torch_output).abs() + + while test_result.dim() != 1: + test_result = test_result.mean(dim=-1) + + diff = test_result.mean(dim=-1) + + if diff <= 1e-3: + print( + f"\n[Success] test_layer_norm" + f"\n > mean_difference={diff}" + f"\n > fused_values={fused_output[-1][-1][:5].tolist()}" + f"\n > torch_values={torch_output[-1][-1][:5].tolist()}" + ) + else: + print( + f"\n[Fail] test_layer_norm" + f"\n > mean_difference={diff}, " + f"\n > fused_values={fused_output[-1][-1][:5].tolist()}, " + f"\n > torch_values={torch_output[-1][-1][:5].tolist()}" + ) + + +def attention_mask_func(attention_scores, attention_mask): + attention_scores.masked_fill_(attention_mask, -10000.0) + return attention_scores + + +def forward_torch_softmax(input, mask, scale): + input = input * scale + mask_output = attention_mask_func(input, mask) if mask is not None else input + probs = torch.nn.Softmax(dim=-1)(mask_output) + return probs + + +def test_masked_softmax_forward(): + import scaled_masked_softmax_cuda + + batch = 2 + attn = 16 + scale_t = torch.tensor([1.0]) + for qlen in [128, 256, 1024, 2048, 4096]: + for klen in [128, 256, 1024, 2048]: + inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0') + masks = torch.randint(0, 2, (batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0') + softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item()) + softmax_results_torch = forward_torch_softmax(inputs, masks, scale_t[0].item()) + error = (softmax_results_torch - softmax_results).abs().max() + assert error < 1e-3 + +def test_masked_softmax_backward(): + import scaled_masked_softmax_cuda + + batch = 2 + attn = 16 + scale_t = torch.tensor([1.0]) + for qlen in [128, 256, 1024, 2048, 4096]: + for klen in [128, 256, 1024, 2048]: + inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0') + backward = torch.rand_like(inputs, dtype=torch.float16, device='cuda:0') + masks = torch.randint(0, 2, (batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0') + softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item()) + back_grad = scaled_masked_softmax_cuda.backward(backward, softmax_results, scale_t[0].item()) + + inputs.requires_grad = True + softmax_results_torch = forward_torch_softmax(inputs, masks, scale_t[0].item()) + softmax_results_torch.backward(backward) + error = (back_grad - inputs.grad).abs().max() + assert error < 1e-3 + + +def test_allmasked_softmax_forward(): + import scaled_masked_softmax_cuda + + batch = 2 + attn = 16 + scale_t = torch.tensor([1.0]) + for qlen in [128, 256, 1024, 2048, 4096]: + for klen in [128, 256, 1024, 2048]: + inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0') + masks = 
torch.ones((batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0') + softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item()) + softmax_results_torch = torch.zeros_like(inputs) + error = (softmax_results_torch - softmax_results).abs().max() + assert error == 0.0 + + +def test_allmasked_softmax_backward(): + import scaled_masked_softmax_cuda + + batch = 2 + attn = 16 + scale_t = torch.tensor([1.0]) + for qlen in [128, 256, 1024, 2048, 4096]: + for klen in [128, 256, 1024, 2048]: + inputs = torch.normal(0, 2, (batch, attn, qlen, klen), dtype=torch.float16, device='cuda:0') + backward = torch.rand_like(inputs, dtype=torch.float16, device='cuda:0') + masks = torch.ones((batch, 1, qlen, klen), dtype=torch.bool, device='cuda:0') + softmax_results = scaled_masked_softmax_cuda.forward(inputs, masks, scale_t[0].item()) + back_grad = scaled_masked_softmax_cuda.backward(backward, softmax_results, scale_t[0].item()) + inputs.requires_grad = True + softmax_results_torch = forward_torch_softmax(inputs, masks, scale_t[0].item()) + softmax_results_torch.backward(backward) + error = (back_grad - inputs.grad).abs().max() + assert error < 1e-3 + + +if __name__ == "__main__": + try: + from transformers import BertTokenizer, GPT2Tokenizer + from transformers.models.bert.modeling_bert import BertModel + from transformers.models.gpt2.modeling_gpt2 import GPT2Model + import transformers + + transformers.logging.set_verbosity( + transformers.logging.FATAL, + ) + + except: + print("\n[Fail] Please install `transformers` package to test fused kernels\n") + exit(-1) + + load() + test_masked_softmax_forward() + test_masked_softmax_backward() + test_allmasked_softmax_forward() + test_allmasked_softmax_backward() + test_load_fused_kernels() + test_fused_softmax() + test_fused_upper_triangle_mask_softmax() + test_layer_norm() diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/type_shim.h b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/type_shim.h new file mode 100644 index 0000000000000000000000000000000000000000..d60a6f8c6fb50e241f9ddcc852adec71e963e1b2 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/fused_kernels/type_shim.h @@ -0,0 +1,103 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */ + + +#include +#include "compat.h" + + +#define DISPATCH_HALF_AND_BFLOAT(TYPE, NAME, ...) \ + switch(TYPE) \ + { \ + case at::ScalarType::Half: \ + { \ + using scalar_t = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + + +#define DISPATCH_HALF_BFLOAT_AND_FLOAT(TYPE, NAME, ...) \ + switch(TYPE) \ + { \ + case at::ScalarType::Half: \ + { \ + using scalar_t = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Float: \ + { \ + using scalar_t = float; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + + + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) 
\ + switch(TYPEIN) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_in = float; \ + switch(TYPEOUT) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_out = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \ + } \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_in = at::Half; \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_in = at::BFloat16; \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \ + } + diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/indexer.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/indexer.py new file mode 100644 index 0000000000000000000000000000000000000000..45f530a7d4d7d72bfeb1f2d9a3098b3812a332fe --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/indexer.py @@ -0,0 +1,129 @@ +import sys +import time +import torch +import torch.distributed as dist + +from megatron import get_args, print_rank_0 +from megatron.core import mpu +from megatron.checkpointing import load_biencoder_checkpoint +from megatron.data.orqa_wiki_dataset import get_open_retrieval_wiki_dataset +from megatron.data.orqa_wiki_dataset import get_open_retrieval_batch +from megatron.data.biencoder_dataset_utils import get_one_epoch_dataloader +from megatron.data.realm_index import detach, OpenRetreivalDataStore +from megatron.model.biencoder_model import get_model_provider +from megatron.training import get_model + + +class IndexBuilder(object): + """ + Object for taking one pass over a dataset and creating a BlockData of its + embeddings + """ + def __init__(self): + args = get_args() + self.model = None + self.dataloader = None + self.evidence_embedder_obj = None + self.biencoder_shared_query_context_model = \ + args.biencoder_shared_query_context_model + + # need to know whether we're using a REALM checkpoint (args.load) + # or ICT checkpoint + assert not (args.load and args.ict_load) + + self.log_interval = args.indexer_log_interval + self.batch_size = args.indexer_batch_size + + self.load_attributes() + self.is_main_builder = mpu.get_data_parallel_rank() == 0 + self.num_total_builders = mpu.get_data_parallel_world_size() + self.iteration = self.total_processed = 0 + + def load_attributes(self): + """ + Load the necessary attributes: model, dataloader and empty BlockData + """ + only_context_model = True + if self.biencoder_shared_query_context_model: + only_context_model = False + + model = get_model(get_model_provider(only_context_model=\ + only_context_model, biencoder_shared_query_context_model=\ + self.biencoder_shared_query_context_model)) + + self.model = load_biencoder_checkpoint(model, + only_context_model=only_context_model) + + assert len(self.model) == 1 + self.model[0].eval() + + self.dataset = get_open_retrieval_wiki_dataset() + self.dataloader = iter(get_one_epoch_dataloader(self.dataset, \ + self.batch_size)) + + self.evidence_embedder_obj = OpenRetreivalDataStore( \ + load_from_path=False) + + def track_and_report_progress(self, batch_size): + """ + Utility function for tracking progress + """ + self.iteration 
+= 1 + self.total_processed += batch_size * self.num_total_builders + if self.is_main_builder and self.iteration % self.log_interval == 0: + print('Batch {:10d} | Total {:10d}'.format(self.iteration, + self.total_processed), flush=True) + + def build_and_save_index(self): + """ + Goes through one epoch of the dataloader and adds all data to this + instance's BlockData. + + The copy of BlockData is saved as a shard, which when run in a + distributed setting will be consolidated by the rank 0 process + and saved as a final pickled BlockData. + """ + assert len(self.model) == 1 + unwrapped_model = self.model[0] + + while not hasattr(unwrapped_model, 'embed_text'): + unwrapped_model = unwrapped_model.module + + while True: + try: + # batch also has query_tokens and query_pad_data + row_id, context_tokens, context_mask, context_types, \ + context_pad_mask = get_open_retrieval_batch( \ + self.dataloader) + except (StopIteration, IndexError): + break + + # TODO: can we add with torch.no_grad() to reduce memory usage + # detach, separate fields and add to BlockData + assert context_mask.dtype == torch.bool + context_logits = unwrapped_model.embed_text( + unwrapped_model.context_model, context_tokens, context_mask, + context_types) + + context_logits = detach(context_logits) + row_id = detach(row_id) + + self.evidence_embedder_obj.add_block_data(row_id, context_logits) + self.track_and_report_progress(batch_size=len(row_id)) + + # This process signals to finalize its shard and then synchronize with + # the other processes + self.evidence_embedder_obj.save_shard() + torch.distributed.barrier() + del self.model + + # rank 0 process builds the final copy + if self.is_main_builder: + self.evidence_embedder_obj.merge_shards_and_save() + # make sure that every single piece of data was embedded + assert len(self.evidence_embedder_obj.embed_data) == \ + len(self.dataset) + self.evidence_embedder_obj.clear() + + # complete building the final copy + torch.distributed.barrier() diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/initialize.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/initialize.py new file mode 100644 index 0000000000000000000000000000000000000000..058568ee89d38443420bc55f30d2d1cff76d0e24 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/initialize.py @@ -0,0 +1,390 @@ +# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
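+#
+# Typical top-level usage of this module (a minimal illustrative sketch; the
+# args_defaults value shown is an assumed example, not something this file
+# requires):
+#
+#   from megatron.initialize import initialize_megatron
+#   from megatron import get_args
+#
+#   initialize_megatron(extra_args_provider=None,
+#                       args_defaults={'tokenizer_type': 'GPT2BPETokenizer'})
+#   args = get_args()  # global args, tokenizer, timers, ... are now available
+#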
+ +"""Megatron initialization.""" + +import random +import os +import time +import shutil + +import numpy as np +import torch +from datetime import timedelta + +from megatron import fused_kernels +from megatron import get_adlr_autoresume +from megatron import get_args +from megatron import get_tensorboard_writer +from megatron.core import mpu, tensor_parallel +from megatron.arguments import (parse_args, validate_args) +from megatron.checkpointing import load_args_from_checkpoint +from megatron.global_vars import set_global_variables +from megatron.model.transformer import bias_dropout_add_fused_train +from megatron.model.fused_bias_gelu import bias_gelu +from megatron.utils import is_rank_0 +from deepspeed.accelerator import get_accelerator +import deepspeed +from deepspeed.ops.op_builder.builder import OpBuilder + +is_rocm_pytorch = OpBuilder.is_rocm_pytorch() + + +def initialize_megatron(extra_args_provider=None, args_defaults={}, + ignore_unknown_args=False, allow_no_cuda=False): + """Set global variables, initialize distributed, and + set autoresume and random seeds. + `allow_no_cuda` should not be set unless using megatron for cpu only + data processing. In general this arg should not be set unless you know + what you are doing. + Returns a function to finalize distributed env initialization + (optionally, only when args.lazy_mpu_init == True) + """ + if not allow_no_cuda: + # Make sure cuda is available. + assert get_accelerator().is_available(), 'Megatron requires accelerator.' + + # Parse arguments + args = parse_args(extra_args_provider, ignore_unknown_args) + # Set input args. + for key in args_defaults: + # The args_defaults is for those who want to set default value or pass parameters when + # calling Python functions. Instead of using arguments passed in from outside the program. + if getattr(args, key) is not None: + if args.rank == 0: + print('INFO: overriding default arguments for {key}:{v} \ + with {key}:{v2}'.format(key=key, v=getattr(args, key), + v2=args_defaults[key]), + flush=True) + else: + setattr(args, key, args_defaults[key]) + + + if args.use_checkpoint_args or args_defaults.get('use_checkpoint_args', False): + assert args.load is not None, '--use-checkpoints-args requires --load argument' + load_args_from_checkpoint(args) + + validate_args(args) + + # set global args, build tokenizer, and set adlr-autoresume, + # tensorboard-writer, and timers. + set_global_variables(args) + + # profiler config, must be done before hpu initialization + if args.profile == 'hltv': + os.environ['HABANA_PROFILE'] = 'profile_api_with_nics' + shutil.rmtree('.graph_dumps', ignore_errors=True) + + # torch.distributed initialization + def finish_mpu_init(): + args = get_args() + # Pytorch distributed. + _initialize_distributed() + + # Random seeds for reproducibility. + if args.rank == 0: + print('> setting random seeds to {} ...'.format(args.seed)) + _set_random_seed(args.seed, args.data_parallel_random_init) + + args = get_args() + if args.lazy_mpu_init: + # TODO is this still a necessary option? + args.use_cpu_initialization=True + # delayed initialization of DDP-related stuff + # We only set basic DDP globals + mpu.set_tensor_model_parallel_world_size(args.tensor_model_parallel_size) + # and return function for external DDP manager + # to call when it has DDP initialized + mpu.set_tensor_model_parallel_rank(args.rank) + return finish_mpu_init + else: + # Megatron's MPU is the master. Complete initialization right away. + finish_mpu_init() + + # Initialize memory buffers. 
+ _initialize_mem_buffs() + + # Autoresume. + _init_autoresume() + + # Compile dependencies. + _compile_dependencies() + + # No continuation function + return None + + +def _compile_dependencies(): + + args = get_args() + + # ========================= + # Compile dataset C++ code. + # ========================= + # TODO: move this to ninja + if is_rank_0(): + start_time = time.time() + print('> compiling dataset index builder ...') + from megatron.data.dataset_utils import compile_helper + compile_helper() + print('>>> done with dataset index builder. Compilation time: {:.3f} ' + 'seconds'.format(time.time() - start_time), flush=True) + + if not get_accelerator().device_name() == 'cuda': + print(">fused kernel is only supported in cuda, skip loading fused kernel") + return + + if args.use_dataset_only: + return + # ================== + # Load fused kernels + # ================== + + # Custom kernel constraints check. + seq_len = args.seq_length + attn_batch_size = \ + (args.num_attention_heads / args.tensor_model_parallel_size) * \ + args.micro_batch_size + # Constraints on sequence length and attn_batch_size to enable warp based + # optimization and upper triangular optimization (for causal mask) + custom_kernel_constraint = seq_len > 16 and seq_len <=4096 and \ + seq_len % 4 == 0 and attn_batch_size % 4 == 0 + # Print a warning. + if not ((args.fp16 or args.bf16) and + custom_kernel_constraint and + args.masked_softmax_fusion): + if args.rank == 0: + print('WARNING: constraints for invoking optimized' + ' fused softmax kernel are not met. We default' + ' back to unfused kernel invocations.', flush=True) + + # Always build on rank zero first. + if is_rank_0(): + start_time = time.time() + print('> compiling and loading fused kernels ...', flush=True) + if get_accelerator().device_count() > 0: # Skip when CPU-only + fused_kernels.load(args) + torch.distributed.barrier() + else: + torch.distributed.barrier() + fused_kernels.load(args) + # Simple barrier to make sure all ranks have passed the + # compilation phase successfully before moving on to the + # rest of the program. We think this might ensure that + # the lock is released. + torch.distributed.barrier() + if is_rank_0(): + print('>>> done with compiling and loading fused kernels. ' + 'Compilation time: {:.3f} seconds'.format( + time.time() - start_time), flush=True) + + +def setup_deepspeed_random_and_activation_checkpointing(args): + '''Optional DeepSpeed Activation Checkpointing features. + Gives access to partition activations, contiguous memory optimizations + and cpu checkpointing. + Activation checkpoint requires keep track of the random states + and setting the random seed for each MP process. Megatron uses + mpu.get_cuda_rng_tracker and mpu.model_parallel_cuda_manual_seed + for keeping track of the random states and setting the random seeds. + Since they are used in places outside of activation checkpointing, + we overwrite them to maintain consistency. 
+ This must be called before all the calls to mpu.model_parallel_cuda_manual_seed + ''' + num_layers = args.num_layers // args.checkpoint_num_layers + num_layers = num_layers if args.num_layers % args.checkpoint_num_layers == 0 else num_layers + 1 + if args.split_transformers: + num_layers *= 2 + + deepspeed.checkpointing.configure( + mpu, + partition_activations=args.partition_activations, + contiguous_checkpointing=args.contigious_checkpointing, + num_checkpoints=num_layers, + checkpoint_in_cpu=args.checkpoint_in_cpu, + synchronize=args.synchronize_each_layer, + profile=args.profile_backward) + + +def _initialize_distributed(): + """Initialize torch.distributed and core model parallel.""" + args = get_args() + device_count = get_accelerator().device_count() + if torch.distributed.is_initialized(): + + if args.rank == 0: + print('torch distributed is already initialized, ' + 'skipping initialization ...', flush=True) + args.rank = torch.distributed.get_rank() + args.world_size = torch.distributed.get_world_size() + + else: + if args.rank == 0: + print('> initializing torch distributed ...', flush=True) + # Manually set the device ids. + if device_count > 0: + device = args.rank % device_count + if args.local_rank is not None: + assert args.local_rank == device, \ + 'expected local-rank to be the same as rank % device-count.' + else: + args.local_rank = device + + get_accelerator().set_device(device) # only do so when device_count > 0 + + # Call the init process + if args.deepspeed or args.ds_inference: + deepspeed.init_distributed() + else: + if not torch.distributed.is_initialized(): + torch.distributed.init_process_group( + backend=get_accelerator().communication_backend_name(), + world_size=args.world_size, rank=args.rank, + timeout=timedelta(minutes=args.distributed_timeout_minutes)) + + # Set the tensor model-parallel, pipeline model-parallel, and + # data-parallel communicators. + if device_count > 0: + if mpu.model_parallel_is_initialized(): + print('model parallel is already initialized') + else: + if args.ds_sequence_parallel_size > 1 and args.sequence_parallel: + raise RuntimeError( + f"sequence_parallel_size > 1 enables DeepSpeed's sequence parallel, " + f"which is not compatible with Megatron-LM's sequence parallel. " + f"Remove --sequence_parallel to use DeepSpeed's sequence parallel." + ) + + mpu.initialize_model_parallel(args.tensor_model_parallel_size, + args.pipeline_model_parallel_size, + args.ds_sequence_parallel_size, + args.virtual_pipeline_model_parallel_size, + args.pipeline_model_parallel_split_rank, + use_distributed_optimizer=args.use_distributed_optimizer) + if args.rank == 0: + print(f'> initialized tensor model parallel with size ' + f'{mpu.get_tensor_model_parallel_world_size()}') + print(f'> initialized pipeline model parallel with size ' + f'{mpu.get_pipeline_model_parallel_world_size()}') + + if args.deepspeed and args.deepspeed_activation_checkpointing: + setup_deepspeed_random_and_activation_checkpointing(args) + + +def _init_autoresume(): + """Set autoresume start time.""" + autoresume = get_adlr_autoresume() + if autoresume: + torch.distributed.barrier() + autoresume.init() + torch.distributed.barrier() + + +def _set_random_seed(seed_, data_parallel_random_init=False): + """Set random seed for reproducability.""" + if seed_ is not None and seed_ > 0: + if get_accelerator().device_count() == 0: + # No need for CPU-only case. + seed = seed_ + else: + # Ensure that different pipeline MP stages get different seeds. 
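+            # Worked example: with seed_=1234, pipeline-parallel rank 2 and
+            # data-parallel rank 3 (when data_parallel_random_init is set),
+            # the effective seed is 1234 + 100*2 + 10*3 = 1464.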
+ seed = seed_ + (100 * mpu.get_pipeline_model_parallel_rank()) + # Ensure different data parallel ranks get different seeds + if data_parallel_random_init: + seed = seed + (10 * mpu.get_data_parallel_rank()) + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if get_accelerator().device_count() > 0: + tensor_parallel.model_parallel_cuda_manual_seed(seed) + else: + raise ValueError('Seed ({}) should be a positive integer.'.format(seed)) + + +def write_args_to_tensorboard(): + """Write arguments to tensorboard.""" + args = get_args() + writer = get_tensorboard_writer() + if writer: + for arg in vars(args): + writer.add_text(arg, str(getattr(args, arg)), + global_step=args.iteration) + + +def _initialize_mem_buffs(): + """Initialize manually allocated static memory.""" + args = get_args() + # Initialize memory for checkpointed activations. + if args.distribute_checkpointed_activations: + tensor_parallel.init_checkpointed_activations_memory_buffer() + + +def set_jit_fusion_options(): + """Set PyTorch JIT layer fusion options.""" + # flags required to enable jit fusion kernels + TORCH_MAJOR = int(torch.__version__.split('.')[0]) + TORCH_MINOR = int(torch.__version__.split('.')[1]) + if ((TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)) and not is_rocm_pytorch: + # nvfuser + torch._C._jit_set_profiling_executor(True) + torch._C._jit_set_profiling_mode(True) + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_override_can_fuse_on_gpu(False) + torch._C._jit_set_texpr_fuser_enabled(False) + torch._C._jit_set_nvfuser_enabled(True) + torch._C._debug_set_autodiff_subgraph_inlining(False) + else: + # legacy pytorch fuser + torch._C._jit_set_profiling_mode(False) + torch._C._jit_set_profiling_executor(False) + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(True) + + _warmup_jit_function() + + +def _warmup_jit_function(): + """ Compilie JIT functions before the main training steps """ + args = get_args() + if args.bf16: + dtype = torch.bfloat16 + elif args.fp16: + dtype = torch.float16 + else: + dtype = torch.float32 + + # Warmup fused bias+gelu + bias = torch.rand(args.ffn_hidden_size // args.tensor_model_parallel_size, + dtype=dtype, device='cuda') + input = torch.rand((args.seq_length // args.ds_sequence_parallel_size, args.micro_batch_size, + args.ffn_hidden_size // args.tensor_model_parallel_size), + dtype=dtype, device='cuda') + # Warmup JIT fusions with the input grad_enable state of both forward + # prop and recomputation + for bias_grad, input_grad in zip([True, True], [False, True]): + bias.requires_grad, input.requires_grad = bias_grad, input_grad + for _ in range(5): + output = bias_gelu(bias, input) + del bias, input, output + + # Warmup fused bias+dropout+add + if args.sequence_parallel: + seq_length = args.seq_length // mpu.get_tensor_model_parallel_world_size() + else: + seq_length = args.seq_length + input = torch.rand((seq_length // args.ds_sequence_parallel_size, args.micro_batch_size, args.hidden_size), + dtype=dtype, device='cuda') + residual = torch.rand((seq_length // args.ds_sequence_parallel_size, args.micro_batch_size, args.hidden_size), + dtype=dtype, device='cuda') + bias = torch.rand((args.hidden_size), dtype=dtype, device='cuda').expand_as(residual) + dropout_rate = 0.1 + # Warmup JIT fusions with the input grad_enable state of both forward + # prop and recomputation + for input_grad, bias_grad, residual_grad in zip([False, True], [True, True], [True, True]): + input.requires_grad = 
input_grad + bias.requires_grad = bias_grad + residual.requires_grad = residual_grad + for _ in range(5): + output = bias_dropout_add_fused_train(input, bias, residual, dropout_rate) + del bias, input, residual, output + get_accelerator().empty_cache() diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/memory.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..e63afa51f0835f1383cd4f605682e63c386f0ea4 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/memory.py @@ -0,0 +1,132 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + + +import torch +from deepspeed.accelerator import get_accelerator + +# A dictionary of all the memory buffers allocated. +_MEM_BUFFS = dict() + + +def allocate_mem_buff(name, numel, dtype, track_usage): + """Allocate a memory buffer.""" + assert name not in _MEM_BUFFS, \ + 'memory buffer {} already allocated.'.format(name) + _MEM_BUFFS[name] = MemoryBuffer(name, numel, dtype, track_usage) + return _MEM_BUFFS[name] + + +def get_mem_buff(name): + """Get the memory buffer.""" + return _MEM_BUFFS[name] + + +class MemoryBuffer: + """Contiguous memory buffer. + Allocate a contiguous memory of type `dtype` and size `numel`. It is + used to reduce memory fragmentation. + + Usage: After the allocation, the `_start` index is set tot the first + index of the memory. A memory chunk starting from `_start` index + can be `allocated` for an input tensor, with the elements of the + tensor being coppied. The buffer can be reused by resetting the + `_start` index. + + """ + def __init__(self, name, numel, dtype, track_usage): + if torch.distributed.get_rank() == 0: + element_size = torch.tensor([], dtype=dtype).element_size() + print('> building the {} memory buffer with {} num elements ' + 'and {} dtype ({:.1f} MB)...'.format( + name, numel, dtype, numel*element_size/1024/1024), + flush=True) + self.name = name + self.numel = numel + self.dtype = dtype + self.data = torch.empty(self.numel, + dtype=self.dtype, + device=get_accelerator().current_device_name(), + requires_grad=False) + + # Index tracking the start of the free memory. + self._start = 0 + + # Values used for tracking usage. + self.track_usage = track_usage + if self.track_usage: + self.in_use_value = 0.0 + self.total_value = 0.0 + + + def reset(self): + """Reset the buffer start index to the beginning of the buffer.""" + self._start = 0 + + + def is_in_use(self): + """Whether the current buffer hold on to any memory.""" + return self._start > 0 + + + def numel_in_use(self): + """Return number of elements in use.""" + return self._start + + + def add(self, tensor): + """Allocate a chunk of memory from the buffer to tensor and copy + the values.""" + assert tensor.dtype == self.dtype, \ + 'Input tensor type {} different from buffer type {}'.format( + tensor.dtype, self.dtype) + # Number of elements of the input tensor. + tensor_numel = torch.numel(tensor) + new_start = self._start + tensor_numel + assert new_start <= self.numel, \ + 'Not enough memory left in the buffer ({} > {})'.format( + tensor_numel, self.numel - self._start) + # New tensor is a view into the memory. + new_tensor = self.data[self._start:new_start] + self._start = new_start + new_tensor = new_tensor.view(tensor.shape) + new_tensor.copy_(tensor) + # Return a pointer to the new tensor. 
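+        # Minimal usage sketch (names and sizes here are illustrative only):
+        #   buf = allocate_mem_buff('activations', 1 << 20, torch.float16, track_usage=False)
+        #   view = buf.add(t)   # `view` aliases buffer storage and holds a copy of `t`
+        #   ...                 # use `view` wherever `t` was needed
+        #   buf.reset()         # mark the whole buffer as free again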
+ return new_tensor + + + def get_data(self): + """Return the data currently in use.""" + if self.track_usage: + self.in_use_value += float(self._start) + self.total_value += float(self.numel) + return self.data[:self._start] + + + def print_average_usage(self): + """Print memory usage average over time. We would like this value + to be as high as possible.""" + assert self.track_usage, 'You need to enable track usage.' + if torch.distributed.get_rank() == 0: + print(' > usage of {} memory buffer: {:.2f} %'.format( + self.name, self.in_use_value * 100.0 / self.total_value), + flush=True) + + + +class RingMemBuffer: + """A ring of memory buffers.""" + + def __init__(self, name, num_buffers, numel, dtype, track_usage): + self.num_buffers = num_buffers + self.buffers = [ + allocate_mem_buff(name+' {}'.format(i), numel, dtype, track_usage) + for i in range(num_buffers)] + self._index = -1 + + + def get_next_buffer(self): + self._index += 1 + self._index = self._index % self.num_buffers + buff = self.buffers[self._index] + assert not buff.is_in_use(), 'buffer is already in use.' + return buff diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/__init__.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/commons.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/commons.py new file mode 100644 index 0000000000000000000000000000000000000000..0fdb5ab0fc89a791a9b8413f4c4b35d1ec25f7b4 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/commons.py @@ -0,0 +1,70 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +import argparse +import os +import random +import numpy +import torch + +import mpu +from deepspeed.accelerator import get_accelerator + +class IdentityLayer(torch.nn.Module): + def __init__(self, size, scale=1.0): + super(IdentityLayer, self).__init__() + self.weight = torch.nn.Parameter(scale * torch.randn(size)) + + def forward(self): + return self.weight + + +def set_random_seed(seed): + """Set random seed for reproducability.""" + random.seed(seed) + numpy.random.seed(seed) + torch.manual_seed(seed) + mpu.model_parallel_cuda_manual_seed(seed) + + +def initialize_distributed(backend='nccl'): + """Initialize torch.distributed.""" + # Get local rank in case it is provided. + parser = argparse.ArgumentParser() + parser.add_argument('--local_rank', type=int, default=None, + help='local rank passed from distributed launcher') + args = parser.parse_args() + local_rank = args.local_rank + + # Get rank and world size. + rank = int(os.getenv('RANK', '0')) + world_size = int(os.getenv("WORLD_SIZE", '1')) + + print('> initializing torch.distributed with local rank: {}, ' + 'rank: {}, world size: {}'.format(local_rank, rank, world_size)) + + # Set the device id. + device = rank % get_accelerator().device_count() + if local_rank is not None: + device = local_rank + get_accelerator().set_device(device) + + # Call the init process. 
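+    # For example, with MASTER_ADDR=10.1.0.5 and MASTER_PORT=6000 the string
+    # built below is 'tcp://10.1.0.5:6000' (the defaults give 'tcp://localhost:6000').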
+ init_method = 'tcp://' + master_ip = os.getenv('MASTER_ADDR', 'localhost') + master_port = os.getenv('MASTER_PORT', '6000') + init_method += master_ip + ':' + master_port + torch.distributed.init_process_group( + backend=backend, + world_size=world_size, + rank=rank, + init_method=init_method) + + +def print_separator(message): + torch.distributed.barrier() + filler_len = (78 - len(message)) // 2 + filler = '-' * filler_len + string = '\n' + filler + ' {} '.format(message) + filler + if torch.distributed.get_rank() == 0: + print(string, flush=True) + torch.distributed.barrier() diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/test_data.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..1e95447099f8578a03b30ff3161cfbd9c20e93f6 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/test_data.py @@ -0,0 +1,76 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +from commons import print_separator +from commons import initialize_distributed +from deepspeed.accelerator import get_accelerator +from mpu import data as data_utils +import mpu +import torch +import functools +import operator +import sys +sys.path.append("../..") + + +def test_broadcast_data(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing broadcast_data with model parallel size {} ...'. + format(tensor_model_parallel_size)) + + mpu.initialize_model_parallel(tensor_model_parallel_size) + torch.manual_seed(1234 + mpu.get_data_parallel_rank()) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + key_size_t = {'key1': [7, 11], + 'key2': [8, 2, 1], + 'key3': [13], + 'key4': [5, 1, 2], + 'key5': [5, 12]} + keys = list(key_size_t.keys()) + + data = {} + data_t = {} + for key in key_size_t: + data[key] = torch.LongTensor(size=key_size_t[key]).random_(0, 1000) + data_t[key] = data[key].clone() + data['keyX'] = torch.FloatTensor(size=(5, )).random_(0, 1000) + data_t['keyX'] = data['keyX'].clone() + if mpu.get_tensor_model_parallel_rank() != 0: + data = None + + data_utils._check_data_types(keys, data_t, torch.int64) + key_size, key_numel, \ + total_numel = data_utils._build_key_size_numel_dictionaries(keys, data) + for key in keys: + assert key_size[key] == key_size_t[key] + total_numel_t = 0 + for key in keys: + target_size = functools.reduce(operator.mul, key_size_t[key], 1) + assert key_numel[key] == target_size + total_numel_t += target_size + assert total_numel == total_numel_t + + data_b = data_utils.broadcast_data(keys, data, torch.int64) + for key in keys: + tensor = data_t[key].to(get_accelerator().device_name()) + assert data_b[key].sub(tensor).abs().max() == 0 + + # Reset groups + mpu.destroy_tensor_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +if __name__ == '__main__': + + initialize_distributed() + world_size = torch.distributed.get_world_size() + + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test test broadcast data') + test_broadcast_data(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/test_layers.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/test_layers.py new file mode 100644 index 
0000000000000000000000000000000000000000..f524f944412c277490a82ca63e8359af00cf8fd1 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/test_layers.py @@ -0,0 +1,517 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +from mpu import layers +from commons import set_random_seed +from commons import print_separator +from commons import initialize_distributed +import mpu +from torch.nn.parameter import Parameter +import torch.nn.init as init +import torch +import random +import sys +sys.path.append("../..") +from deepspeed.accelerator import get_accelerator + +device_name = get_accelerator().device_name() +def test_parallel_embedding(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing parallel embedding with model parallel size {} ...'. + format(tensor_model_parallel_size)) + + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + batch_size = 17 + seq_length = 23 + vocab_size = 48 + hidden_size = 16 + seed = 1236 + + set_random_seed(123) + input_data = torch.LongTensor( + size=(batch_size, seq_length)).random_(0, vocab_size).to(device_name) + loss_weight = torch.randn([batch_size, seq_length, hidden_size]).to(device_name) + + set_random_seed(seed) + embedding_original = torch.nn.Embedding(vocab_size, hidden_size).to(device_name) + + output = embedding_original(input_data) + loss_original = torch.mul(output, loss_weight).sum() + loss_original.backward() + + set_random_seed(seed) + embedding_parallel = layers.ParallelEmbedding( + vocab_size, hidden_size, init_method=init.normal_).to(device_name) + output = embedding_parallel(input_data) + loss_parallel = torch.mul(output, loss_weight).sum() + loss_parallel.backward() + + set_random_seed(seed) + embedding_vocab_parallel = layers.VocabParallelEmbedding( + vocab_size, hidden_size, init_method=init.normal_).to(device_name) + output = embedding_vocab_parallel(input_data) + loss_vocab_parallel = torch.mul(output, loss_weight).sum() + loss_vocab_parallel.backward() + + torch.distributed.barrier() + error = loss_parallel.sub(loss_original).abs() + print(' error in loss (parallel) on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-12, 'error: {}'.format(error) + + torch.distributed.barrier() + error = loss_vocab_parallel.sub(loss_original).abs() + print(' error in loss (vocab parallel) on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-12, 'error: {}'.format(error) + + weight_grad_orig = torch.split(embedding_original.weight.grad, + hidden_size // tensor_model_parallel_size, + 1)[mpu.get_tensor_model_parallel_rank()] + error = embedding_parallel.weight.grad.sub(weight_grad_orig).abs().max() + print(' error in grad (parallel) on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-12, 'error: {}'.format(error) + + weight_grad_orig = torch.split(embedding_original.weight.grad, + vocab_size // tensor_model_parallel_size, + 0)[mpu.get_tensor_model_parallel_rank()] + error = embedding_vocab_parallel.weight.grad.sub( + weight_grad_orig).abs().max() + print(' error in grad (vocab parallel) on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-12, 'error: {}'.format(error) + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +def 
test_initialize_affine_weight(tensor_model_parallel_size): + + mpu.initialize_model_parallel(tensor_model_parallel_size) + if torch.distributed.get_rank() == 0: + print('> testing initialize_affine_weight with model parallel ' + 'size: {}'.format(tensor_model_parallel_size)) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed = 12345 + input_size_coeff = 13 + input_size = input_size_coeff * tensor_model_parallel_size + output_size_coeff = 17 + output_size = output_size_coeff * tensor_model_parallel_size + + # --------------- + # Column parallel + # --------------- + weight = torch.empty(output_size_coeff, input_size) + set_random_seed(seed) + layers._initialize_affine_weight(weight, output_size, input_size, + + output_size_coeff, 0, + torch.nn.init.normal_) + # Target. + set_random_seed(seed) + master_weight = torch.empty(output_size, input_size) + torch.nn.init.normal_(master_weight) + rank = mpu.get_tensor_model_parallel_rank() + my_weight = torch.split(master_weight, output_size_coeff, + dim=0)[rank].contiguous().clone() + + # Compare. + error = weight.sub(my_weight).abs().max() + torch.distributed.barrier() + print(' column parallel max error (should be zero) on global rank ' + '{}: {}'.format(torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # ------------ + # Row parallel + # ------------ + weight = torch.empty(output_size, input_size_coeff) + set_random_seed(seed) + mpu.layers._initialize_affine_weight(weight, output_size, input_size, + input_size_coeff, 1, + torch.nn.init.normal_) + # Target. + set_random_seed(seed) + master_weight = torch.empty(output_size, input_size) + torch.nn.init.normal_(master_weight) + rank = mpu.get_tensor_model_parallel_rank() + my_weight = torch.split(master_weight, input_size_coeff, + dim=1)[rank].contiguous().clone() + + # Compare. + error = weight.sub(my_weight).abs().max() + torch.distributed.barrier() + print(' row parallel max error (should be zero) on global rank ' + '{}: {}'.format(torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print(' >> passed the test :-)') + + +class IdentityLayer2D(torch.nn.Module): + def __init__(self, m, n): + super(IdentityLayer2D, self).__init__() + self.weight = Parameter(torch.Tensor(m, n)) + torch.nn.init.xavier_normal_(self.weight) + + def forward(self): + return self.weight + + +def test_column_parallel_linear(tensor_model_parallel_size): + + mpu.initialize_model_parallel(tensor_model_parallel_size) + if torch.distributed.get_rank() == 0: + print('> testing ColumnParallelLinear with model parallel ' + 'size: {}'.format(tensor_model_parallel_size)) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed = 12345 + set_random_seed(seed) + input_size_coeff = 13 + input_size = input_size_coeff * tensor_model_parallel_size + output_size_coeff = 17 + output_size = output_size_coeff * tensor_model_parallel_size + batch_size = 7 + + # Network + identity_layer = IdentityLayer2D(batch_size, input_size).to(device_name) + linear_layer = mpu.ColumnParallelLinear( + input_size, output_size, keep_master_weight_for_test=True).to(device_name) + loss_weight = torch.randn([batch_size, output_size]).to(device_name) + # Forward + input_ = identity_layer() + output = linear_layer(input_) + loss = torch.mul(output, loss_weight).sum() + # Backward + loss.backward() + + # Values. 
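+    # Reference gradients, derived by hand for Y = X A^T + b with
+    # loss = sum(Y * loss_weight):
+    #   dL/dY = loss_weight
+    #   dL/dA = (dL/dY)^T X
+    #   dL/db = 1^T (dL/dY)
+    #   dL/dX = (dL/dY) A
+    # The matching shard of each reference is compared against the layer's
+    # .grad attributes below.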
+ dLdY = loss_weight + X = identity_layer.weight + A = linear_layer.master_weight.to(device_name) + dLdA = torch.matmul(dLdY.t(), X) + dLdb = torch.matmul(torch.ones(batch_size, 1).to(device_name).t(), dLdY).view(-1) + dLdX = torch.matmul(dLdY, A) + + rank = mpu.get_tensor_model_parallel_rank() + my_dLdA = torch.split(dLdA, output_size_coeff, + dim=0)[rank].contiguous().clone() + error = my_dLdA.sub(linear_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdA on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + my_dLdb = torch.split(dLdb, output_size_coeff, + dim=0)[rank].contiguous().clone() + error = my_dLdb.sub(linear_layer.bias.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdb on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + error = dLdX.sub(identity_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdX on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print(' >> passed the test :-)') + + +def test_row_parallel_linear(tensor_model_parallel_size): + + mpu.initialize_model_parallel(tensor_model_parallel_size) + if torch.distributed.get_rank() == 0: + print('> testing RowParallelLinear with model parallel ' + 'size: {}'.format(tensor_model_parallel_size)) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed = 12345 + set_random_seed(seed) + input_size_coeff = 13 + input_size = input_size_coeff * tensor_model_parallel_size + output_size_coeff = 17 + output_size = output_size_coeff * tensor_model_parallel_size + batch_size = 7 + + # Network + identity_layer = IdentityLayer2D(batch_size, input_size).to(device_name) + linear_layer = mpu.RowParallelLinear( + input_size, output_size, keep_master_weight_for_test=True).to(device_name) + loss_weight = torch.randn([batch_size, output_size]).to(device_name) + # Forward + input_ = identity_layer() + output = linear_layer(input_) + loss = torch.mul(output, loss_weight).sum() + # Backward + loss.backward() + + # Values. 
+ dLdY = loss_weight + X = identity_layer.weight + A = linear_layer.master_weight.to(device_name) + dLdA = torch.matmul(dLdY.t(), X) + dLdb = torch.matmul(torch.ones(batch_size, 1).to(device_name).t(), dLdY).view(-1) + dLdX = torch.matmul(dLdY, A) + + rank = mpu.get_tensor_model_parallel_rank() + my_dLdA = torch.split(dLdA, input_size_coeff, + dim=1)[rank].contiguous().clone() + error = my_dLdA.sub(linear_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdA on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + error = dLdb.sub(linear_layer.bias.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdb on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + error = dLdX.sub(identity_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' error in dLdX on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print(' >> passed the test :-)') + + +class IdentityLayer3D(torch.nn.Module): + def __init__(self, m, n, k): + super(IdentityLayer3D, self).__init__() + self.weight = Parameter(torch.Tensor(m, n, k)) + torch.nn.init.xavier_normal_(self.weight) + + def forward(self): + return self.weight + + +def parallel_self_attention(tensor_model_parallel_size, num_att_heads_per_partition, + hidden_size_per_att_head, dropout_prob, batch_size, + sequence_length): + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed = 12345 + set_random_seed(seed) + + num_att_heads = num_att_heads_per_partition * \ + torch.distributed.get_world_size() + hidden_size = hidden_size_per_att_head * num_att_heads + + # Network + identity_layer = IdentityLayer3D(batch_size, sequence_length, + hidden_size).to(device_name) + attention_layer = mpu.BertParallelSelfAttention(hidden_size, num_att_heads, + dropout_prob).to(device_name) + loss_weight = torch.randn([batch_size, sequence_length, hidden_size]).to(device_name) + attention_mask = torch.randn([batch_size, 1, 1, sequence_length]).to(device_name) + # Forward + input_ = identity_layer() + output = attention_layer(input_, attention_mask) + loss = torch.mul(output, loss_weight).sum() + # Backward + loss.backward() + + rank = mpu.get_tensor_model_parallel_rank() + mpu.destroy_model_parallel() + return rank, hidden_size, tensor_model_parallel_size, loss, \ + attention_layer, identity_layer + + +def test_parallel_self_attention(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing ParallelSelfAttention with model parallel ' + 'size: {}'.format(tensor_model_parallel_size)) + + num_att_heads_per_partition = 3 + hidden_size_per_att_head = 7 + dropout_prob = 0.0 # has to be zero + batch_size = 5 + sequence_length = 13 + + rank_1, hideen_size_1, tensor_model_parallel_size_1, loss_1, \ + attention_layer_1, identity_layer_1 = parallel_self_attention( + 1, num_att_heads_per_partition, + hidden_size_per_att_head, dropout_prob, batch_size, sequence_length) + + rank, hidden_size, tensor_model_parallel_size, loss, \ + attention_layer, identity_layer = parallel_self_attention( + tensor_model_parallel_size, num_att_heads_per_partition, + hidden_size_per_att_head, dropout_prob, batch_size, sequence_length) + assert hideen_size_1 == hidden_size + + error = 
loss_1.sub(loss).abs().max() + torch.distributed.barrier() + print(' loss error on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 5.0e-6 + + my_lin_grad_list = torch.split( + attention_layer_1.query_key_value.weight.grad, + hidden_size // tensor_model_parallel_size, 0)[rank::tensor_model_parallel_size] + my_lin_grad = torch.cat(my_lin_grad_list, dim=0) + error = my_lin_grad.sub( + attention_layer.query_key_value.weight.grad).abs().max() + torch.distributed.barrier() + print(' weight gradient error on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 5.0e-6 + + error = identity_layer_1.weight.grad.sub( + identity_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' input gradient error on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 5.0e-6 + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print(' >> passed the test :-)') + + +def parallel_transformer(tensor_model_parallel_size, num_att_heads_per_partition, + hidden_size_per_att_head, batch_size, sequence_length): + + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed = 12345 + set_random_seed(seed) + + num_att_heads = num_att_heads_per_partition * \ + torch.distributed.get_world_size() + hidden_size = hidden_size_per_att_head * num_att_heads + intermediate_size = 4 * hidden_size + + # Network + identity_layer = IdentityLayer3D(batch_size, sequence_length, + hidden_size).to(device_name) + transformer_layer = mpu.BertParallelTransformerLayer( + hidden_size, intermediate_size, num_att_heads, 0.0, 0.0, + torch.nn.functional.relu, 1.0e-5).to(device_name) + + loss_weight = torch.randn([batch_size, sequence_length, hidden_size]).to(device_name) + attention_mask = torch.randn([batch_size, 1, 1, sequence_length]).to(device_name) + # Forward + input_ = identity_layer() + output = transformer_layer(input_, attention_mask) + loss = torch.mul(output, loss_weight).sum() + # Backward + loss.backward() + + rank = mpu.get_tensor_model_parallel_rank() + mpu.destroy_model_parallel() + return rank, hidden_size, tensor_model_parallel_size, loss, \ + transformer_layer, identity_layer + + +def test_parallel_transformer_layer(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing ParallelTransformerLayer with model parallel ' + 'size: {}'.format(tensor_model_parallel_size)) + + num_att_heads_per_partition = 3 + hidden_size_per_att_head = 7 + batch_size = 5 + sequence_length = 13 + + rank_1, hidden_size_1, tensor_model_parallel_size_1, loss_1, \ + transformer_layer_1, identity_layer_1 = parallel_transformer( + 1, num_att_heads_per_partition, + hidden_size_per_att_head, batch_size, sequence_length) + + rank, hidden_size, tensor_model_parallel_size, loss, \ + transformer_layer, identity_layer = parallel_transformer( + tensor_model_parallel_size, num_att_heads_per_partition, + hidden_size_per_att_head, batch_size, sequence_length) + + error = loss_1.sub(loss).abs().max() + torch.distributed.barrier() + print(' loss error on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 5.0e-5, 'error: {}'.format(error) + + error = identity_layer_1.weight.grad.sub( + identity_layer.weight.grad).abs().max() + torch.distributed.barrier() + print(' input gradient error on global rank {}: {}'.format( + torch.distributed.get_rank(), error)) + assert error < 5.0e-5, 'error: 
{}'.format(error) + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print(' >> passed the test :-)') + + +if __name__ == '__main__': + + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + initialize_distributed() + world_size = torch.distributed.get_world_size() + + print_separator('test initialize affine weight') + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + test_initialize_affine_weight(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test parallel embedding') + test_parallel_embedding(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + print_separator('test column-parallel linear') + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + test_column_parallel_linear(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + print_separator('test row-parallel linear') + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + test_row_parallel_linear(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + print_separator('test parallel self-attention') + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + test_parallel_self_attention(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + print_separator('test parallel transformer') + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + test_parallel_transformer_layer(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/test_random.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/test_random.py new file mode 100644 index 0000000000000000000000000000000000000000..21dcd10907801752515b911f476fac41d64b7d10 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/mpu/tests/test_random.py @@ -0,0 +1,191 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +from commons import print_separator +from commons import initialize_distributed +import mpu +import torch +import sys +sys.path.append("../..") + + +def test_set_cuda_rng_state(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing set_rng_state with size {} ...'. + format(tensor_model_parallel_size)) + + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + size = 123 + seed = 1234 + get_accelerator().manual_seed(1234) + tensor = get_accelerator().FloatTensor(size) + + # Get the state + rng_state = get_accelerator().get_rng_state() + rng_state_copy = rng_state.clone() + + # Do some stuff. + for _ in range(5): + torch.randn(size, out=tensor) + result_1 = tensor.clone() + + assert rng_state.sub(rng_state_copy).max() == 0 + assert get_accelerator().get_rng_state().sub(rng_state_copy).max() > 0 + + # State should be different. + new_rng_state = get_accelerator().get_rng_state() + max_diff = new_rng_state.sub(rng_state).max() + print(' max diff in rng state (should be non-zero) on global rank {}: {}'. + format(torch.distributed.get_rank(), max_diff)) + assert max_diff > 0 + + # Reset the rng state and do the same stuff. 
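+    # Restoring the saved generator state should make the accelerator RNG
+    # replay the same sequence, so result_2 computed below is expected to match
+    # result_1 (checked against a 1e-6 tolerance).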
+ mpu.random._set_cuda_rng_state(rng_state) + for _ in range(5): + torch.randn(size, out=tensor) + mpu.random._set_cuda_rng_state(rng_state) + for _ in range(5): + torch.randn(size, out=tensor) + result_2 = tensor.clone() + + # Results should be the same + error = result_2.sub(result_1).abs().max() + print(' max error in generated tensors (should be zero) on ' + 'global rank {}: {}'.format(torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # Input state should have remained intact. + error = rng_state.sub(rng_state_copy).max() + print(' max error in rng state (should be zero) on global rank {}: {}'. + format(torch.distributed.get_rank(), error)) + assert error == 0 + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +def test_cuda_rng_tracker(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing cuda rng tracker with size {} ...'. + format(tensor_model_parallel_size)) + + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + seed_1 = 1234 + seed_2 = 4321 + size = [12, 21] + tensor = get_accelerator().FloatTensor(size) + + # Set to seed_1 and generate two tensors. + get_accelerator().manual_seed(seed_1) + torch.randn(size, out=tensor) + target_11 = tensor.clone() + torch.randn(size, out=tensor) + target_12 = tensor.clone() + + # Set to seed_2 and generate two tensors. + get_accelerator().manual_seed(seed_2) + torch.randn(size, out=tensor) + target_21 = tensor.clone() + torch.randn(size, out=tensor) + target_22 = tensor.clone() + + # Now if we interleave seed_1 and seed_2, + # we should still get the same tensors + get_accelerator().manual_seed(seed_1) + mpu.get_cuda_rng_tracker().add('test', seed_2) + + torch.randn(size, out=tensor) + result_11 = tensor.clone() + + with mpu.get_cuda_rng_tracker().fork('test'): + torch.randn(size, out=tensor) + result_21 = tensor.clone() + + torch.randn(size, out=tensor) + result_12 = tensor.clone() + + with mpu.get_cuda_rng_tracker().fork('test'): + torch.randn(size, out=tensor) + result_22 = tensor.clone() + + diff = result_11.sub(result_21).abs().max() + diff = min(diff, result_12.sub(result_22).abs().max()) + print(' max diff in generated tensors (should be non-zero) on ' + 'global rank {}: {}'.format(torch.distributed.get_rank(), diff)) + assert diff > 1.0e-6 + error = max(result_11.sub(target_11).abs().max(), + result_12.sub(target_12).abs().max()) + error = max(error, result_21.sub(target_21).abs().max()) + error = max(error, result_22.sub(target_22).abs().max()) + print(' max error in generated tensors (should be zero) on ' + 'global rank {}: {}'.format(torch.distributed.get_rank(), error)) + assert error < 1.0e-6 + + # Reset the tracker + mpu.get_cuda_rng_tracker().reset() + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +def test_model_parallel_cuda_manual_seed(tensor_model_parallel_size): + + if torch.distributed.get_rank() == 0: + print('> testing model parallel cuda manual seed with size {} ...'. 
+ format(tensor_model_parallel_size)) + + mpu.initialize_model_parallel(tensor_model_parallel_size) + tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size() + + mpu.model_parallel_cuda_manual_seed(12345) + assert get_accelerator().initial_seed() == 12345 + with mpu.get_cuda_rng_tracker().fork(): + assert get_accelerator().initial_seed() == (12345 + 2718 + + mpu.get_tensor_model_parallel_rank()) + + # Reset the tracker + mpu.get_cuda_rng_tracker().reset() + + # Reset groups + mpu.destroy_model_parallel() + + torch.distributed.barrier() + if torch.distributed.get_rank() == 0: + print('>> passed the test :-)') + + +if __name__ == '__main__': + + initialize_distributed() + world_size = torch.distributed.get_world_size() + + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test set rng state') + test_set_cuda_rng_state(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test cuda rng tracker') + test_cuda_rng_tracker(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 + + tensor_model_parallel_size = 1 + while tensor_model_parallel_size <= world_size: + print_separator('test model parallel cuda manual seed') + test_model_parallel_cuda_manual_seed(tensor_model_parallel_size) + tensor_model_parallel_size *= 2 diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/optimizer_param_scheduler.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/optimizer_param_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..7304b257550f0bf70130193948c7a24b9edc3a4e --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/optimizer_param_scheduler.py @@ -0,0 +1,277 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Learning rate decay and weight decay incr functions.""" + +import math + +from megatron import print_rank_0, get_args + +class OptimizerParamScheduler(object): + """Anneals learning rate and weight decay""" + + def __init__(self, optimizer, max_lr, min_lr, + lr_warmup_steps, lr_decay_steps, lr_decay_style, + start_wd, end_wd, wd_incr_steps, wd_incr_style, + use_checkpoint_opt_param_scheduler=True, + override_opt_param_scheduler=False): + args = get_args() + # Class values. + self.optimizer = optimizer + + self.max_lr = float(max_lr) + self.min_lr = min_lr + assert self.min_lr >= 0.0 + assert self.max_lr >= self.min_lr + + self.lr_warmup_steps = lr_warmup_steps + self.num_steps = 0 + self.lr_decay_steps = lr_decay_steps + assert self.lr_decay_steps > 0 + assert self.lr_warmup_steps < self.lr_decay_steps + + self.lr_decay_tokens = args.lr_decay_tokens + self.num_tokens = 0 + self.lr_warmup_tokens = args.lr_warmup_tokens + + self.lr_decay_style = lr_decay_style + + self.start_wd = start_wd + self.end_wd = end_wd + assert self.start_wd >= 0.0 + assert self.end_wd >= self.start_wd + self.wd_incr_steps = wd_incr_steps + self.wd_incr_style = wd_incr_style + + self.override_opt_param_scheduler = override_opt_param_scheduler + self.use_checkpoint_opt_param_scheduler = use_checkpoint_opt_param_scheduler + if self.override_opt_param_scheduler: + assert not self.use_checkpoint_opt_param_scheduler, 'both override and '\ + 'use-checkpoint are set.' 
+ + # Set the learning rate + self.step(0) + print_rank_0('> learning rate decay style: {}'.format(self.lr_decay_style)) + + + def get_wd(self): + """ Weight decay incr functions""" + if self.num_steps > self.wd_incr_steps: + return self.end_wd + + if self.wd_incr_style == 'constant': + assert self.start_wd == self.end_wd + return self.end_wd + + incr_ratio = float(self.num_steps) / float(self.wd_incr_steps) + assert incr_ratio >= 0.0 + assert incr_ratio <= 1.0 + delta_wd = self.end_wd - self.start_wd + + if self.wd_incr_style == 'linear': + coeff = incr_ratio + elif self.wd_incr_style == 'cosine': + coeff = 0.5 * (math.cos(math.pi * (1 - incr_ratio)) + 1.0) + else: + raise Exception('{} weight decay increment style is not supported.'.format( + self.wd_incr_style)) + + return self.start_wd + coeff * delta_wd + + + def get_lr(self): + """Learning rate decay functions from: + https://openreview.net/pdf?id=BJYwwY9ll pg. 4""" + + # Use linear warmup for the initial part. + if self.lr_warmup_tokens is None: + if self.lr_warmup_steps > 0 and self.num_steps <= self.lr_warmup_steps: + if self.num_steps == self.lr_warmup_steps and \ + self.lr_decay_tokens is not None: + # The case of step/sample-wise warmup + token-wise decay + self.lr_warmup_tokens = self.num_tokens + return self.max_lr * float(self.num_steps) / \ + float(self.lr_warmup_steps) + else: + if self.lr_warmup_tokens > 0 and self.num_tokens <= self.lr_warmup_tokens: + return self.max_lr * float(self.num_tokens) / \ + float(self.lr_warmup_tokens) + + # If the learning rate is constant, just return the initial value. + if self.lr_decay_style == 'constant': + return self.max_lr + + # For any steps larger than `self.lr_decay_steps`, use `self.min_lr`. + if self.lr_decay_tokens is None: + if self.num_steps > self.lr_decay_steps: + return self.min_lr + else: + if self.num_tokens > self.lr_decay_tokens: + return self.min_lr + + # If we are done with the warmup period, use the decay style. 
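# --- illustrative aside (editor's sketch, not part of the original patch) ---
# The decay branch below reduces to a simple closed form. This standalone helper
# shows the same linear-warmup + cosine-decay schedule on plain step counts
# (token-based decay uses the identical formula with tokens in place of steps,
# and decay_steps > warmup_steps is assumed). Argument names are illustrative,
# not the class API.
import math

def cosine_lr(step, max_lr, min_lr, warmup_steps, decay_steps):
    if warmup_steps > 0 and step <= warmup_steps:
        return max_lr * step / warmup_steps                # linear warmup
    if step > decay_steps:
        return min_lr                                      # floor after decay
    decay_ratio = (step - warmup_steps) / (decay_steps - warmup_steps)
    coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0)  # goes from 1 to 0
    return min_lr + coeff * (max_lr - min_lr)

# e.g. cosine_lr(0, ...) == 0.0, max_lr at the end of warmup,
# and min_lr once step reaches decay_steps.
# --- end aside ---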
+ if self.lr_decay_style == 'inverse-square-root': + if self.lr_warmup_tokens is None: + warmup_steps = max(self.lr_warmup_steps, 1) + num_steps = max(self.num_steps, 1) + lr = self.max_lr * warmup_steps ** 0.5 / (num_steps ** 0.5) + else: + warmup_tokens = max(self.lr_warmup_tokens, 1) + num_tokens = max(self.num_tokens, 1) + lr = self.max_lr * warmup_tokens ** 0.5 / (num_tokens ** 0.5) + return max(self.min_lr, lr) + + if self.lr_decay_tokens is None: + num_steps_ = self.num_steps - self.lr_warmup_steps + decay_steps_ = self.lr_decay_steps - self.lr_warmup_steps + decay_ratio = float(num_steps_) / float(decay_steps_) + else: + num_tokens_ = self.num_tokens - self.lr_warmup_tokens + decay_tokens_ = self.lr_decay_tokens - self.lr_warmup_tokens + decay_ratio = float(num_tokens_) / float(decay_tokens_) + assert decay_ratio >= 0.0 + assert decay_ratio <= 1.0 + delta_lr = self.max_lr - self.min_lr + + if self.lr_decay_style == 'linear': + coeff = (1.0 - decay_ratio) + elif self.lr_decay_style == 'cosine': + coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0) + else: + raise Exception('{} decay style is not supported.'.format( + self.lr_decay_style)) + + return self.min_lr + coeff * delta_lr + + + def step(self, increment, token_num=None): + """Set lr for all parameters groups.""" + if token_num is None: + args = get_args() + token_num = args.consumed_train_tokens + self.num_tokens = token_num + self.num_steps += increment + new_lr = self.get_lr() + new_wd = self.get_wd() + for group in self.optimizer.param_groups: + group['lr'] = new_lr * group.get('lr_mult', 1.0) + group['weight_decay'] = new_wd * group.get('wd_mult', 1.0) + + + def state_dict(self): + state_dict = { + 'max_lr': self.max_lr, + 'lr_warmup_steps': self.lr_warmup_steps, + 'lr_warmup_tokens': self.lr_warmup_tokens, + 'num_steps': self.num_steps, + 'num_tokens': self.num_tokens, + 'lr_decay_style': self.lr_decay_style, + 'lr_decay_steps': self.lr_decay_steps, + 'lr_decay_tokens': self.lr_decay_tokens, + 'min_lr': self.min_lr, + 'start_wd': self.start_wd, + 'end_wd': self.end_wd, + 'wd_incr_style': self.wd_incr_style, + 'wd_incr_steps': self.wd_incr_steps + } + return state_dict + + + def _check_and_set(self, cls_value, sd_value, name): + """Auxiliary function for checking the values in the checkpoint and + setting them.""" + if self.override_opt_param_scheduler: + print_rank_0(' > overriding {} value to {}'.format(name, cls_value)) + return cls_value + + if not self.use_checkpoint_opt_param_scheduler: + assert cls_value == sd_value, \ + f'OptimizerParamScheduler: class input value {cls_value} and checkpoint' \ + f'value {sd_value} for {name} do not match' + print_rank_0(' > using checkpoint value {} for {}'.format(sd_value, + name)) + return sd_value + + + def load_state_dict(self, sd): + + if 'start_lr' in sd: + max_lr_ = sd['start_lr'] + else: + max_lr_ = sd['max_lr'] + self.max_lr = self._check_and_set(self.max_lr, max_lr_, + 'learning rate') + + self.min_lr = self._check_and_set(self.min_lr, sd['min_lr'], + 'minimum learning rate') + + if 'warmup_iter' in sd: + lr_warmup_steps_ = sd['warmup_iter'] + elif 'warmup_steps' in sd: + lr_warmup_steps_ = sd['warmup_steps'] + else: + lr_warmup_steps_ = sd['lr_warmup_steps'] + self.lr_warmup_steps = self._check_and_set(self.lr_warmup_steps, + lr_warmup_steps_, + 'warmup iterations') + if 'warmup_tokens' in sd: + lr_warmup_tokens_ = sd['warmup_tokens'] + else: + lr_warmup_tokens_ = sd['lr_warmup_tokens'] + self.lr_warmup_tokens = self._check_and_set(self.lr_warmup_tokens, + 
lr_warmup_tokens_, + 'warmup tokens') + + if 'end_iter' in sd: + lr_decay_steps_ = sd['end_iter'] + elif 'decay_steps' in sd: + lr_decay_steps_ = sd['decay_steps'] + else: + lr_decay_steps_ = sd['lr_decay_steps'] + self.lr_decay_steps = self._check_and_set(self.lr_decay_steps, lr_decay_steps_, + 'total number of iterations') + if 'decay_tokens' in sd: + lr_decay_tokens_ = sd['decay_tokens'] + else: + lr_decay_tokens_ = sd['lr_decay_tokens'] + self.lr_decay_tokens = self._check_and_set(self.lr_decay_tokens, + lr_decay_tokens_, + 'decay tokens') + + if 'decay_style' in sd: + lr_decay_style_ = sd['decay_style'] + else: + lr_decay_style_ = sd['lr_decay_style'] + self.lr_decay_style = self._check_and_set(self.lr_decay_style, + lr_decay_style_, + 'learning rate decay style') + + if 'num_iters' in sd: + num_steps = sd['num_iters'] + else: + num_steps = sd['num_steps'] + if 'num_tokens' in sd: + self.num_tokens = sd['num_tokens'] + self.step(increment=num_steps, token_num=self.num_tokens) + + + if 'start_wd' in sd: + self.start_wd = self._check_and_set(self.start_wd, + sd['start_wd'], + "start weight decay") + self.end_wd = self._check_and_set(self.end_wd, + sd['end_wd'], + "end weight decay") + self.wd_incr_steps = self._check_and_set(self.wd_incr_steps, + sd['wd_incr_steps'], + "total number of weight decay iterations") + self.wd_incr_style = self._check_and_set(self.wd_incr_style, + sd['wd_incr_style'], + "weight decay incr style") + + + + + + + + diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/p2p_communication.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/p2p_communication.py new file mode 100644 index 0000000000000000000000000000000000000000..770060a8899f6d39d8b95cb1ffa60929e3141c9d --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/p2p_communication.py @@ -0,0 +1,264 @@ +# coding=utf-8 +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from functools import reduce +import operator +import torch +from deepspeed.accelerator import get_accelerator +from megatron import get_args +from megatron.core import mpu + + +def _communicate(tensor_send_next, tensor_send_prev, recv_prev, recv_next, + use_ring_exchange=False): + """Communicate tensors between stages. Used as helper method in other + communication methods that are used in megatron/schedules.py. + + Takes the following arguments: + tensor_send_next: tensor to send to next rank (no tensor sent if + set to None). + tensor_send_prev: tensor to send to prev rank (no tensor sent if + set to None). + recv_prev: boolean for whether tensor should be received from + previous rank. + recv_next: boolean for whether tensor should be received from + next rank. + use_ring_exchange: boolean for whether torch.distributed.ring_exchange() + API should be used. + + Returns: + (tensor_recv_prev, tensor_recv_next) + """ + args = get_args() + + # Create placeholder tensors for receive in forward and backward directions + # if needed. 
+ tensor_recv_prev = None + tensor_recv_next = None + tensor_shape = (args.seq_length, args.micro_batch_size, args.hidden_size) + if args.scatter_gather_tensors_in_pipeline: + tensor_chunk_shape = reduce(operator.mul, tensor_shape, 1) // \ + mpu.get_tensor_model_parallel_world_size() + else: + tensor_chunk_shape = tensor_shape + dtype = args.params_dtype + if args.fp32_residual_connection: + dtype = torch.float + if recv_prev: + tensor_recv_prev = torch.empty(tensor_chunk_shape, + requires_grad=True, + device=get_accelerator().current_device_name(), + dtype=dtype) + if recv_next: + tensor_recv_next = torch.empty(tensor_chunk_shape, + requires_grad=True, + device=get_accelerator().current_device_name(), + dtype=dtype) + + # Split tensor into smaller chunks if using scatter-gather optimization. + if args.scatter_gather_tensors_in_pipeline: + if tensor_send_next is not None: + tensor_send_next = mpu.split_tensor_into_1d_equal_chunks(tensor_send_next) + + if tensor_send_prev is not None: + tensor_send_prev = mpu.split_tensor_into_1d_equal_chunks(tensor_send_prev) + + # Send tensors in both the forward and backward directions as appropriate. + if use_ring_exchange: + torch.distributed.ring_exchange(tensor_send_prev=tensor_send_prev, + tensor_recv_prev=tensor_recv_prev, + tensor_send_next=tensor_send_next, + tensor_recv_next=tensor_recv_next, + group=mpu.get_pipeline_model_parallel_group()) + else: + ops = [] + if tensor_send_prev is not None: + send_prev_op = torch.distributed.P2POp( + torch.distributed.isend, tensor_send_prev, + mpu.get_pipeline_model_parallel_prev_rank()) + ops.append(send_prev_op) + if tensor_recv_prev is not None: + recv_prev_op = torch.distributed.P2POp( + torch.distributed.irecv, tensor_recv_prev, + mpu.get_pipeline_model_parallel_prev_rank()) + ops.append(recv_prev_op) + if tensor_send_next is not None: + send_next_op = torch.distributed.P2POp( + torch.distributed.isend, tensor_send_next, + mpu.get_pipeline_model_parallel_next_rank()) + ops.append(send_next_op) + if tensor_recv_next is not None: + recv_next_op = torch.distributed.P2POp( + torch.distributed.irecv, tensor_recv_next, + mpu.get_pipeline_model_parallel_next_rank()) + ops.append(recv_next_op) + if len(ops) > 0: + reqs = torch.distributed.batch_isend_irecv(ops) + for req in reqs: + req.wait() + # To protect against race condition when using batch_isend_irecv(). + get_accelerator().synchronize() + + # If using scatter-gather optimization, gather smaller chunks. 
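# --- illustrative aside (editor's sketch, not part of the original patch) ---
# The scatter-gather optimization sends only a 1/tp_world_size slice of the
# flattened activation over p2p and reassembles it afterwards (the real helpers
# are mpu.split_tensor_into_1d_equal_chunks / mpu.gather_split_1d_tensor). A
# minimal single-process sketch of the split/gather round trip; the helper name
# is hypothetical.
import torch

def _demo_split_gather(tensor, tp_world_size=4):
    flat = tensor.reshape(-1)
    assert flat.numel() % tp_world_size == 0
    chunks = torch.chunk(flat, tp_world_size)        # one chunk per TP rank
    rebuilt = torch.cat(chunks).view_as(tensor)      # gather + reshape back
    assert torch.equal(rebuilt, tensor)
    return chunks[0]                                 # what a single rank would send

# e.g. _demo_split_gather(torch.randn(8, 2, 16)) sends 8*2*16/4 elements per rank.
# --- end aside ---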
+ if args.scatter_gather_tensors_in_pipeline: + if recv_prev: + tensor_recv_prev = mpu.gather_split_1d_tensor( + tensor_recv_prev).view(tensor_shape).requires_grad_() + + if recv_next: + tensor_recv_next = mpu.gather_split_1d_tensor( + tensor_recv_next).view(tensor_shape).requires_grad_() + + return tensor_recv_prev, tensor_recv_next + + +def recv_forward(timers=None): + """Receive tensor from previous rank in pipeline (forward receive).""" + if mpu.is_pipeline_first_stage(): + input_tensor = None + else: + if timers is not None: + timers('forward-recv').start() + input_tensor, _ = _communicate( + tensor_send_next=None, + tensor_send_prev=None, + recv_prev=True, + recv_next=False) + if timers is not None: + timers('forward-recv').stop() + return input_tensor + + +def recv_backward(timers=None): + """Receive tensor from next rank in pipeline (backward receive).""" + if mpu.is_pipeline_last_stage(): + output_tensor_grad = None + else: + if timers is not None: + timers('backward-recv').start() + _, output_tensor_grad = _communicate( + tensor_send_next=None, + tensor_send_prev=None, + recv_prev=False, + recv_next=True) + if timers is not None: + timers('backward-recv').stop() + return output_tensor_grad + + +def send_forward(output_tensor, timers=None): + """Send tensor to next rank in pipeline (forward send).""" + if not mpu.is_pipeline_last_stage(): + if timers is not None: + timers('forward-send').start() + _communicate( + tensor_send_next=output_tensor, + tensor_send_prev=None, + recv_prev=False, + recv_next=False) + if timers is not None: + timers('forward-send').stop() + + +def send_backward(input_tensor_grad, timers=None): + """Send tensor to previous rank in pipeline (backward send).""" + if not mpu.is_pipeline_first_stage(): + if timers is not None: + timers('backward-send').start() + _communicate( + tensor_send_next=None, + tensor_send_prev=input_tensor_grad, + recv_prev=False, + recv_next=False) + if timers is not None: + timers('backward-send').stop() + + +def send_forward_recv_backward(output_tensor, timers=None): + """Batched send and recv with next rank in pipeline.""" + if mpu.is_pipeline_last_stage(): + output_tensor_grad = None + else: + if timers is not None: + timers('forward-send-backward-recv').start() + _, output_tensor_grad = _communicate( + tensor_send_next=output_tensor, + tensor_send_prev=None, + recv_prev=False, + recv_next=True) + if timers is not None: + timers('forward-send-backward-recv').stop() + return output_tensor_grad + + +def send_backward_recv_forward(input_tensor_grad, timers=None): + """Batched send and recv with previous rank in pipeline.""" + if mpu.is_pipeline_first_stage(): + input_tensor = None + else: + if timers is not None: + timers('backward-send-forward-recv').start() + input_tensor, _ = _communicate( + tensor_send_next=None, + tensor_send_prev=input_tensor_grad, + recv_prev=True, + recv_next=False) + if timers is not None: + timers('backward-send-forward-recv').stop() + return input_tensor + + +def send_forward_recv_forward(output_tensor, recv_prev, timers=None): + """Batched recv from previous rank and send to next rank in pipeline.""" + if timers is not None: + timers('forward-send-forward-recv').start() + input_tensor, _ = _communicate( + tensor_send_next=output_tensor, + tensor_send_prev=None, + recv_prev=recv_prev, + recv_next=False) + if timers is not None: + timers('forward-send-forward-recv').stop() + return input_tensor + + +def send_backward_recv_backward(input_tensor_grad, recv_next, timers=None): + """Batched recv from next rank 
and send to previous rank in pipeline.""" + if timers is not None: + timers('backward-send-backward-recv').start() + _, output_tensor_grad = _communicate( + tensor_send_next=None, + tensor_send_prev=input_tensor_grad, + recv_prev=False, + recv_next=recv_next) + if timers is not None: + timers('backward-send-backward-recv').stop() + return output_tensor_grad + + +def send_forward_backward_recv_forward_backward( + output_tensor, input_tensor_grad, recv_prev, + recv_next, timers=None): + """Batched send and recv with previous and next ranks in pipeline.""" + if timers is not None: + timers('forward-backward-send-forward-backward-recv').start() + input_tensor, output_tensor_grad = _communicate( + tensor_send_next=output_tensor, + tensor_send_prev=input_tensor_grad, + recv_prev=recv_prev, + recv_next=recv_next) + if timers is not None: + timers('forward-backward-send-forward-backward-recv').stop() + return input_tensor, output_tensor_grad diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/__init__.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..77da7be30ae4d02bd7ab1e4bae86afc8923d4e23 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + + +from .api import ( + generate, + generate_and_post_process, + beam_search_and_post_process) diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/api.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/api.py new file mode 100644 index 0000000000000000000000000000000000000000..090b630a5f39fbbaaa8a04b42cf89d31a8c96d67 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/api.py @@ -0,0 +1,200 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Inference API.""" + + +import torch + +from megatron.core import mpu +from .communication import broadcast_float_list +from .generation import ( + generate_tokens_probs_and_return_on_first_stage, + score_and_return_on_first_stage, + beam_search_and_return_on_first_stage) +from .tokenization import ( + tokenize_prompts, + detokenize_generations) + +def generate_and_post_process(model, + prompts=None, + tokens_to_generate=0, + return_output_log_probs=False, + top_k_sampling=0, + top_p_sampling=0.0, + top_p_decay=0.0, + top_p_bound=0.0, + temperature=1.0, + add_BOS=False, + use_eod_token_for_early_termination=True, + stop_on_double_eol=False, + stop_on_eol=False, + prevent_newline_after_colon=False, + random_seed=-1): + """Run inference and post-process outputs, i.e., detokenize, + move to cpu and convert to list.""" + + # Main inference. + tokens, lengths, output_log_probs = generate( + model, + prompts=prompts, + tokens_to_generate=tokens_to_generate, + return_output_log_probs=return_output_log_probs, + top_k_sampling=top_k_sampling, + top_p_sampling=top_p_sampling, + top_p_decay=top_p_decay, + top_p_bound=top_p_bound, + temperature=temperature, + add_BOS=add_BOS, + use_eod_token_for_early_termination=use_eod_token_for_early_termination, + stop_on_double_eol=stop_on_double_eol, + stop_on_eol=stop_on_eol, + prevent_newline_after_colon=prevent_newline_after_colon, + random_seed=random_seed) + + # Only post-process on first stage. 
+ if mpu.is_pipeline_first_stage(): + tokens, prompts_plus_generations, prompts_plus_generations_segments = \ + detokenize_generations(tokens, lengths, True) + + if return_output_log_probs: + output_log_probs = output_log_probs.cpu().numpy().tolist() + for i, (prob, seg) in enumerate(zip(output_log_probs, prompts_plus_generations_segments)): + output_log_probs[i] = prob[:len(seg)-1] + + return prompts_plus_generations, prompts_plus_generations_segments, \ + output_log_probs, tokens + + return None + +def generate(model, + prompts=None, + tokens_to_generate=0, + return_output_log_probs=False, + top_k_sampling=0, + top_p_sampling=0.0, + top_p_decay=0.0, + top_p_bound=0.0, + temperature=1.0, + add_BOS=False, + use_eod_token_for_early_termination=True, + stop_on_double_eol=False, + stop_on_eol=False, + prevent_newline_after_colon=False, + random_seed=-1): + """Given prompts and input parameters, run inference and return: + tokens: prompts plus the generated tokens. + lengths: length of the prompt + generations. Note that we can + discard tokens in the tokens tensor that are after the + corresponding length. + output_log_probs: log probs of the tokens. + """ + + # Make sure input params are avaialble to all ranks. + values = [tokens_to_generate, + return_output_log_probs, + top_k_sampling, top_p_sampling, top_p_decay, top_p_bound, + temperature, add_BOS, use_eod_token_for_early_termination, + stop_on_double_eol, + stop_on_eol, + prevent_newline_after_colon, + random_seed] + values_float_tensor = broadcast_float_list(len(values), float_list=values) + tokens_to_generate = int(values_float_tensor[0].item()) + return_output_log_probs = bool(values_float_tensor[1].item()) + top_k_sampling = int(values_float_tensor[2].item()) + top_p_sampling = values_float_tensor[3].item() + top_p_decay = values_float_tensor[4].item() + top_p_bound = values_float_tensor[5].item() + temperature = values_float_tensor[6].item() + add_BOS = bool(values_float_tensor[7].item()) + use_eod_token_for_early_termination = bool(values_float_tensor[8].item()) + stop_on_double_eol = bool(values_float_tensor[9].item()) + stop_on_eol = bool(values_float_tensor[10].item()) + prevent_newline_after_colon = bool(values_float_tensor[11].item()) + random_seed = int(values_float_tensor[12].item()) + + if random_seed != -1: + torch.random.manual_seed(random_seed) + + # Tokenize prompts and get the batch. + # Note that these tensors are broadcaseted to all ranks. + if torch.distributed.get_rank() == 0: + assert prompts is not None + + context_tokens_tensor, context_length_tensor = tokenize_prompts( + prompts=prompts, tokens_to_generate=tokens_to_generate, add_BOS=add_BOS) + + if tokens_to_generate == 0: + return score_and_return_on_first_stage( + model, context_tokens_tensor, context_length_tensor) + + # Main inference function. + # Note that the outputs are available on the first stage. 
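# --- illustrative aside (editor's sketch, not part of the original patch) ---
# The parameter broadcast above works by flattening heterogeneous scalars
# (ints, bools, floats) into one float tensor so a single collective carries
# everything, then casting each slot back on every rank. A single-process sketch
# of the pack/unpack step (broadcast_float_list performs the actual broadcast);
# the helper name and argument defaults are hypothetical.
import torch

def _demo_pack_unpack(tokens_to_generate=64, return_log_probs=True,
                      top_k=0, top_p=0.9, temperature=0.8):
    packed = torch.tensor([tokens_to_generate, return_log_probs,
                           top_k, top_p, temperature], dtype=torch.float32)
    # ... a torch.distributed.broadcast(packed, src=0) would go here ...
    return (int(packed[0].item()), bool(packed[1].item()),
            int(packed[2].item()), packed[3].item(), packed[4].item())
# --- end aside ---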
+ return generate_tokens_probs_and_return_on_first_stage( + model, context_tokens_tensor, context_length_tensor, + return_output_log_probs=return_output_log_probs, + top_k=top_k_sampling, + top_p=top_p_sampling, + top_p_decay=top_p_decay, + top_p_bound=top_p_bound, + temperature=temperature, + use_eod_token_for_early_termination=use_eod_token_for_early_termination, + stop_on_double_eol=stop_on_double_eol, + stop_on_eol=stop_on_eol, + prevent_newline_after_colon=prevent_newline_after_colon) + +def beam_search_and_post_process(model, + prompts=None, + tokens_to_generate=0, + beam_size=0, + add_BOS=False, + stop_token=50256, + num_return_gen=1, + length_penalty=1, + prevent_newline_after_colon=False): + """Run beam search and post-process outputs, i.e., detokenize, + move to cpu and convert to list.""" + + # Main inference. + tokens, scores = beam_search(model, + prompts=prompts, + tokens_to_generate=tokens_to_generate, + beam_size=beam_size, + add_BOS=add_BOS, + stop_token=stop_token, + num_return_gen=num_return_gen, + length_penalty=length_penalty, + prevent_newline_after_colon=prevent_newline_after_colon) + # Only post-process on first stage. + if mpu.is_pipeline_first_stage(): + lengths = tokens.size(1)*torch.ones(beam_size, dtype=torch.int64, device=torch.cuda.current_device()) + tokens, prompts_plus_generations, prompts_plus_generations_segments = detokenize_generations(tokens, lengths, True) + scores = scores.cpu().numpy().tolist() + return prompts_plus_generations, prompts_plus_generations_segments, scores + + return None + +def beam_search(model, prompts=None, tokens_to_generate=0, beam_size=0, add_BOS=False, stop_token=50256, num_return_gen=1, length_penalty=1, prevent_newline_after_colon=False): + # Make sure input params are avaialble to all ranks. + values = [tokens_to_generate, + beam_size, + add_BOS, + stop_token, + num_return_gen, + length_penalty, + prevent_newline_after_colon] + values_float_tensor = broadcast_float_list(len(values), float_list=values) + tokens_to_generate = int(values_float_tensor[0].item()) + beam_size = int(values_float_tensor[1].item()) + add_BOS = bool(values_float_tensor[2].item()) + stop_token = int(values_float_tensor[3].item()) + num_return_gen = int(values_float_tensor[4].item()) + length_penalty = values_float_tensor[5].item() + prevent_newline_after_colon = values_float_tensor[6].item() + + context_tokens_tensor, context_length_tensor = tokenize_prompts( + prompts=prompts, tokens_to_generate=tokens_to_generate, add_BOS=add_BOS) + + return beam_search_and_return_on_first_stage(model, context_tokens_tensor, context_length_tensor, + beam_size, stop_token=stop_token, num_return_gen=num_return_gen, length_penalty=length_penalty, + prevent_newline_after_colon=prevent_newline_after_colon) diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/beam_utils.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/beam_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..911a64143a86c8521abd9741df22de528a82f692 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/beam_utils.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +## from huggingface beam search +class BeamHypotheses(object): + def __init__(self, num_beams, length_penalty=1.0, early_stopping=False): + """ + Initialize n-best list of hypotheses. + """ + self.length_penalty = length_penalty + self.early_stopping = early_stopping + self.num_beams = num_beams + self.beams = [] + self.worst_score = 1e9 + + def __len__(self): + """ + Number of hypotheses in the list. + """ + return len(self.beams) + + def add(self, hyp, sum_logprobs, length): + """ + Add a new hypothesis to the list. + """ + score = sum_logprobs / length ** self.length_penalty + if len(self) < self.num_beams or score > self.worst_score: + self.beams.append((score, hyp)) + if len(self) > self.num_beams: + sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)]) + del self.beams[sorted_scores[0][1]] + self.worst_score = sorted_scores[1][0] + else: + self.worst_score = min(score, self.worst_score) + + def is_done(self, best_sum_logprobs, cur_len): + """ + If there are enough hypotheses and that none of the hypotheses being generated + can become better than the worst one in the heap, then we are done with this sentence. + """ + + if len(self) < self.num_beams: + return False + elif self.early_stopping: + return True + else: + cur_score = best_sum_logprobs / cur_len ** self.length_penalty + ret = self.worst_score >= cur_score + return ret + diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/communication.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/communication.py new file mode 100644 index 0000000000000000000000000000000000000000..dee32077f34904f7585fab0f5180a5d014f7829f --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/communication.py @@ -0,0 +1,185 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Communications utilities.""" + + +import torch + +from megatron.core import mpu + + + +# TODO: use functions from megatron/p2p +def recv_from_prev_pipeline_rank_(recv_buffer=None): + """Receive from previous pipeline stage and update the + input buffer inplace.""" + if not mpu.is_pipeline_first_stage(): + assert recv_buffer is not None + recv_prev_op = torch.distributed.P2POp( + torch.distributed.irecv, recv_buffer, + mpu.get_pipeline_model_parallel_prev_rank()) + reqs = torch.distributed.batch_isend_irecv([recv_prev_op]) + for req in reqs: + req.wait() + # To protect against race condition when using batch_isend_irecv(). + torch.cuda.synchronize() + + + +# TODO: use functions from megatron/p2p +def send_to_next_pipeline_rank(tensor=None): + """Send output to the next pipeline stage.""" + if not mpu.is_pipeline_last_stage(): + assert tensor is not None + send_next_op = torch.distributed.P2POp( + torch.distributed.isend, tensor, + mpu.get_pipeline_model_parallel_next_rank()) + reqs = torch.distributed.batch_isend_irecv([send_next_op]) + for req in reqs: + req.wait() + # To protect against race condition when using batch_isend_irecv(). 
+ torch.cuda.synchronize() + + + +def _is_cuda(tensor): + """Check if a tensor is not none and is cuda.""" + assert tensor is not None + assert tensor.is_cuda + + + +def _is_cuda_contiguous(tensor): + """Check if a tensor is not none, is cuda, and is contiguous.""" + _is_cuda(tensor) + assert tensor.is_contiguous() + + + +def broadcast_from_last_pipeline_stage(size, dtype, tensor=None): + """Broadcast a tensor from last pipeline stage to all ranks.""" + + is_last_stage = mpu.is_pipeline_last_stage() + # If first stage and last state are the same, then there is no + # pipeline parallelism and no need to communicate. + if mpu.is_pipeline_first_stage() and is_last_stage: + return tensor + + if is_last_stage: + _is_cuda_contiguous(tensor) + else: + tensor = torch.empty(size, + dtype=dtype, + device=torch.cuda.current_device()) + # Get the group and corresponding source rank. + src = mpu.get_pipeline_model_parallel_last_rank() + group = mpu.get_pipeline_model_parallel_group() + torch.distributed.broadcast(tensor, src, group) + + return tensor + + + +def broadcast_from_last_to_first_pipeline_stage(size, dtype, tensor=None): + """Broadcast tensor values from last stage into the first stage.""" + + is_last_stage = mpu.is_pipeline_last_stage() + is_first_stage = mpu.is_pipeline_first_stage() + # If first stage and last state are the same, then there is no + # pipeline parallelism and no need to communicate. + if is_first_stage and is_last_stage: + return tensor + # Only first and last stage pipeline stages need to be involved. + if is_last_stage or is_first_stage: + if is_last_stage: + _is_cuda_contiguous(tensor) + else: + tensor = torch.empty(size, + dtype=dtype, + device=torch.cuda.current_device()) + src = mpu.get_pipeline_model_parallel_last_rank() + group = mpu.get_embedding_group() + # Broadcast from last stage into the first stage. + torch.distributed.broadcast(tensor, src, group) + else: + tensor = None + + return tensor + + + +def copy_from_last_to_first_pipeline_stage(size, dtype, tensor=None): + """Copy tensor values from last stage into the first stage. + Note that the input tensor is updated in place.""" + + is_last_stage = mpu.is_pipeline_last_stage() + is_first_stage = mpu.is_pipeline_first_stage() + # If first stage and last state are the same, then there is no + # pipeline parallelism and no need to communicate. + if is_first_stage and is_last_stage: + return + # Only first and last stage pipeline stages need to be involved. + if is_last_stage or is_first_stage: + _is_cuda(tensor) + is_contiguous = tensor.is_contiguous() + src = mpu.get_pipeline_model_parallel_last_rank() + group = mpu.get_embedding_group() + if is_contiguous: + tensor_ = tensor + else: + if is_last_stage: + tensor_ = tensor.contiguous() + else: + tensor_ = torch.empty(size, + dtype=dtype, + device=torch.cuda.current_device()) + # Broadcast from last stage into the first stage. + torch.distributed.broadcast(tensor_, src, group) + # Update the first stage tensor + if is_first_stage and not is_contiguous: + tensor[...] = tensor_ + + + +def broadcast_tensor(size, dtype, tensor=None, rank=0): + """ Given size and type of a tensor on all ranks and the tensor value + only on a specific rank, broadcast from that rank to all other ranks. 
+ """ + + if torch.distributed.get_rank() == rank: + _is_cuda_contiguous(tensor) + else: + tensor = torch.empty(size, + dtype=dtype, + device=torch.cuda.current_device()) + + torch.distributed.broadcast(tensor, rank) + + return tensor + + + +def broadcast_list(size, dtype, list_values=None, rank=0): + """Broadcast a list of values with a given type.""" + + tensor = None + if torch.distributed.get_rank() == rank: + tensor = torch.tensor(list_values, dtype=dtype, + device=torch.cuda.current_device()) + + return broadcast_tensor(size, dtype, tensor=tensor, rank=rank) + + + +def broadcast_int_list(size, int_list=None, rank=0): + """Broadcast a list of interger values.""" + + return broadcast_list(size, torch.int64, list_values=int_list, rank=rank) + + + +def broadcast_float_list(size, float_list=None, rank=0): + """Broadcast a list of float values.""" + + return broadcast_list(size, torch.float32, list_values=float_list, + rank=rank) diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/forward_step.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/forward_step.py new file mode 100644 index 0000000000000000000000000000000000000000..feb087cbb61e28a6f0631c05322c33037117c9e9 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/forward_step.py @@ -0,0 +1,205 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Forward step utilities.""" + +from collections.abc import Iterable + +import torch + +from megatron import get_args +from megatron.core import mpu +from .communication import ( + send_to_next_pipeline_rank, + recv_from_prev_pipeline_rank_) + + + +class InferenceParams: + """Inference parameters that are passed to the main model in order + to efficienly calculate and store the context during inference.""" + + def __init__(self, max_batch_size, max_sequence_len): + """Note that offsets are set to zero and we always set the + flag to allocate memory. After the first call, make sure to + set this flag to False.""" + self.max_sequence_len = max_sequence_len + self.max_batch_size = max_batch_size + self.sequence_len_offset = 0 + self.batch_size_offset = 0 + self.key_value_memory_dict = {} + + def swap_key_value_dict(self, batch_idx): + "swap between batches" + if len(self.key_value_memory_dict) == 0: + raise ValueError("should not swap when dict in empty") + + for layer_number in self.key_value_memory_dict.keys(): + inference_key_memory, inference_value_memory = self.key_value_memory_dict[layer_number] + assert len(batch_idx) == inference_key_memory.shape[1] ## make sure batch size is the same + new_inference_key_memory = inference_key_memory[:, batch_idx] + new_inference_value_memory = inference_value_memory[:, batch_idx] + self.key_value_memory_dict[layer_number] = ( + new_inference_key_memory, new_inference_value_memory) + +class ForwardStep: + """Forward step function with all the communications. + We use a class here to hide the inference parameters + from the outside caller.""" + + def __init__(self, model, max_batch_size, max_sequence_len): + """Set values so we don't need to do it multiple times.""" + # Make sure model is in eval mode. + assert not isinstance(model, Iterable), \ + 'interleaving schedule is not supported for inference' + model.eval() + self.model = model + # Initialize inference parameters. + self.inference_params = InferenceParams(max_batch_size, + max_sequence_len) + # Pipelining arguments. 
+ args = get_args() + self.pipeline_size_larger_than_one = ( + args.pipeline_model_parallel_size > 1) + # Threshold of pipelining. + self.pipelining_batch_x_seqlen = \ + args.inference_batch_times_seqlen_threshold + + + def __call__(self, tokens, position_ids, attention_mask): + """Invocation of the forward methods. Note that self.inference_params + is being modified by the forward step.""" + # Pipelining case. + if self.pipeline_size_larger_than_one: + current_batch_x_seqlen = tokens.size(0) * tokens.size(1) + if current_batch_x_seqlen >= self.pipelining_batch_x_seqlen: + micro_batch_size = \ + max(1, self.pipelining_batch_x_seqlen // tokens.size(1)) + return _with_pipelining_forward_step(self.model, + tokens, + position_ids, + attention_mask, + self.inference_params, + micro_batch_size) + + return _no_pipelining_forward_step(self.model, + tokens, + position_ids, + attention_mask, + self.inference_params) + + + +def _get_recv_buffer_dtype(args): + """Receive happens between the layers.""" + if args.fp32_residual_connection: + return torch.float + return args.params_dtype + + + +def _allocate_recv_buffer(batch_size, sequence_length): + """Receive happens between the layers with size [s, b, h].""" + if mpu.is_pipeline_first_stage(): + return None + args = get_args() + recv_size = (sequence_length, batch_size, args.hidden_size) + return torch.empty(recv_size, + dtype=_get_recv_buffer_dtype(args), + device=torch.cuda.current_device()) + + + +def _forward_step_helper(model, tokens, position_ids, attention_mask, + inference_params, recv_buffer=None): + """Single forward step. Update the allocate memory flag so + only the first time the memory is allocated.""" + batch_size = tokens.size(0) + sequence_length = tokens.size(1) + if recv_buffer is None: + recv_buffer = _allocate_recv_buffer(batch_size, sequence_length) + + # Receive from previous stage. + recv_from_prev_pipeline_rank_(recv_buffer) + + # Forward pass through the model. + model.set_input_tensor(recv_buffer) + output_tensor = model(tokens, position_ids, attention_mask, + inference_params=inference_params) + + # Send output to the next stage. + send_to_next_pipeline_rank(output_tensor) + + return output_tensor + + + +def _no_pipelining_forward_step(model, tokens, position_ids, attention_mask, + inference_params, recv_buffer=None): + """If recv_buffer is none, we will allocate one on the fly.""" + # Run a simple forward pass. + output_tensor = _forward_step_helper(model, tokens, position_ids, + attention_mask, inference_params, + recv_buffer=recv_buffer) + # Update the sequence length offset. + inference_params.sequence_len_offset += tokens.size(1) + + logits = None + if mpu.is_pipeline_last_stage(): + logits = output_tensor + + return logits + + + +def _with_pipelining_forward_step(model, tokens, position_ids, attention_mask, + inference_params, micro_batch_size): + """No interleaving is supported.""" + sequence_length = tokens.size(1) + batch_size = tokens.size(0) + + # Divide the batch dimension into micro batches. + num_micro_batches, last_chunk = divmod(batch_size, + micro_batch_size) + if last_chunk > 0: + num_micro_batches += 1 + + # Preallocate memory for output logits. + logits = None + if mpu.is_pipeline_last_stage(): + args = get_args() + logits = torch.empty( + (batch_size, sequence_length, args.padded_vocab_size), + dtype=torch.float32, device=torch.cuda.current_device()) + + # Preallocate recv buffer. 
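# --- illustrative aside (editor's sketch, not part of the original patch) ---
# The micro-batching below is a plain divmod split over the batch dimension,
# with a possibly smaller trailing chunk. Sketch of the slicing pattern; the
# helper name is hypothetical.
def _demo_micro_batch_slices(batch_size, micro_batch_size):
    num_micro_batches, remainder = divmod(batch_size, micro_batch_size)
    if remainder > 0:
        num_micro_batches += 1
    slices = []
    for i in range(num_micro_batches):
        start = i * micro_batch_size
        end = min(start + micro_batch_size, batch_size)
        slices.append((start, end))
    return slices

# e.g. _demo_micro_batch_slices(10, 4) -> [(0, 4), (4, 8), (8, 10)]
# --- end aside ---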
+ recv_buffer = _allocate_recv_buffer(micro_batch_size, sequence_length) + + for micro_batch_index in range(num_micro_batches): + # Slice among the batch dimenion. + start = micro_batch_index * micro_batch_size + end = min(start + micro_batch_size, batch_size) + this_micro_batch_size = end - start + tokens2use = tokens[start:end, ...] + position_ids2use = position_ids[start:end, ...] + + # Run a simple forward pass. + if this_micro_batch_size != micro_batch_size: + recv_buffer = None + output = _forward_step_helper(model, tokens2use, position_ids2use, + attention_mask, inference_params, + recv_buffer=recv_buffer) + + # Adjust the batch size offset to account for the micro-batch. + inference_params.batch_size_offset += this_micro_batch_size + + # Copy logits. + if mpu.is_pipeline_last_stage(): + logits[start:end, ...] = output + + # Once we are done with all the micro-batches, we can + # adjust the sequence length offset. + inference_params.sequence_len_offset += sequence_length + # and reset the batch size offset + inference_params.batch_size_offset = 0 + + return logits diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/generation.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/generation.py new file mode 100644 index 0000000000000000000000000000000000000000..79c8b4d1af1058ad259fcf079935d6667a116e34 --- /dev/null +++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/generation.py @@ -0,0 +1,429 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + +"""Generation utilities.""" + +import torch +import torch.nn.functional as F + +from megatron import get_args, get_tokenizer +from megatron.core import mpu +from megatron.utils import get_ltor_masks_and_position_ids +from .communication import ( + copy_from_last_to_first_pipeline_stage, + broadcast_from_last_pipeline_stage, + broadcast_from_last_to_first_pipeline_stage) +from .forward_step import ForwardStep +from .sampling import sample +from .beam_utils import BeamHypotheses + +def score_and_return_on_first_stage(model, tokens, lengths): + """Function for just scoring. + Arguments: + model: no interleaving is supported. + tokens: prompt tokens extended to be of size [b, max_prompt_length] + lengths: original prompt length, size: [b] + Note: Outside of model, other parameters only need to be available on + rank 0. + Outputs: + output_log_probs: log probability of the selected tokens. size: [b, s] + """ + + args = get_args() + + batch_size = tokens.size(0) + max_prompt_length = lengths.max().item() + assert max_prompt_length == tokens.size(1) + + if max_prompt_length > args.max_position_embeddings: + raise ValueError("Length of prompt + tokens_to_generate longer than allowed") + + if max_prompt_length * batch_size > args.max_tokens_to_oom: + raise ValueError("Too many tokens. " + str(max_prompt_length*batch_size)+ " is greater than "+str(args.max_tokens_to_oom)) + + # forward step. + forward_step = ForwardStep(model, batch_size, max_prompt_length) + + # =================== + # Pre-allocate memory + # =================== + + # Log probability of the sequence (prompt + generated tokens). 
+ output_log_probs = None + output_log_probs_size = (batch_size, max_prompt_length - 1) + + if mpu.is_pipeline_last_stage(): + output_log_probs = torch.empty(output_log_probs_size, + dtype=torch.float32, + device=torch.cuda.current_device()) + + # ============= + # Run infernece + # ============= + with torch.no_grad(): + attention_mask, position_ids = _build_attention_mask_and_position_ids(tokens) + + # logits will be meanigful only in the last pipeline stage. + logits = forward_step(tokens, position_ids, attention_mask) + + if mpu.is_pipeline_last_stage(): + # Always the last stage should have an output. + assert logits is not None + log_probs = F.log_softmax(logits, dim=2) + + # Pick the tokens that we need to get the log + # probabilities for. Note that next input token is + # the token which we selected in the current logits, + # so shift by 1. + indices = torch.unsqueeze(tokens[:, 1:], 2) + output_log_probs = torch.gather(log_probs, 2, indices).squeeze(2) + + # ====================================== + # Broadcast to the first pipeline stage. + # ====================================== + output_log_probs = broadcast_from_last_to_first_pipeline_stage( + output_log_probs_size, torch.float32, output_log_probs) + + return tokens, lengths, output_log_probs + +def generate_tokens_probs_and_return_on_first_stage( + model, tokens, lengths, + return_output_log_probs=False, + top_k=0, top_p=0.0, top_p_decay=0.0, top_p_bound=0.0, + temperature=1.0, + use_eod_token_for_early_termination=True, + stop_on_double_eol=False, + stop_on_eol=False, + prevent_newline_after_colon=True + ): + """Main token generation function. + Arguments: + model: no interleaving is supported. + tokens: prompt tokens extended to be of size [b, max-sequence-length] + lengths: original prompt length, size: [b] + return_output_log_probs: flag to calculate the log probability of + the generated tokens. Note that the log probability is the one + from the original logit. + top_k, top_p: top-k and top-p sampling parameters. + Note that top-k = 1 is gready. Also, these paramters are + exclusive meaning that: + if top-k > 0 then we expect top-p=0. + if top-p > 0 then we check for top-k=0. + temperature: sampling temperature. + use_eod_token_for_early_termination: if True, do early termination if + all the sequences have reached this token. + prevent_newline_after_colon: if True, it will disable generating new line \n after : + Note: Outside of model, other parameters only need to be available on + rank 0. + Outputs: Note that is size is adjusted to a lower value than + max-sequence-length if generation is terminated early. + tokens: prompt and generated tokens. size: [b, :] + generated_sequence_lengths: total length (including prompt) of + the generated sequence. size: [b] + output_log_probs: log probability of the selected tokens. size: [b, s] + """ + + args = get_args() + tokenizer = get_tokenizer() + + batch_size = tokens.size(0) + min_prompt_length = lengths.min().item() + max_sequence_length = tokens.size(1) + + if max_sequence_length > args.max_position_embeddings: + raise ValueError("Length of prompt + tokens_to_generate longer than allowed") + + if max_sequence_length * batch_size > args.max_tokens_to_oom: + raise ValueError("Too many tokens. " + str(max_sequence_length*batch_size)+ " is greater than "+str(args.max_tokens_to_oom)) + + # forward step. 
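# --- illustrative aside (editor's sketch, not part of the original patch) ---
# The shift-by-one gather above scores each prompt token: the logits at position
# t are a distribution over the token at position t+1, so the log probability of
# tokens[:, 1:] is read from log_softmax(logits)[:, :-1]. Minimal standalone
# version; the helper name is hypothetical.
import torch
import torch.nn.functional as F

def _demo_token_logprobs(logits, tokens):
    # logits: [batch, seq, vocab]; tokens: [batch, seq]
    log_probs = F.log_softmax(logits, dim=2)
    indices = tokens[:, 1:].unsqueeze(2)                            # targets, shifted by one
    return torch.gather(log_probs[:, :-1], 2, indices).squeeze(2)   # [batch, seq-1]
# --- end aside ---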
+ forward_step = ForwardStep(model, batch_size, max_sequence_length) + + # Added termination_id to support the case that we want to terminate the + # generation once that id is generated. + if hasattr(args, 'eos_id'): + termination_id = args.eos_id + else: + termination_id = tokenizer.eod + + # =================== + # Pre-allocate memory + # =================== + + # Log probability of the sequence (prompt + generated tokens). + output_log_probs = None + output_log_probs_size = (batch_size, max_sequence_length - 1) + # Lengths of generated seuquence including including prompts. + generated_sequence_lengths = None + if mpu.is_pipeline_last_stage(): + if return_output_log_probs: + output_log_probs = torch.empty(output_log_probs_size, + dtype=torch.float32, + device=torch.cuda.current_device()) + generated_sequence_lengths = torch.ones( + batch_size, dtype=torch.int64, + device=torch.cuda.current_device()) * max_sequence_length + + # Whether we have reached a termination id. + is_generation_done = torch.zeros(batch_size, dtype=torch.uint8, + device=torch.cuda.current_device()) + + # ============= + # Run infernece + # ============= + + with torch.no_grad(): + attention_mask, position_ids = _build_attention_mask_and_position_ids( + tokens) + prev_context_length = 0 + for context_length in range(min_prompt_length, max_sequence_length): + + # Pick the slice that we need to pass through the network. + tokens2use = tokens[:, prev_context_length:context_length] + positions2use = position_ids[:, prev_context_length:context_length] + attention_mask2use = attention_mask[ + ..., prev_context_length:context_length, :context_length] + + # logits will be meanigful only in the last pipeline stage. + logits = forward_step(tokens2use, positions2use, attention_mask2use) + logits = logits[0] + + if mpu.is_pipeline_last_stage(): + if prevent_newline_after_colon: + logits[tokens2use[:, -1] == tokenizer.tokenize(':')[0], -1, tokenizer.tokenize('\n')[0]] = -1e10 # disable "\n" after ":" + # Always the last stage should have an output. + assert logits is not None + + # Sample. + last_token_logits = logits[:, -1, :] + new_sample = sample(last_token_logits, + top_k=top_k, + top_p=top_p, + temperature=temperature, + vocab_size=tokenizer.vocab_size) + if top_p > 0.0 and top_p_decay > 0.0: + top_p = top_p * top_p_decay + if top_p_bound > 0.0: + top_p = max(top_p, top_p_bound) + + # If a prompt length is smaller or equal th current context + # length, it means we have started generating tokens + started = lengths <= context_length + # Update the tokens. + tokens[started, context_length] = new_sample[started] + + # Calculate the log probabilities. + if return_output_log_probs: + log_probs = F.log_softmax(logits, dim=2) + if return_output_log_probs: + # Pick the tokens that we need to get the log + # probabilities for. Note that next input token is + # the token which we selected in the current logits, + # so shift by 1. + indices = torch.unsqueeze( + tokens[ + :, + (prev_context_length + 1):(context_length + 1)], + 2) + output_log_probs[:, + prev_context_length:context_length] = \ + torch.gather(log_probs, 2, indices).squeeze(2) + + # Update the tokens on the first stage so the next input to + # the network is correct. + copy_from_last_to_first_pipeline_stage(batch_size, torch.int64, + tokens[:, context_length]) + + # Update the context length for the next token generation. + prev_context_length = context_length + + # Check if all the sequences have hit the termination_id. 
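# --- illustrative aside (editor's sketch, not part of the original patch) ---
# Early termination below keeps a per-sequence "done" flag: a sequence is
# finished once it has started generating and emits the termination id, and the
# loop exits when every sequence is done. Standalone sketch with boolean masks
# (the code below uses byte tensors); the helper name is hypothetical.
import torch

def _demo_update_done(new_sample, started, is_done, termination_id):
    done_token = (new_sample == termination_id) & started   # finished at this step
    is_done = is_done | done_token                           # sticky per-sequence flag
    return is_done, bool(torch.all(is_done))
# --- end aside ---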
+ done = None + if mpu.is_pipeline_last_stage(): + # TODO(rprenger) These stopping methods are tokenizer dependent + # instead tokenization should be in the inference loop so stop sequences can be used + if stop_on_double_eol: + hit_double_eol = (new_sample == 628).byte() & started.byte() + hit_two_eols = (new_sample == 198).byte() & (tokens[:, context_length-1] == 198).byte() & started.byte() + done_token = hit_double_eol | hit_two_eols + elif stop_on_eol: + hit_double_eol = (new_sample == 628).byte() & started.byte() + hit_eol = (new_sample == 198).byte() & started.byte() + done_token = hit_double_eol | hit_eol + else: + done_token = (new_sample == termination_id).byte() & \ + started.byte() + + just_finished = (done_token & ~is_generation_done).bool() + generated_sequence_lengths[just_finished.view(-1)] = \ + context_length + 1 + is_generation_done = is_generation_done | done_token + done = torch.all(is_generation_done) + done = broadcast_from_last_pipeline_stage(1, torch.uint8, + tensor=done) + if use_eod_token_for_early_termination and done: + break + + # =================================================== + # Update the length of based on max generated length. + # =================================================== + + tokens = tokens[:, :(context_length + 1)] + if mpu.is_pipeline_last_stage(): + if return_output_log_probs: + output_log_probs = output_log_probs[:, :context_length] + + # ====================================== + # Broadcast to the first pipeline stage. + # ====================================== + + generated_sequence_lengths = broadcast_from_last_to_first_pipeline_stage( + batch_size, torch.int64, generated_sequence_lengths) + if return_output_log_probs: + output_log_probs_size = (batch_size, context_length) + output_log_probs = broadcast_from_last_to_first_pipeline_stage( + output_log_probs_size, torch.float32, output_log_probs) + + return tokens, generated_sequence_lengths, output_log_probs + +def beam_search_and_return_on_first_stage(model, tokens, lengths, beam_size, stop_token, num_return_gen, length_penalty, prevent_newline_after_colon=True): + args = get_args() + tokenizer = get_tokenizer() + + batch_size = tokens.size(0) + assert(batch_size == 1) + prompt_length = lengths.item() + final_sequence_length = tokens.size(1) + final_sequence_length = min(final_sequence_length, args.max_position_embeddings) + + # If the context is too big, this happens + if prompt_length >= final_sequence_length: + raise ValueError("context length + tokens_to_generate too large") + + # forward step. + forward_step = ForwardStep(model, beam_size, final_sequence_length) + + beam_hyp = BeamHypotheses(beam_size, length_penalty) + best_batches = None + done = torch.zeros(1, dtype=torch.uint8, device=torch.cuda.current_device()) + scores = torch.zeros(beam_size, + dtype=torch.float32, + device=torch.cuda.current_device()).unsqueeze(1) + scores_size_tensor, tokens_size_tensor = None, None + # ============= + # Run infernece + # ============= + with torch.no_grad(): + tokens = tokens.repeat(beam_size, 1) + attention_mask, position_ids = _build_attention_mask_and_position_ids(tokens) + prev_context_length = 0 + for context_length in range(prompt_length, final_sequence_length): + + # Pick the slice that we need to pass through the network. 
+ tokens2use = tokens[:, prev_context_length:context_length] + positions2use = position_ids[:, prev_context_length:context_length] + attention_mask2use = attention_mask[ + ..., prev_context_length:context_length, :context_length] + + # logits will be meanigful only in the last pipeline stage. + logits = forward_step(tokens2use, positions2use, attention_mask2use) + + if mpu.is_pipeline_last_stage(): + if prevent_newline_after_colon: + logits[tokens2use[:, -1] == tokenizer.tokenize(':')[0], -1, tokenizer.tokenize('\n')[0]] = -1e10 # disable "\n" after ":" + vocab_size = logits.size(2) + log_probs = F.log_softmax(logits, dim=2) + new_scores = log_probs[:, -1, :] + scores + + if context_length == prompt_length: # if this is the first one + sorted_scores, indices = torch.sort(new_scores[0,:], descending=True) + else: + sorted_scores, indices = torch.sort(new_scores.view(-1), descending=True) + + best_beam_ids = torch.div(indices[: 2 * beam_size], vocab_size).trunc().long() + best_words = indices[:2 * beam_size] % vocab_size + best_scores = sorted_scores[: 2 * beam_size] + + next_beams = [] + for beam_token_rank, (token_id, beam_score, beam_id) in enumerate( + zip(best_words, best_scores, best_beam_ids) + ): + if token_id.item() == stop_token: + # if beam_token does not belong to top num_beams tokens, it should not be added + is_beam_token_worse_than_top_num_beams = beam_token_rank >= beam_size + if is_beam_token_worse_than_top_num_beams: + continue + beam_hyp.add( + tokens[beam_id].clone(), + beam_score, + context_length + 1 - prompt_length + ) + else: + # add next predicted token since it is not eos_token + next_beams.append((token_id, beam_score, beam_id)) + + if len(next_beams) == beam_size: + break + + if beam_hyp.is_done(best_scores.max().item(), context_length + 1 - prompt_length): + done = torch.ones(1, dtype=torch.uint8, device=torch.cuda.current_device()) + + best_batches = tokens.new([item[2] for item in next_beams]) + tokens = tokens[best_batches,:] + tokens[:, context_length] = tokens.new([item[0] for item in next_beams]) + scores = scores.new([item[1] for item in next_beams]).unsqueeze(1) + + # torch.distributed.barrier() + done = broadcast_from_last_pipeline_stage(1, torch.uint8, done) + if done: + break + + # Update the tokens on the first stage so the next input to + # the network is correct. + copy_from_last_to_first_pipeline_stage(tokens.size(), torch.int64, + tokens) + + # set inference key values to make it consistent with best beam index + best_batches = broadcast_from_last_pipeline_stage(beam_size, torch.int64, best_batches) + forward_step.inference_params.swap_key_value_dict(best_batches) + + # Update the context length for the next token generation. 
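# --- illustrative aside (editor's sketch, not part of the original patch) ---
# Beam expansion above flattens the [beam_size, vocab] score matrix, takes the
# top 2*beam_size entries, and recovers which beam and which token each entry
# came from with an integer divide / modulo. Standalone sketch; the helper name
# is hypothetical.
import torch

def _demo_select_beam_candidates(new_scores, beam_size):
    # new_scores: [beam_size, vocab_size] cumulative log probabilities
    vocab_size = new_scores.size(1)
    sorted_scores, indices = torch.sort(new_scores.view(-1), descending=True)
    top = indices[: 2 * beam_size]
    beam_ids = torch.div(top, vocab_size, rounding_mode='floor')
    token_ids = top % vocab_size
    return beam_ids, token_ids, sorted_scores[: 2 * beam_size]
# --- end aside ---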
+            prev_context_length = context_length
+
+        if mpu.is_pipeline_last_stage():
+            # If no stop token was found, add the still-open beams to the hypotheses.
+            if not done:
+                for beam_id in range(beam_size):
+                    beam_hyp.add(tokens[beam_id].clone(), scores[beam_id].squeeze(), context_length + 1 - prompt_length)
+
+            # Rank the hypotheses by score.
+            sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0], reverse=True)
+            num_return_gen = min(num_return_gen, len(sorted_hyps))
+            scores = [sorted_hyps[i][0] for i in range(num_return_gen)]
+            tokens = [sorted_hyps[i][1] for i in range(num_return_gen)]
+            scores = torch.stack(scores, dim=0)
+            tokens = torch.stack(tokens, dim=0)
+            scores_size_tensor = torch.tensor(scores.shape, dtype=torch.int64, device=torch.cuda.current_device())
+            tokens_size_tensor = torch.tensor(tokens.shape, dtype=torch.int64, device=torch.cuda.current_device())
+
+        scores_size_tensor = broadcast_from_last_pipeline_stage(1, torch.int64, scores_size_tensor)
+        tokens_size_tensor = broadcast_from_last_pipeline_stage(2, torch.int64, tokens_size_tensor)
+
+        scores = broadcast_from_last_to_first_pipeline_stage(tuple(scores_size_tensor), torch.float32, scores)
+        tokens = broadcast_from_last_to_first_pipeline_stage(tuple(tokens_size_tensor), torch.int64, tokens)
+
+    return tokens, scores
+
+
+def _build_attention_mask_and_position_ids(tokens):
+    """Build the attention mask and position ids for the input tokens."""
+
+    # Since we are not interested in loss-mask and reset attention/position
+    # is also False, eod_token is not used so it is safe to set it to None.
+    attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
+        data=tokens,
+        eod_token=None,
+        reset_position_ids=False,
+        reset_attention_mask=False,
+        eod_mask_loss=False)
+
+    return attention_mask, position_ids
diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/sampling.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/sampling.py
new file mode 100644
index 0000000000000000000000000000000000000000..370773a36c087d01e75731e38724cfb35d4acd74
--- /dev/null
+++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/sampling.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+
+"""Sampling utilities.
+Part of this code is inspired by:
+ - https://github.com/ari-holtzman/degen/blob/master/gen.py
+ - https://huggingface.co/transformers/_modules/transformers/generation_logits_process.html
+"""
+
+
+import torch
+
+
+def modify_logits_for_top_k_filtering(logits, top_k):
+    """Set the logits for non top-k values to -inf."""
+
+    filter_ = logits < torch.topk(logits, top_k)[0][..., -1, None]
+    logits.masked_fill_(filter_, float('-Inf'))
+
+
+def modify_logits_for_top_p_filtering(logits, top_p):
+    """Set the logits for non top-p values to -inf."""
+
+    # First sort and calculate cumulative sum of probabilities.
+    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+    cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
+
+    # Filter based on the cumulative sum.
+    filter_ = cumulative_probs > top_p
+    # Shift the mask one position to the right so that the first token whose
+    # cumulative probability crosses top_p is still kept. This follows the
+    # original implementation:
+    # https://github.com/ari-holtzman/degen/blob/master/gen.py
+    filter_[:, 1:] = filter_[:, :-1].clone()
+    # Make sure we have at least one token to select from.
+    filter_[..., 0] = 0
+
+    # Fill in the filtered part.
+    filter_ = filter_.scatter(1, sorted_indices, filter_)
+    logits.masked_fill_(filter_, float('-Inf'))
+
+
+def sample(logits, top_k=0, top_p=0.0, temperature=1.0, vocab_size=None):
+    """Sample and generate a token.
+    Note: logits has the dimension [b, v] where b is the batch size
+    and v is the vocabulary size.
+    If vocab_size is provided, we will make sure the samples that are
+    generated are in [0, vocab_size). This will avoid out-of-vocabulary
+    generations due to padding.
+    """
+
+    # Check logits for consistency.
+    assert logits.ndim == 2, 'expected the logits to be of [b, v] shape.'
+    assert logits.type() == 'torch.cuda.FloatTensor', \
+        'input logits should be floats.'
+
+    # Greedy is just simple argmax.
+    if top_k == 1:
+        assert top_p == 0.0, 'cannot set both greedy and top-p samplings.'
+        samples = torch.argmax(logits, dim=-1)
+
+    # Top-k or top-p sampling.
+    else:
+        # Clone so we do not modify the inputs.
+        logits = logits.clone()
+        # Apply temperature in place.
+        if temperature != 1.0:
+            logits.div_(temperature)
+
+        if top_k > 1:
+            assert top_p == 0.0, 'cannot set both top-k and top-p samplings.'
+            assert top_k <= logits.size(1), 'top-k is larger than logit size.'
+            if vocab_size:
+                assert top_k < vocab_size, 'top-k is larger than vocab size.'
+            modify_logits_for_top_k_filtering(logits, top_k)
+
+        elif top_p > 0.0:
+            assert top_p <= 1.0, 'top-p should be in (0, 1].'
+            modify_logits_for_top_p_filtering(logits, top_p)
+
+        # After filtering, we need to recalculate the distribution.
+        probs = logits.softmax(dim=-1)
+        samples = torch.multinomial(probs, num_samples=1).view(-1)
+
+    # If vocab_size is provided, make sure the samples are in
+    # the range [0, vocab_size).
+    if vocab_size:
+        samples = torch.clamp(samples, min=0, max=(vocab_size - 1))
+
+    return samples
diff --git a/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/tokenization.py b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/tokenization.py
new file mode 100644
index 0000000000000000000000000000000000000000..accead319a176cfbe3e0ba359abb44d3abbcb8cb
--- /dev/null
+++ b/docker/intel_code/llama13b/Megatron-DeepSpeed/megatron/text_generation/tokenization.py
@@ -0,0 +1,124 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ +"""Tokenization utilities.""" + + +import torch + + +from megatron import get_tokenizer, get_args +from .communication import broadcast_int_list, broadcast_tensor + + +def detokenize_generations(tokens_gpu_tensor, + lengths_gpu_tensor, + return_segments): + """Detokenize the generated tokens.""" + + tokenizer = get_tokenizer() + args = get_args() + prompts_plus_generations = [] + if return_segments: + prompts_plus_generations_segments = [] + + tokens = tokens_gpu_tensor.cpu().numpy().tolist() + lengths = lengths_gpu_tensor.cpu().numpy().tolist() + for sequence_tokens, length in zip(tokens, lengths): + sequence_tokens = sequence_tokens[:length] + prompts_plus_generations.append( + tokenizer.detokenize(sequence_tokens)) + if return_segments: + words = [] + for token in sequence_tokens: + if args.tokenizer_type in ['SentencePieceTokenizer', + 'GPTSentencePieceTokenizer']: + word = tokenizer.decoder[token] + elif args.tokenizer_type == 'NullTokenizer': + word = str(token) + else: + word = tokenizer.tokenizer.decoder[token] + word = bytearray( + [tokenizer.tokenizer.byte_decoder[c] for c in word]).decode( + 'utf-8', errors='replace') + words.append(word) + prompts_plus_generations_segments.append(words) + + if return_segments: + return tokens, prompts_plus_generations, \ + prompts_plus_generations_segments + + return tokens, prompts_plus_generations + + +def tokenize_prompts(prompts=None, tokens_to_generate=None, + add_BOS=None, rank=0): + """Tokenize prompts and make them avaiable on all ranks.""" + + # On all ranks set to None so we can pass them to functions + sizes_list = None + prompts_tokens_cuda_long_tensor = None + prompts_length_cuda_long_tensor = None + + # On the specified rank, build the above. + if torch.distributed.get_rank() == rank: + assert prompts is not None + assert tokens_to_generate is not None + # Tensor of tokens padded and their unpadded length. + prompts_tokens_cuda_long_tensor, prompts_length_cuda_long_tensor = \ + _tokenize_prompts_and_batch(prompts, tokens_to_generate, add_BOS) + # We need the sizes of these tensors for the boradcast + sizes_list = [prompts_tokens_cuda_long_tensor.size(0), # Batch size + prompts_tokens_cuda_long_tensor.size(1)] # Sequence lenght + + # First, broadcast the sizes. + sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=rank) + + # Now that we have the sizes, we can boradcast the tokens + # and length tensors. + sizes = sizes_tensor.tolist() + prompts_tokens_cuda_long_tensor = broadcast_tensor( + sizes, torch.int64, tensor=prompts_tokens_cuda_long_tensor, rank=rank) + prompts_length_cuda_long_tensor = broadcast_tensor( + sizes[0], torch.int64, tensor=prompts_length_cuda_long_tensor, + rank=rank) + + return prompts_tokens_cuda_long_tensor, prompts_length_cuda_long_tensor + + +def _tokenize_prompts_and_batch(prompts, tokens_to_generate, add_BOS): + """Given a set of prompts and number of tokens to generate: + - tokenize prompts + - set the sequence length to be the max of length of prompts + plus the number of tokens we would like to generate + - pad all the sequences to this length so we can convert them + into a 2D tensor. + """ + + # Tokenize all the prompts. + tokenizer = get_tokenizer() + if add_BOS: + prompts_tokens = [[tokenizer.eod] + tokenizer.tokenize(prompt) + for prompt in prompts] + else: + prompts_tokens = [tokenizer.tokenize(prompt) for prompt in prompts] + + # Now we have a list of list of tokens which each list has a different + # size. 
+    # size. We want to extend these lists to:
+    #   - incorporate the tokens that need to be generated
+    #   - make all the sequences equal length.
+    # Get the prompts length.
+    prompts_length = [len(prompt_tokens) for prompt_tokens in prompts_tokens]
+    # Get the max prompts length.
+    max_prompt_len = max(prompts_length)
+    # Number of tokens in each sample of the batch.
+    samples_length = max_prompt_len + tokens_to_generate
+    # Now pad every list of tokens to the same size: samples_length.
+    for prompt_tokens, prompt_length in zip(prompts_tokens, prompts_length):
+        padding_size = samples_length - prompt_length
+        prompt_tokens.extend([tokenizer.eod] * padding_size)
+
+    # Now that we are in a structured format, we can convert to tensors.
+    prompts_tokens_tensor = torch.cuda.LongTensor(prompts_tokens)
+    prompts_length_tensor = torch.cuda.LongTensor(prompts_length)
+
+    return prompts_tokens_tensor, prompts_length_tensor
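The subtlest step in the sampling.py file above is the shift-by-one in modify_logits_for_top_p_filtering, which keeps the first token whose cumulative probability crosses top_p inside the nucleus. The following is a minimal, CPU-only sketch of that nucleus-filtering logic, separate from the files in this diff; the function name, the toy logits, and the 0.8 threshold are illustrative choices, not part of the repository.

import torch


def top_p_filter(logits, top_p):
    """Return a copy of `logits` with every token outside the top-p nucleus set to -inf."""
    logits = logits.clone()
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)

    # Mark tokens whose cumulative probability already exceeds top_p ...
    filter_ = cumulative_probs > top_p
    # ... then shift the mask right by one so the first token that crosses
    # the threshold is still kept, and always keep the most likely token.
    filter_[:, 1:] = filter_[:, :-1].clone()
    filter_[..., 0] = False

    # Map the mask back from sorted order to the original vocabulary order.
    filter_ = filter_.scatter(1, sorted_indices, filter_)
    return logits.masked_fill(filter_, float('-Inf'))


if __name__ == '__main__':
    # Toy batch of one prompt over a five-token vocabulary.
    logits = torch.tensor([[2.0, 1.0, 0.5, 0.1, -1.0]])
    filtered = top_p_filter(logits, top_p=0.8)
    # The two least likely tokens are now -inf; sampling mirrors sample():
    # softmax over the filtered logits, then a multinomial draw.
    sample_ = torch.multinomial(filtered.softmax(dim=-1), num_samples=1).view(-1)
    print(filtered)
    print(sample_)

With these toy logits the cumulative probabilities are roughly (0.56, 0.76, 0.89, 0.97, 1.0), so a 0.8 threshold keeps the first three tokens and masks the last two, which is exactly the effect the shift-by-one is there to produce.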